Commit b3cc1e4

mhauru and penelopeysm authored
Support for DynamicPPL v0.35 (#2488)

* Progress towards compat with DPPL v0.35
* More fixing of DPPL v0.35 stuff
* Fix LogDensityFunction argument order
* More minor bugfixes
* [TEMP] Commit Manifest pointing to DynamicPPL#release-0.35
* remove LogDensityProblemsAD (#2490)
* Remove LogDensityProblemsAD, part 1
* update Optimisation code to not use LogDensityProblemsAD
* Fix field name change
* Don't put chunksize=0
* Remove LogDensityProblemsAD dep
* Improve OptimLogDensity docstring
* Remove unneeded model argument to _optimize
* Fix more tests
* Remove essential/ad from the list of CI groups
* Fix HMC function
* more test fixes (#2491)
* Remove LogDensityProblemsAD, part 1
* update Optimisation code to not use LogDensityProblemsAD
* Fix field name change
* Don't put chunksize=0
* Remove LogDensityProblemsAD dep
* Improve OptimLogDensity docstring
* Remove unneeded model argument to _optimize
* Fix more tests
* Remove essential/ad from the list of CI groups
* Fix HMC function
* More test fixes
* Remove Manifest
* More fixes for DynamicPPL 0.35 (#2494)
* Remove test/dynamicppl/compiler.jl
* Remove old regression tests
* Remove vdemo2
* Fix last test
* Add HISTORY.md entry about DPPL 0.35
* Allow ESS to sample variables with different symbols
* Update a TODO note

---------

Co-authored-by: Penelope Yong <[email protected]>
1 parent 6088517 commit b3cc1e4

33 files changed: +341 -1225 lines changed

.github/workflows/Tests.yml

Lines changed: 1 addition & 3 deletions

@@ -24,8 +24,6 @@ jobs:
         test:
           # Run some of the slower test files individually. The last one catches everything
           # not included in the others.
-          - name: "essential/ad"
-            args: "essential/ad.jl"
           - name: "mcmc/gibbs"
            args: "mcmc/gibbs.jl"
           - name: "mcmc/hmc"
@@ -37,7 +35,7 @@ jobs:
           - name: "mcmc/ess"
            args: "mcmc/ess.jl"
           - name: "everything else"
-            args: "--skip essential/ad.jl mcmc/gibbs.jl mcmc/hmc.jl mcmc/abstractmcmc.jl mcmc/Inference.jl mcmc/ess.jl"
+            args: "--skip mcmc/gibbs.jl mcmc/hmc.jl mcmc/abstractmcmc.jl mcmc/Inference.jl mcmc/ess.jl"
         runner:
           # Default
           - version: '1'

HISTORY.md

Lines changed: 13 additions & 0 deletions

@@ -2,8 +2,21 @@
 
 ## Breaking changes
 
+### Gibbs constructors
+
 0.37 removes the old Gibbs constructors deprecated in 0.36.
 
+### DynamicPPL 0.35
+
+Turing.jl v0.37 uses DynamicPPL v0.35, which brings with it several breaking changes:
+
+- The right-hand side of `.~` must now be a univariate distribution.
+- Indexing `VarInfo` objects by samplers has been removed completely.
+- The order in which nested submodel prefixes are applied has been reversed.
+- The arguments for the constructor of `LogDensityFunction` have changed. `LogDensityFunction` also now satisfies the `LogDensityProblems` interface, without needing a wrapper object.
+
+For more details about all of the above, see the changelog of DynamicPPL [here](https://github.com/TuringLang/DynamicPPL.jl/releases/tag/v0.35.0).
+
 # Release 0.36.0
 
 ## Breaking changes
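
As a rough sketch of how the last two HISTORY.md bullets look in practice under DynamicPPL v0.35 (illustrative only, not part of this commit; the model, data, and evaluation point below are made up):

```julia
using Turing
using Turing: DynamicPPL
using LogDensityProblems

@model function demo(y)
    s ~ InverseGamma(2, 3)
    # The right-hand side of `.~` must now be a univariate distribution;
    # it is broadcast over the elements of `y`.
    y .~ Normal(0, sqrt(s))
end

model = demo(randn(10))

# `LogDensityFunction` now takes the model as its first argument and itself
# satisfies the LogDensityProblems interface; no AD wrapper object is needed.
ldf = DynamicPPL.LogDensityFunction(model)
d = LogDensityProblems.dimension(ldf)
LogDensityProblems.logdensity(ldf, ones(d))
```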

Project.toml

Lines changed: 1 addition & 3 deletions

@@ -23,7 +23,6 @@ ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 Libtask = "6f1fad26-d15e-5dc8-ae53-837a1d7b8c9f"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
-LogDensityProblemsAD = "996a588d-648d-4e1f-a8f0-a84b347e47b1"
 MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d"
 NamedArrays = "86f7a689-2022-50b4-a561-43c23ac3c673"
 Optimization = "7f7a1694-90dd-40f0-9382-eb1efda571ba"
@@ -63,13 +62,12 @@ Distributions = "0.23.3, 0.24, 0.25"
 DistributionsAD = "0.6"
 DocStringExtensions = "0.8, 0.9"
 DynamicHMC = "3.4"
-DynamicPPL = "0.34.1"
+DynamicPPL = "0.35"
 EllipticalSliceSampling = "0.5, 1, 2"
 ForwardDiff = "0.10.3"
 Libtask = "0.8.8"
 LinearAlgebra = "1"
 LogDensityProblems = "2"
-LogDensityProblemsAD = "1.7.0"
 MCMCChains = "5, 6"
 NamedArrays = "0.9, 0.10"
 Optim = "1"

ext/TuringDynamicHMCExt.jl

Lines changed: 17 additions & 28 deletions

@@ -3,17 +3,10 @@ module TuringDynamicHMCExt
 ### DynamicHMC backend - https://github.com/tpapp/DynamicHMC.jl
 ###
 
-if isdefined(Base, :get_extension)
-    using DynamicHMC: DynamicHMC
-    using Turing
-    using Turing: AbstractMCMC, Random, LogDensityProblems, DynamicPPL
-    using Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS
-else
-    import ..DynamicHMC
-    using ..Turing
-    using ..Turing: AbstractMCMC, Random, LogDensityProblems, DynamicPPL
-    using ..Turing.Inference: ADTypes, LogDensityProblemsAD, TYPEDFIELDS
-end
+using DynamicHMC: DynamicHMC
+using Turing
+using Turing: AbstractMCMC, Random, LogDensityProblems, DynamicPPL
+using Turing.Inference: ADTypes, TYPEDFIELDS
 
 """
     DynamicNUTS
@@ -25,22 +18,15 @@ To use it, make sure you have DynamicHMC package (version >= 2) loaded:
 using DynamicHMC
 ```
 """
-struct DynamicNUTS{AD,space,T<:DynamicHMC.NUTS} <: Turing.Inference.Hamiltonian
+struct DynamicNUTS{AD,T<:DynamicHMC.NUTS} <: Turing.Inference.Hamiltonian
     sampler::T
     adtype::AD
 end
 
-function DynamicNUTS(
-    spl::DynamicHMC.NUTS=DynamicHMC.NUTS(),
-    space::Tuple=();
-    adtype::ADTypes.AbstractADType=Turing.DEFAULT_ADTYPE,
-)
-    return DynamicNUTS{typeof(adtype),space,typeof(spl)}(spl, adtype)
-end
+DynamicNUTS() = DynamicNUTS(DynamicHMC.NUTS())
+DynamicNUTS(spl) = DynamicNUTS(spl, Turing.DEFAULT_ADTYPE)
 Turing.externalsampler(spl::DynamicHMC.NUTS) = DynamicNUTS(spl)
 
-DynamicPPL.getspace(::DynamicNUTS{<:Any,space}) where {space} = space
-
 """
     DynamicNUTSState
 
@@ -70,25 +56,28 @@ function DynamicPPL.initialstep(
     kwargs...,
 )
     # Ensure that initial sample is in unconstrained space.
-    if !DynamicPPL.islinked(vi, spl)
-        vi = DynamicPPL.link!!(vi, spl, model)
+    if !DynamicPPL.islinked(vi)
+        vi = DynamicPPL.link!!(vi, model)
         vi = last(DynamicPPL.evaluate!!(model, vi, DynamicPPL.SamplingContext(rng, spl)))
     end
 
     # Define log-density function.
-    ℓ = LogDensityProblemsAD.ADgradient(
-        Turing.LogDensityFunction(vi, model, spl, DynamicPPL.DefaultContext())
+    ℓ = DynamicPPL.LogDensityFunction(
+        model,
+        vi,
+        DynamicPPL.SamplingContext(spl, DynamicPPL.DefaultContext());
+        adtype=spl.alg.adtype,
     )
 
     # Perform initial step.
     results = DynamicHMC.mcmc_keep_warmup(
-        rng, ℓ, 0; initialization=(q=vi[spl],), reporter=DynamicHMC.NoProgressReport()
+        rng, ℓ, 0; initialization=(q=vi[:],), reporter=DynamicHMC.NoProgressReport()
    )
     steps = DynamicHMC.mcmc_steps(results.sampling_logdensity, results.final_warmup_state)
     Q, _ = DynamicHMC.mcmc_next_step(steps, results.final_warmup_state.Q)
 
     # Update the variables.
-    vi = DynamicPPL.setindex!!(vi, Q.q, spl)
+    vi = DynamicPPL.unflatten(vi, Q.q)
     vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
 
     # Create first sample and state.
@@ -112,7 +101,7 @@ function AbstractMCMC.step(
     Q, _ = DynamicHMC.mcmc_next_step(steps, state.cache)
 
     # Update the variables.
-    vi = DynamicPPL.setindex!!(vi, Q.q, spl)
+    vi = DynamicPPL.unflatten(vi, Q.q)
     vi = DynamicPPL.setlogp!!(vi, Q.ℓq)
 
     # Create next sample and state.
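
For orientation, a hypothetical end-to-end use of this extension under the new API (the model and chain length are invented; `externalsampler` is the entry point defined in the diff above):

```julia
using Turing
using DynamicHMC

@model function coinflip(y)
    p ~ Beta(1, 1)
    # Univariate right-hand side, as DynamicPPL v0.35 requires for `.~`.
    y .~ Bernoulli(p)
end

# `externalsampler(::DynamicHMC.NUTS)` returns the `DynamicNUTS` wrapper above,
# so sampling dispatches through `DynamicPPL.initialstep`/`AbstractMCMC.step`.
chain = sample(coinflip(rand(Bool, 100)), externalsampler(DynamicHMC.NUTS()), 500)
```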

ext/TuringOptimExt.jl

Lines changed: 24 additions & 25 deletions

@@ -1,14 +1,8 @@
 module TuringOptimExt
 
-if isdefined(Base, :get_extension)
-    using Turing: Turing
-    import Turing: DynamicPPL, NamedArrays, Accessors, Optimisation
-    using Optim: Optim
-else
-    import ..Turing
-    import ..Turing: DynamicPPL, NamedArrays, Accessors, Optimisation
-    import ..Optim
-end
+using Turing: Turing
+import Turing: DynamicPPL, NamedArrays, Accessors, Optimisation
+using Optim: Optim
 
 ####################
 # Optim.jl methods #
@@ -42,7 +36,7 @@ function Optim.optimize(
 )
     ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
     f = Optimisation.OptimLogDensity(model, ctx)
-    init_vals = DynamicPPL.getparams(f)
+    init_vals = DynamicPPL.getparams(f.ldf)
     optimizer = Optim.LBFGS()
     return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
 end
@@ -65,7 +59,7 @@ function Optim.optimize(
 )
     ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
     f = Optimisation.OptimLogDensity(model, ctx)
-    init_vals = DynamicPPL.getparams(f)
+    init_vals = DynamicPPL.getparams(f.ldf)
     return _mle_optimize(model, init_vals, optimizer, options; kwargs...)
 end
 function Optim.optimize(
@@ -81,7 +75,7 @@ end
 
 function _mle_optimize(model::DynamicPPL.Model, args...; kwargs...)
     ctx = Optimisation.OptimizationContext(DynamicPPL.LikelihoodContext())
-    return _optimize(model, Optimisation.OptimLogDensity(model, ctx), args...; kwargs...)
+    return _optimize(Optimisation.OptimLogDensity(model, ctx), args...; kwargs...)
 end
 
 """
@@ -112,7 +106,7 @@ function Optim.optimize(
 )
     ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext())
     f = Optimisation.OptimLogDensity(model, ctx)
-    init_vals = DynamicPPL.getparams(f)
+    init_vals = DynamicPPL.getparams(f.ldf)
     optimizer = Optim.LBFGS()
     return _map_optimize(model, init_vals, optimizer, options; kwargs...)
 end
@@ -135,7 +129,7 @@ function Optim.optimize(
 )
     ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext())
     f = Optimisation.OptimLogDensity(model, ctx)
-    init_vals = DynamicPPL.getparams(f)
+    init_vals = DynamicPPL.getparams(f.ldf)
     return _map_optimize(model, init_vals, optimizer, options; kwargs...)
 end
 function Optim.optimize(
@@ -151,28 +145,29 @@ end
 
 function _map_optimize(model::DynamicPPL.Model, args...; kwargs...)
     ctx = Optimisation.OptimizationContext(DynamicPPL.DefaultContext())
-    return _optimize(model, Optimisation.OptimLogDensity(model, ctx), args...; kwargs...)
+    return _optimize(Optimisation.OptimLogDensity(model, ctx), args...; kwargs...)
 end
-
 """
-    _optimize(model::Model, f::OptimLogDensity, optimizer=Optim.LBFGS(), args...; kwargs...)
+    _optimize(f::OptimLogDensity, optimizer=Optim.LBFGS(), args...; kwargs...)
 
 Estimate a mode, i.e., compute a MLE or MAP estimate.
 """
 function _optimize(
-    model::DynamicPPL.Model,
     f::Optimisation.OptimLogDensity,
-    init_vals::AbstractArray=DynamicPPL.getparams(f),
+    init_vals::AbstractArray=DynamicPPL.getparams(f.ldf),
     optimizer::Optim.AbstractOptimizer=Optim.LBFGS(),
     options::Optim.Options=Optim.Options(),
     args...;
     kwargs...,
 )
     # Convert the initial values, since it is assumed that users provide them
     # in the constrained space.
-    f = Accessors.@set f.varinfo = DynamicPPL.unflatten(f.varinfo, init_vals)
-    f = Accessors.@set f.varinfo = DynamicPPL.link(f.varinfo, model)
-    init_vals = DynamicPPL.getparams(f)
+    # TODO(penelopeysm): As in src/optimisation/Optimisation.jl, unclear
+    # whether initialisation is really necessary at all
+    vi = DynamicPPL.unflatten(f.ldf.varinfo, init_vals)
+    vi = DynamicPPL.link(vi, f.ldf.model)
+    f = Optimisation.OptimLogDensity(f.ldf.model, vi, f.ldf.context; adtype=f.ldf.adtype)
+    init_vals = DynamicPPL.getparams(f.ldf)
 
     # Optimize!
     M = Optim.optimize(Optim.only_fg!(f), init_vals, optimizer, options, args...; kwargs...)
@@ -186,12 +181,16 @@ function _optimize(
     end
 
     # Get the optimum in unconstrained space. `getparams` does the invlinking.
-    f = Accessors.@set f.varinfo = DynamicPPL.unflatten(f.varinfo, M.minimizer)
-    vns_vals_iter = Turing.Inference.getparams(model, f.varinfo)
+    vi = f.ldf.varinfo
+    vi_optimum = DynamicPPL.unflatten(vi, M.minimizer)
+    logdensity_optimum = Optimisation.OptimLogDensity(
+        f.ldf.model, vi_optimum, f.ldf.context
+    )
+    vns_vals_iter = Turing.Inference.getparams(f.ldf.model, vi_optimum)
     varnames = map(Symbol ∘ first, vns_vals_iter)
     vals = map(last, vns_vals_iter)
     vmat = NamedArrays.NamedArray(vals, varnames)
-    return Optimisation.ModeResult(vmat, M, -M.minimum, f)
+    return Optimisation.ModeResult(vmat, M, -M.minimum, logdensity_optimum)
 end
 
 end # module
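
These `_optimize` internals sit behind Turing's mode-estimation front ends. A hedged usage sketch follows (the model and data are invented; it assumes the `Optim.optimize(model, MLE())`/`MAP()` methods shown in the hunks above):

```julia
using Turing
using Optim

@model function gdemo(x)
    s2 ~ InverseGamma(2, 3)
    m ~ Normal(0, sqrt(s2))
    x .~ Normal(m, sqrt(s2))
end

model = gdemo([1.5, 2.0])

# Both calls construct an `OptimLogDensity` and run through `_optimize`;
# the returned `ModeResult` carries an `OptimLogDensity` evaluated at the optimum.
mle_estimate = Optim.optimize(model, MLE())
map_estimate = Optim.optimize(model, MAP())
```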

src/Turing.jl

Lines changed: 1 addition & 1 deletion

@@ -9,7 +9,7 @@ using Compat: pkgversion
 
 using AdvancedVI: AdvancedVI
 using DynamicPPL: DynamicPPL, LogDensityFunction
-import DynamicPPL: getspace, NoDist, NamedDist
+import DynamicPPL: NoDist, NamedDist
 using LogDensityProblems: LogDensityProblems
 using NamedArrays: NamedArrays
 using Accessors: Accessors

src/essential/container.jl

Lines changed: 1 addition & 1 deletion

@@ -39,7 +39,7 @@ function AdvancedPS.advance!(
 end
 
 function AdvancedPS.delete_retained!(trace::TracedModel)
-    DynamicPPL.set_retained_vns_del_by_spl!(trace.varinfo, trace.sampler)
+    DynamicPPL.set_retained_vns_del!(trace.varinfo)
     return trace
 end
