@@ -79,7 +79,7 @@ To make the example more realistic we add random normally distributed noise to the simulation.

```{julia}
sol = solve(prob, Tsit5(); saveat=0.1)
- odedata = Array(sol) + 0.8 * randn(size(Array(sol)))
+ odedata = Array(sol) + 0.5 * randn(size(Array(sol)))

# Plot simulation and noisy observations.
plot(sol; alpha=0.3)
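# Sketch, not part of this diff: one way to overlay the noisy observations on the
# solution plot, assuming `odedata` is the 2×N matrix built above and that the
# Plots/StatsPlots `scatter!` function is available.
scatter!(sol.t, odedata'; color=[1 2], label="")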
@@ -98,18 +98,18 @@ Therefore, we write the Lotka-Volterra parameter estimation problem using the Tu
```{julia}
@model function fitlv(data, prob)
# Prior distributions.
- σ ~ InverseGamma(2, 3)
- α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
- β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
- γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
- δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+ σ ~ InverseGamma(3, 2)
+ α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+ β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+ γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+ δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)

# Simulate Lotka-Volterra model.
p = [α, β, γ, δ]
predicted = solve(prob, Tsit5(); p=p, saveat=0.1)

# Observations.
- for i in 1:length(predicted)
+ for i in eachindex(predicted)
data[:, i] ~ MvNormal(predicted[i], σ^2 * I)
end

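# Usage sketch (not part of the diff): with `odedata` and `prob` defined as in the
# tutorial, the model above can be instantiated and sampled, for example:
model = fitlv(odedata, prob)
chain = sample(model, NUTS(), 1000; progress=false)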
@@ -160,11 +160,11 @@ I.e., we fit the model only to the $y$ variable of the system without providing any information about $x$.
```{julia}
@model function fitlv2(data::AbstractVector, prob)
# Prior distributions.
- σ ~ InverseGamma(2, 3)
- α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
- β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
- γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
- δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+ σ ~ InverseGamma(3, 2)
+ α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+ β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+ γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+ δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)

# Simulate Lotka-Volterra model but save only the second state of the system (predators).
p = [α, β, γ, δ]
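# Sketch of how this single-state model could continue (illustration only, assuming
# the solver saves just the predator series, e.g. via `save_idxs=2`; not necessarily
# this file's exact code):
predicted = solve(prob, Tsit5(); p=p, saveat=0.1, save_idxs=2)

# Observations of the predators only, with a univariate likelihood.
for i in eachindex(predicted)
    data[i] ~ Normal(predicted[i], σ)
end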
@@ -260,18 +260,18 @@ Now we define the Turing model for the Lotka-Volterra model with delay and sampl
```{julia}
@model function fitlv_dde(data, prob)
# Prior distributions.
- σ ~ InverseGamma(2, 3)
- α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
- β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
- γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
- δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+ σ ~ InverseGamma(3, 2)
+ α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+ β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+ γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+ δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)

# Simulate Lotka-Volterra model.
p = [α, β, γ, δ]
predicted = solve(prob, MethodOfSteps(Tsit5()); p=p, saveat=0.1)

# Observations.
- for i in 1:length(predicted)
+ for i in eachindex(predicted)
data[:, i] ~ MvNormal(predicted[i], σ^2 * I)
end
end
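# Sketch (assumed names and values, not shown in this diff) of how the delayed
# Lotka-Volterra problem that `fitlv_dde` expects could be set up with DelayDiffEq;
# it is then solved with MethodOfSteps(Tsit5()) as above.
function delay_lotka_volterra!(du, u, h, p, t)
    α, β, γ, δ = p
    x, y = u
    x_hist = h(p, t - 1.0)[1]  # prey population one time unit in the past
    du[1] = α * x_hist - β * x * y
    du[2] = -γ * y + δ * x * y
    return nothing
end

h_history(p, t) = [1.0, 1.0]  # constant history before the initial time
prob_dde = DDEProblem(
    delay_lotka_volterra!, [1.0, 1.0], h_history, (0.0, 10.0), [1.5, 1.0, 3.0, 1.0];
    constant_lags=[1.0],
)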
@@ -340,18 +340,18 @@ Here we will not choose a `sensealg` and let it use the default choice:
```{julia}
@model function fitlv_sensealg(data, prob)
# Prior distributions.
- σ ~ InverseGamma(2, 3)
- α ~ truncated(Normal(1.5, 0.5); lower=0.5, upper=2.5)
- β ~ truncated(Normal(1.2, 0.5); lower=0, upper=2)
- γ ~ truncated(Normal(3.0, 0.5); lower=1, upper=4)
- δ ~ truncated(Normal(1.0, 0.5); lower=0, upper=2)
+ σ ~ InverseGamma(3, 2)
+ α ~ truncated(Normal(1.5, 0.2); lower=0.5, upper=2.5)
+ β ~ truncated(Normal(1.1, 0.2); lower=0, upper=2)
+ γ ~ truncated(Normal(3.0, 0.2); lower=1, upper=4)
+ δ ~ truncated(Normal(1.0, 0.2); lower=0, upper=2)

# Simulate Lotka-Volterra model and use a specific algorithm for computing sensitivities.
p = [α, β, γ, δ]
predicted = solve(prob; p=p, saveat=0.1)

# Observations.
- for i in 1:length(predicted)
+ for i in eachindex(predicted)
data[:, i] ~ MvNormal(predicted[i], σ^2 * I)
end

@@ -361,7 +361,7 @@
model_sensealg = fitlv_sensealg(odedata, prob)

# Sample a single chain with 1000 samples using Zygote.
- sample(model_sensealg, NUTS(;adtype=AutoZygote()), 1000; progress=false)
+ sample(model_sensealg, NUTS(; adtype=AutoZygote()), 1000; progress=false)
```
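The hunk above relies on the default sensitivity algorithm. As a sketch (assuming the SciMLSensitivity package is loaded; `InterpolatingAdjoint` and `ZygoteVJP` are its exports, and `prob`, `p` are as in the model), an explicit `sensealg` could instead be passed to `solve`:

```julia
# Sketch only: select an adjoint sensitivity algorithm explicitly rather than
# relying on the default choice.
predicted = solve(prob; p=p, saveat=0.1,
                  sensealg=InterpolatingAdjoint(; autojacvec=ZygoteVJP()))
```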

For more examples of adjoint usage on large parameter models, consult the [DiffEqFlux documentation](https://diffeqflux.sciml.ai/dev/).