
Commit 875035f

Posted code in the printed book

1 parent fe6d858 commit 875035f


70 files changed: +1401 -1360 lines

CHANGES.md (+6 -0)

@@ -2,6 +2,12 @@
 
 # MAJOR CHANGES
 
+## 2021-12-05
+
+Code from the proofs has been replaced with the actual code in the printed book; additional code (not intended for printing) is unchanged.
+
+Tested on Windows 10 with R 4.1.2 and R-devel (as at 2021-12-01), and up-to-date versions of packages, including `IPMbook` 0.1.2.9007, `unmarked` 1.1.1.9014 and `jagsUI` 1.5.2.9002.
+
 ## 2021-08-28
 
 Changed the official date of publication of "Integrated Population Models" to 2022 everywhere (though the book should be shipping Q3 2021).

IPM_02/IPM_02.2-5.R (+43 -39)
@@ -1,7 +1,6 @@
 # Schaub & Kéry (2022) Integrated Population Models
 # Chapter 2 : Bayesian statistical modeling using JAGS
 # ----------------------------------------------------
-# Code from proofs.
 
 library(IPMbook)
 
@@ -12,17 +11,17 @@ library(IPMbook)
 # ------------------------------------------------------------------------
 
 # Read the data for the tadpole experiment into R
-N <- 50 # Number of tadpoles released initially
-y <- 20 # Number of tadpoles detected later
+N <- 50                                           # Number of tadpoles released initially
+y <- 20                                           # Number of tadpoles detected later
 
 # 2.3 Maximum likelihood estimation in a nutshell
 # ===============================================
 
-# Fig. 2.3
-all.possible <- 0:50 # All possible values
-pmf1 <- dbinom(all.possible, size=50, prob=0.1) # pmf 1
-pmf2 <- dbinom(all.possible, size=50, prob=0.5) # pmf 2
-pmf3 <- dbinom(all.possible, size=50, prob=0.9) # pmf 3
+# Data for Fig. 2.3
+all.possible <- 0:50                              # All possible values
+pmf1 <- dbinom(all.possible, size=50, prob=0.1)   # pmf 1
+pmf2 <- dbinom(all.possible, size=50, prob=0.5)   # pmf 2
+pmf3 <- dbinom(all.possible, size=50, prob=0.9)   # pmf 3
 
 # ~~~~ additional code for the plot ~~~~
 op <- par(mfrow=c(1, 3), mar=c(5, 5, 4, 1), cex.lab=1.5, cex.axis=1.5, cex.main=2, las=1)
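A quick aside on the three pmf vectors above (a sketch, not part of the committed code): each dbinom() vector covers every possible outcome 0:50, so it is a complete probability mass function and sums to 1.

# Sanity check (not in the book code): a pmf summed over all outcomes is 1
sum(dbinom(0:50, size=50, prob=0.1))      # [1] 1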
@@ -38,14 +37,14 @@ abline(v=20, col="blue", lwd=2)
 par(op)
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-# Use RNG to obtain binomial density for pmf 3
-hist(rbinom(10^6, size=50, prob=0.9), freq=FALSE, xlim=c(0, 50)) # Not shown
+# Use RNG to obtain binomial density for pmf3
+hist(rbinom(10^6, size=50, prob=0.9), freq=FALSE, xlim=c(0, 50))  # Not shown
 
 # Brute-force search for MLE of theta for the tadpole data set (Fig. 2.4)
-try.theta <- seq(0, 1, by=0.01) # Values to try out
-like <- dbinom(20, 50, try.theta, log=FALSE) # Likelihood
-loglike <- dbinom(20, 50, try.theta, log=TRUE) # Log-Likelihood
-negloglike <- -dbinom(20, 50, try.theta, log=TRUE) # NLL
+try.theta <- seq(0, 1, by=0.01)                     # Values to try out
+like <- dbinom(20, 50, try.theta, log=FALSE)        # Likelihood
+loglike <- dbinom(20, 50, try.theta, log=TRUE)      # Log-likelihood
+negloglike <- -dbinom(20, 50, try.theta, log=TRUE)  # Negative log-likelihood (NLL)
 
 op <- par(mfrow=c(1, 3), mar=c(5, 5, 4, 1), cex.lab=1.5, cex.axis=1.5)
 plot(x=try.theta, y=like, xlab=expression(paste("Detection probability (", theta, ")")),
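As a cross-check on the brute-force grid (a minimal sketch, not part of this commit), the grid maximum agrees with the closed-form binomial MLE y/N and with a one-dimensional numerical optimizer:

y <- 20; N <- 50
nll <- function(theta) -dbinom(y, N, theta, log=TRUE)  # Negative log-likelihood
try.theta <- seq(0, 1, by=0.01)
try.theta[which.min(nll(try.theta))]      # Grid search: 0.4
y / N                                     # Closed-form MLE: 0.4
optimize(nll, interval=c(0, 1))$minimum   # Numerical minimizer: approx. 0.4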
@@ -168,7 +167,7 @@ set.seed(1) # To initialize your RNGs identically to ours
 
 # Add theta_star into MCMC sample
 ltheta[2] <- ltheta_star
-ltheta # Our posterior sample up to now (not shown)
+ltheta                                    # Our posterior sample up to now (not shown)
 
 ( ltheta_star <- rnorm(1, ltheta[2], sigma_prop) )
 # [1] 0.5571895

@@ -182,15 +181,15 @@ pd_t <- dbinom(20, 50, plogis(ltheta[2])) * dbeta(plogis(ltheta[2]), 1, 1)
 # [1] 0
 
 ltheta[3] <- ltheta[2]
-ltheta # Our posterior sample up to now (not shown)
+ltheta                                    # Our posterior sample up to now (not shown)
 
 # Iteration 4 to T of RW-MH algorithm
-T <- 60000 # Choose chain length
-for (t in 4:T){ # Continue where we left off
+T <- 60000                                # Choose chain length
+for (t in 4:T){                           # Continue where we left off
   ltheta_star <- rnorm(1, ltheta[t-1], sigma_prop)
   pd_star <- dbinom(20, 50, plogis(ltheta_star)) * dbeta(plogis(ltheta_star), 1, 1)
   pd_t <- dbinom(20, 50, plogis(ltheta[t-1])) * dbeta(plogis(ltheta[t-1]), 1, 1)
-  R <- min(1, pd_star / pd_t) # Note more general solution here
+  R <- min(1, pd_star / pd_t)             # Note more general solution here
   keep.ltheta_star <- rbinom(1, 1, R)
   ltheta[t] <- ifelse(keep.ltheta_star == 1, ltheta_star, ltheta[t-1])
 }
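One property of this acceptance step worth noting: pd_star and pd_t are evaluated on the natural density scale, which is why a proposal far in the tail evaluated to 0 earlier in the script. A minimal sketch (not the book's code) of the same random-walk Metropolis update on the log scale, which sidesteps that underflow:

log_post <- function(lt)                  # Log of the same unnormalized posterior
  dbinom(20, 50, plogis(lt), log=TRUE) + dbeta(plogis(lt), 1, 1, log=TRUE)
set.seed(1)
sigma_prop <- 1                           # Assumed proposal SD
ltheta <- 0                               # Initial value on the logit scale
for (t in 2:10000){
  ltheta_star <- rnorm(1, ltheta[t-1], sigma_prop)
  logR <- log_post(ltheta_star) - log_post(ltheta[t-1])
  accept <- log(runif(1)) < logR          # Equivalent to accepting with prob min(1, R)
  ltheta[t] <- if (accept) ltheta_star else ltheta[t-1]
}
mean(plogis(ltheta))                      # Posterior mean, approx. 0.4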
@@ -234,6 +233,7 @@ tmp <- demoMCMC(y=20, N=50, niter=25000, mu.ltheta=0, sd.ltheta=100, prop.sd=0.1
 # ... and you get convergence within 2500 iters with longer step length
 tmp <- demoMCMC(y=20, N=50, niter=2500, mu.ltheta=0, sd.ltheta=100, prop.sd=1, init=100)
 
+
 # ~~~~ extra code to explore step size ~~~~
 # Very, very small step size: very inefficient MCMC
 str(out <- demoMCMC(prop.s = 0.01))

@@ -255,42 +255,46 @@ str(out <- demoMCMC(prop.s = 1))
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 str(out)
+
 # List of 7
-# $ y : num 20
-# $ N : num 50
-# $ mu.ltheta: num 0
-# $ sd.ltheta: num 100
-# $ prop.sd : num 1
-# $ ltheta : num [1:25000] 0 0 -0.0519 -0.0519 -0.7637 ...
-# $ acc.prob : num 0.34
+# $ y        : num 20
+# $ N        : num 50
+# $ mu.ltheta: num 0
+# $ sd.ltheta: num 100
+# $ prop.sd  : num 1
+# $ ltheta   : num [1:25000] 0 0 -0.0519 -0.0519 -0.7637 ...
+# $ acc.prob : num 0.34
 
 # Measures of central tendency to serve as point estimates
-library(MCMCglmm) # For function for mode
-mean(plogis(out$ltheta)) # Posterior mean
-# [1] 0.4007489 # Your result will differ slightly
-median(plogis(out$ltheta)) # Posterior median
-# [1] 0.4001582 # Your result will differ
-posterior.mode(mcmc(plogis(out$ltheta))) # Posterior mode (ditto)
-# var1
+library(MCMCglmm)                         # For function for mode
+mean(plogis(out$ltheta))                  # Posterior mean
+# [1] 0.4007489                           # Your result will differ slightly
+
+median(plogis(out$ltheta))                # Posterior median
+# [1] 0.4001582                           # Your result will differ
+
+posterior.mode(mcmc(plogis(out$ltheta)))  # Posterior mode (ditto)
+#      var1
 # 0.4152145
 
 # Measures of spread:
 # - Bayesian 'variant' of standard error (= posterior SD)
 # - two Bayesian credible intervals (CRI and HPDI)
-sd(plogis(out$ltheta)) # Posterior SD
-# [1] 0.06950494 # Your result will differ
+sd(plogis(out$ltheta))                    # Posterior SD
+# [1] 0.06950494                          # Your result will differ
 
-quantile(plogis(out$ltheta), prob=c(0.025, 0.975)) # Symmetrical Bayesian CRI (your result will differ)
-# 2.5% 97.5%
+quantile(plogis(out$ltheta), prob=c(0.025, 0.975))  # Symmetrical Bayesian CRI (your result will differ)
+#      2.5%     97.5%
 # 0.2705029 0.5383629
 
-HPDinterval(mcmc(plogis(out$ltheta))) # Highest posterior density credible
-# interval(HPDI); your result will differ
+HPDinterval(mcmc(plogis(out$ltheta)))     # Highest posterior density credible
+                                          # interval (HPDI); your result will differ
 #          lower     upper
 # var1 0.2672082 0.5349586
 # attr(,"Probability")
 # [1] 0.95
 
+
 # Compute p(theta > 0.5)
 mean(plogis(out$ltheta) > 0.5)
 # [1] 0.07584
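A natural companion to these summaries (a sketch, assuming the coda package that MCMCglmm loads): autocorrelation in a random-walk chain means the 25,000 draws carry less information than 25,000 independent samples, and effectiveSize() quantifies how much less.

library(coda)                             # Already attached via MCMCglmm above
effectiveSize(mcmc(plogis(out$ltheta)))   # Typically well below 25000 for RW-MH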

IPM_02/IPM_02.7.1.R (+70 -37)
@@ -1,7 +1,6 @@
 # Schaub & Kéry (2022) Integrated Population Models
 # Chapter 2 : Bayesian statistical modeling using JAGS
 # ----------------------------------------------------
-# Code from proofs.
 
 # Run time approx. 3 mins
 
@@ -15,7 +14,7 @@ library(IPMbook)
 
 # Define imaginary counts and elevation data for 10 sites
 original.elev <- c(500, 400, 700, 500, 600, 800, 1000, 900, 1000, 900)
-C <- c(6, 10, 2, 7, 4, 1, 1, 2, 0, 0) # Counts
+C <- c(6, 10, 2, 7, 4, 1, 1, 2, 0, 0)             # Counts
 
 # Look at data and produce summaries of counts (not shown)
 cbind(original.elev, C)
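Note that the elev used in the data bundle below is the standardized version of original.elev; that transformation happens in a part of the script this commit does not touch. A base-R sketch of the presumed step (an assumption, labeled as such):

# Assumed standardization step (the actual code lies outside this hunk):
elev <- as.numeric(scale(original.elev))  # Centre and scale elevation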
@@ -34,6 +33,7 @@ library(jagsUI)
 
 # Bundle data
 jags.data <- list(C=C, elev=elev, n=length(C))
+
 str(jags.data)
 # List of 3
 # $ C   : num [1:10] 6 10 2 7 4 1 1 2 0 0
@@ -45,22 +45,19 @@ cat(file="model1.txt", "
 model {
   # Priors and linear models
   # One set of vague priors: 'flat' normal
-  alpha ~ dnorm(0, 1.0E-06) # log-linear intercept
-  beta ~ dnorm(0, 1.0E-06) # log-linear slope
-
+  alpha ~ dnorm(0, 1.0E-06)               # log-linear intercept
+  beta ~ dnorm(0, 1.0E-06)                # log-linear slope
   # Another possible set of vague priors: suitably wide uniform
-  # alpha ~ dunif(-100, 100) # log-linear intercept
-  # beta ~ dunif(-100, 100) # log-linear slope
-
+  # alpha ~ dunif(-100, 100)              # log-linear intercept
+  # beta ~ dunif(-100, 100)               # log-linear slope
   # Likelihood for the Poisson GLM
   for (i in 1:n){
     C[i] ~ dpois(lambda[i]) # Stochastic part
-    log(lambda[i]) <- alpha + beta * elev[i] # Link function and linear pred.
-    # lambda[i] <- exp(alpha + beta * elev[i]) # same written differently
+    log(lambda[i]) <- alpha + beta * elev[i]    # Link function and linear pred.
+    # lambda[i] <- exp(alpha + beta * elev[i])  # Same written differently
   }
-
   # Derived quantities
-  mean.exp.count <- exp(alpha) # Backtransformed intercept
+  mean.exp.count <- exp(alpha)            # Backtransformed intercept
 }
 ")
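A reminder that helps when reading the priors above: JAGS parameterizes dnorm() by precision (1/variance), not by standard deviation, so dnorm(0, 1.0E-06) is a Normal with SD 1000. In R terms:

tau <- 1.0E-06                            # Precision as passed to JAGS dnorm()
sigma <- 1 / sqrt(tau)                    # Implied prior SD: 1000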
@@ -84,37 +81,73 @@ out1 <- jags(jags.data, inits, parameters, "model1.txt", n.iter=ni, n.burnin=nb,
 # Produce an overview of the R object created by jagsUI
 str(out1) # Only a small portion of total output is shown here!
 
-# par(mfrow=c(2, 2)); jagsUI::traceplot(out1) # May need jagsUI::traceplot!
-jagsUI::traceplot(out1, layout=c(2,2)) # May need jagsUI::traceplot!
+# List of 24
+# $ sims.list :List of 5
+#  ..$ alpha         : num [1:12000] -0.0859 -0.2759 0.5362 0.5113 ...
+#  ..$ beta          : num [1:12000] -8.13 -7.02 -6.57 -5.51 -5.05 ...
+#  ..$ mean.exp.count: num [1:12000] 0.918 0.759 1.709 1.667 2.062 ...
+#  ..$ lambda        : num [1:12000, 1:10] 5.96 3.82 7.75 5.93 6.58 ...
+#  ..$ deviance      : num [1:12000] 35.2 39.4 31.5 29 28.7 ...
+# $ mean      :List of 5
+#  ..$ alpha         : num 0.623
+#  ..$ beta          : num -5.16
+#  ..$ mean.exp.count: num 1.94
+#  ..$ lambda        : num [1:10(1d)] 6.21 10.52 2.25 6.21 3.71 ...
+#  ..$ deviance      : num 30.7
+# [... output truncated ...]
+# $ summary   : num [1:14, 1:11] 0.623 -5.157 1.936 6.209 10.522 ...
+#  ..- attr(*, "dimnames")=List of 2
+#  .. ..$ : chr [1:14] "alpha" "beta" "mean.exp.count" "lambda[1]" ...
+#  .. ..$ : chr [1:11] "mean" "sd" "2.5%" "25%" ...
+# $ samples   :List of 3
+#  ..$ : 'mcmc' num [1:4000, 1:14] -0.0859 -0.2759 0.5362 0.5113 0.7234 ...
+#  .. ..- attr(*, "dimnames")=List of 2
+#  .. .. ..$ : NULL
+#  .. .. ..$ : chr [1:14] "alpha" "beta" "mean.exp.count" "lambda[1]" ...
+#  .. ..- attr(*, "mcpar")= num [1:3] 10010 50000 10
+#  ..$ : 'mcmc' num [1:4000, 1:14] 0.9 1.091 0.935 0.579 0.518 ...
+#  .. ..- attr(*, "dimnames")=List of 2
+#  .. .. ..$ : NULL
+#  .. .. ..$ : chr [1:14] "alpha" "beta" "mean.exp.count" "lambda[1]" ...
+#  .. ..- attr(*, "mcpar")= num [1:3] 10010 50000 10
+#  ..$ : 'mcmc' num [1:4000, 1:14] 0.294 0.559 0.226 0.646 0.545 ...
+#  .. ..- attr(*, "dimnames")=List of 2
+#  .. .. ..$ : NULL
+#  .. .. ..$ : chr [1:14] "alpha" "beta" "mean.exp.count" "lambda[1]" ...
+#  .. ..- attr(*, "mcpar")= num [1:3] 10010 50000 10
+#  ..- attr(*, "class")= chr "mcmc.list"
+# [... output truncated ...]
+
+
+jagsUI::traceplot(out1, layout=c(2, 2))   # May need jagsUI::traceplot!
 
 # ~~~~ code for Fig 2.10 ~~~~
-jagsUI::traceplot(out1, c("alpha", "beta", "mean.exp.count"), layout=c(1,3)) # just some
+jagsUI::traceplot(out1, c("alpha", "beta", "mean.exp.count"), layout=c(1,3))
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 print(out1, 3)
 
 # JAGS output for model 'model1.txt', generated by jagsUI.
-# Estimates based on 3 chains of 50000 iterations,
-# adaptation = 1000 iterations (sufficient),
-# burn-in = 10000 iterations and thin rate = 10,
-# yielding 12000 total samples from the joint posterior.
+# Estimates based on 3 chains of 50000 iterations, adaptation = 1000 iterations (sufficient),
+# burn-in = 10000 iterations and thin rate = 10, yielding 12000 total samples from the
+# joint posterior.
 # MCMC ran for 0.081 minutes at time 2020-11-19 18:09:13.
 
-# mean sd 2.5% 50% 97.5% overlap0 f Rhat n.eff
-# alpha 0.619 0.280 0.027 0.637 1.114 FALSE 0.979 1.000 12000
-# beta -5.178 1.140 -7.531 -5.122 -3.087 FALSE 1.000 1.000 12000
-# mean.exp.count 1.929 0.520 1.028 1.892 3.048 FALSE 1.000 1.000 12000
-# lambda[1] 6.210 1.115 4.233 6.132 8.558 FALSE 1.000 1.000 12000
-# lambda[2] 10.541 2.479 6.354 10.327 16.022 FALSE 1.000 1.000 10153
-# lambda[3] 2.238 0.550 1.271 2.206 3.414 FALSE 1.000 1.000 12000
-# lambda[4] 6.210 1.115 4.233 6.132 8.558 FALSE 1.000 1.000 12000
-# lambda[5] 3.705 0.689 2.472 3.657 5.171 FALSE 1.000 1.000 12000
-# lambda[6] 1.368 0.453 0.618 1.325 2.365 FALSE 1.000 1.000 12000
-# lambda[7] 0.529 0.283 0.141 0.478 1.229 FALSE 1.000 1.000 12000
-# lambda[8] 0.846 0.363 0.296 0.795 1.692 FALSE 1.000 1.000 12000
-# lambda[9] 0.529 0.283 0.141 0.478 1.229 FALSE 1.000 1.000 12000
-# lambda[10] 0.846 0.363 0.296 0.795 1.692 FALSE 1.000 1.000 12000
-# deviance 30.633 2.044 28.661 30.021 36.144 FALSE 1.000 1.001 3355
+#                  mean    sd   2.5%    50%  97.5% overlap0     f  Rhat n.eff
+# alpha           0.619 0.280  0.027  0.637  1.114    FALSE 0.979 1.000 12000
+# beta           -5.178 1.140 -7.531 -5.122 -3.087    FALSE 1.000 1.000 12000
+# mean.exp.count  1.929 0.520  1.028  1.892  3.048    FALSE 1.000 1.000 12000
+# lambda[1]       6.210 1.115  4.233  6.132  8.558    FALSE 1.000 1.000 12000
+# lambda[2]      10.541 2.479  6.354 10.327 16.022    FALSE 1.000 1.000 10153
+# lambda[3]       2.238 0.550  1.271  2.206  3.414    FALSE 1.000 1.000 12000
+# lambda[4]       6.210 1.115  4.233  6.132  8.558    FALSE 1.000 1.000 12000
+# lambda[5]       3.705 0.689  2.472  3.657  5.171    FALSE 1.000 1.000 12000
+# lambda[6]       1.368 0.453  0.618  1.325  2.365    FALSE 1.000 1.000 12000
+# lambda[7]       0.529 0.283  0.141  0.478  1.229    FALSE 1.000 1.000 12000
+# lambda[8]       0.846 0.363  0.296  0.795  1.692    FALSE 1.000 1.000 12000
+# lambda[9]       0.529 0.283  0.141  0.478  1.229    FALSE 1.000 1.000 12000
+# lambda[10]      0.846 0.363  0.296  0.795  1.692    FALSE 1.000 1.000 12000
+# deviance       30.633 2.044 28.661 30.021 36.144    FALSE 1.000 1.001  3355
 
 # Successful convergence based on Rhat values (all < 1.1).
 # Rhat is the potential scale reduction factor (at convergence, Rhat=1).
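The sims.list component shown in str(out1) is the handiest entry point for post-processing; a short sketch (using the out1 object from the jags() call above) that recomputes the backtransformed-intercept summary directly from the posterior draws:

draws <- exp(out1$sims.list$alpha)        # Same quantity as mean.exp.count
mean(draws)                               # cf. 1.929 in the table above
quantile(draws, probs=c(0.025, 0.975))    # cf. 1.028 and 3.048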
@@ -148,17 +181,17 @@ predict(fm, type='response', newdata=data.frame(elev=0), se=TRUE)
 
 # [... output truncated ...]
 # Coefficients:
-# Estimate Std. Error z value Pr(>|z|)
+#             Estimate Std. Error z value Pr(>|z|)
 # (Intercept)   0.6787     0.2705   2.509   0.0121 *
 # elev         -5.0198     1.1035  -4.549  5.4e-06 ***
 # [... output truncated ...]
 
 # $fit
-# 1
+#        1
 # 1.971256
 
 # $se.fit
-# 1
+#         1
 # 0.5331537
 
 # New data bundle with variant of data with bigger sample size
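For the frequentist comparison in this hunk, fm is fitted earlier in the script, outside the lines shown; a sketch of the presumed call (an assumption, consistent with the coefficient table above):

fm <- glm(C ~ elev, family=poisson)       # Presumed Poisson GLM of counts on elevation
exp(coef(fm)[1])                          # Backtransformed intercept: 1.971, cf. $fit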
