Commit adf9a90 — feat(pst_Q): add p_pred, y_pred
Parent: 1ae9f34

3 files changed

Lines changed: 23 additions & 4 deletions

File tree

R/R/pst_Q.R

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
 #' @templateVar DATA_COLUMNS "subjID", "type", "choice", "reward"
 #' @templateVar PARAMETERS \code{alpha} (learning rate), \code{beta} (inverse temperature)
 #' @templateVar REGRESSORS
-#' @templateVar POSTPREDS "y_pred"
+#' @templateVar POSTPREDS "p_pred", "y_pred"
 #' @templateVar LENGTH_DATA_COLUMNS 4
 #' @templateVar DETAILS_DATA_1 \item{subjID}{A unique identifier for each subject in the data-set.}
 #' @templateVar DETAILS_DATA_2 \item{type}{Two-digit number indicating which pair of stimuli were presented for that trial, e.g. 12, 34, or 56. The digit on the left (tens-digit) indicates the presented stimulus for option1, while the digit on the right (ones-digit) indicates that for option2. Code for each stimulus type (1~6) is defined as for 80\% (type 1), 20\% (type 2), 70\% (type 3), 30\% (type 4), 60\% (type 5), 40\% (type 6). The modeling will still work even if different probabilities are used for the stimuli; however, the total number of stimuli should be less than or equal to 6.}
@@ -40,5 +40,5 @@ pst_Q <- hBayesDM_model(
   ),
   additional_args = NULL,
   regressors = NULL,
-  postpreds = c("y_pred"),
+  postpreds = c("p_pred", "y_pred"),
   preprocess_func = pst_preprocess_func)

commons/models/pst_Q.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,5 +38,6 @@ parameters:
     info: [0, 1, 10]
 regressors:
 postpreds:
+- p_pred
 - y_pred
 additional_args:

commons/stan_files/pst_Q.stan

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -70,11 +70,26 @@ generated quantities {
   // For group-level parameters
   real<lower=0,upper=1> mu_alpha;
   real<lower=0,upper=10> mu_beta;
-  real pe[N, T]; // Prediction error
+
+  real pe[N, T]; // prediction error
 
   // For log-likelihood calculation
   real log_lik[N];
 
+  // predicted probability of choosing option1
+  real<lower=0,upper=1> p_pred[N, T];
+
+  // For posterior predictive check
+  real<lower=0,upper=1> y_pred[N, T];
+
+  // Set all posterior predictions to 0 (avoids NULL values)
+  for (i in 1:N) {
+    for (t in 1:T) {
+      p_pred[i, t] = 0.5;
+      y_pred[i, t] = 0;
+    }
+  }
+
   mu_alpha = Phi_approx(mu_pr[1]);
   mu_beta = Phi_approx(mu_pr[2]) * 10;
@@ -95,10 +110,13 @@ generated quantities {
       delta = ev[option1[i, t]] - ev[option2[i, t]];
       log_lik[i] += bernoulli_logit_lpmf(choice[i, t] | beta[i] * delta);
 
+      // generate posterior prediction for current trial
+      p_pred[i, t] = inv_logit(beta[i] * delta);
+      y_pred[i, t] = bernoulli_rng(p_pred[i, t]);
+
       pe[i, t] = reward[i, t] - ev[co];
       ev[co] += alpha[i] * pe[i, t];
     }
   }
 }

0 commit comments