
Commit edf708f

feat: edit prl_fictitious_rewmag model

1 parent 53664c1 commit edf708f

3 files changed: 30 additions & 23 deletions

File tree

R/R/prl_fictitious_rewmag.R
commons/models/prl_fictitious_rewmag.yml
commons/stan_files/prl_fictitious_rewmag.stan

R/R/prl_fictitious_rewmag.R
Lines changed: 3 additions & 3 deletions
@@ -14,8 +14,8 @@
 #' @templateVar LENGTH_DATA_COLUMNS 5
 #' @templateVar DETAILS_DATA_1 \item{subjID}{A unique identifier for each subject in the data-set.}
 #' @templateVar DETAILS_DATA_2 \item{choice}{Integer value representing the option chosen on that trial: 1 or 2.}
-#' @templateVar DETAILS_DATA_3 \item{rewardMagnitude1}{The reward amount associated with option 1 on a given trial, which is paid out if option 1 is chosen and is the correct choice (1~99).}
-#' @templateVar DETAILS_DATA_4 \item{rewardMagnitude2}{The reward amount associated with option 2 on a given trial, which is paid out if option 2 is chosen and is the correct choice (1~99).}
+#' @templateVar DETAILS_DATA_3 \item{rewardMagnitude1}{The amount of reward paid out if option 1 is chosen and is the correct choice (0~99).}
+#' @templateVar DETAILS_DATA_4 \item{rewardMagnitude2}{The amount of reward paid out if option 2 is chosen and is the correct choice (0~99).}
 #' @templateVar DETAILS_DATA_5 \item{outcome}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).}
 #' @templateVar LENGTH_ADDITIONAL_ARGS 0
 #'

@@ -39,7 +39,7 @@ prl_fictitious_rewmag <- hBayesDM_model(
     "eta" = c(0, 0.5, 1),
     "alpha" = c(-Inf, 0, Inf),
     "beta" = c(0, 1, 10),
-    "gamma" = c(0, 0.5, 2)
+    "gamma" = c(0, 0.5, 1)
   ),
   additional_args = NULL,
   regressors = list(
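In hBayesDM model definitions, each parameter's vector appears to follow a (lower bound, plausible value, upper bound) convention (the yml below mirrors it as info: [lower, plausible, upper]), so this hunk narrows gamma's support from [0, 2] to [0, 1]. A minimal standalone sketch of the updated block, with the parameter roles taken from the prl_fictitious family docs rather than from this diff:

# Each entry is c(lower bound, plausible value, upper bound) for one parameter.
parameters <- list(
  "eta"   = c(0, 0.5, 1),     # learning rate (assumed role, per prl_fictitious docs)
  "alpha" = c(-Inf, 0, Inf),  # indecision point (assumed role), unbounded
  "beta"  = c(0, 1, 10),      # inverse temperature (assumed role)
  "gamma" = c(0, 0.5, 1)      # reward magnitude sensitivity (desc from the yml below)
)
stopifnot(parameters[["gamma"]][3] == 1)  # upper bound after this commit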

commons/models/prl_fictitious_rewmag.yml

Lines changed: 3 additions & 5 deletions
@@ -21,11 +21,9 @@ data_columns:
   subjID: A unique identifier for each subject in the data-set.
   choice: "Integer value representing the option chosen on that trial: 1 or 2."
   rewardMagnitude1:
-    The reward amount associated with option 1 on a given trial,
-    which is paid out if option 1 is chosen and is the correct choice (1~99).
+    The amount of reward paid out if option 1 is chosen and is the correct choice (0~99).
   rewardMagnitude2:
-    The reward amount associated with option 2 on a given trial,
-    which is paid out if option 2 is chosen and is the correct choice (1~99).
+    The amount of reward paid out if option 2 is chosen and is the correct choice (0~99).
   outcome:
     Integer value representing the outcome of that trial (where reward == 1,
     and loss == -1).

@@ -41,7 +39,7 @@ parameters:
     info: [0, 1, 10]
   gamma:
     desc: reward magnitude sensitivity
-    info: [0, 0.5, 2]
+    info: [0, 0.5, 1]
 regressors:
   ev_c: 2
   ev_nc: 2

commons/stan_files/prl_fictitious_rewmag.stan

Lines changed: 24 additions & 15 deletions
@@ -7,12 +7,12 @@
  */

 data {
-  int<lower=1> N;                       // Number of subjects
-  int<lower=1> T;                       // Maximum number of trials across subjects
-  int<lower=1, upper=T> Tsubj[N];       // Number of trials/blocks for each subject
-  real rewardMagnitude[N, T, 2];        // Expected reward of each option for each trial for each subject (1~99)
-  int<lower=-1, upper=2> choice[N, T];  // The choices subjects made
-  real outcome[N, T];                   // The outcome
+  int<lower=1> N;                                     // Number of subjects
+  int<lower=1> T;                                     // Maximum number of trials across subjects
+  int<lower=1, upper=T> Tsubj[N];                     // Number of trials/blocks for each subject
+  real<lower=0, upper=99> rewardMagnitude[N, T, 2];   // Expected reward of each option for each trial for each subject
+  int<lower=-1, upper=2> choice[N, T];                // The choices subjects made
+  real<lower=-1, upper=1> outcome[N, T];              // The outcome
 }

 transformed data {
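Because Stan validates declared bounds when the data are loaded, the tightened constraints on rewardMagnitude and outcome make out-of-range trials fail fast instead of silently distorting the fit. A quick pre-fit sanity check one could run in R (toy data frame; column names come from the data-column docs above, the bounds from the new declarations):

# Toy trial-level data using the documented column names.
dat <- data.frame(
  subjID           = c(1, 1),
  choice           = c(1, 2),
  rewardMagnitude1 = c(10, 99),
  rewardMagnitude2 = c(5, 0),
  outcome          = c(1, -1)
)
# Mirror the Stan data-block bounds before handing the data to the sampler.
stopifnot(
  all(dat$rewardMagnitude1 >= 0 & dat$rewardMagnitude1 <= 99),
  all(dat$rewardMagnitude2 >= 0 & dat$rewardMagnitude2 <= 99),
  all(dat$outcome >= -1 & dat$outcome <= 1)
)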
@@ -39,12 +39,12 @@ transformed parameters {
   vector<lower=0, upper=1>[N] eta;
   vector[N] alpha;
   vector<lower=0, upper=10>[N] beta;
-  vector<lower=0, upper=2>[N] gamma;
+  vector<lower=0, upper=1>[N] gamma;

   for (i in 1:N) {
     eta[i] = Phi_approx(mu_pr[1] + sigma[1] * eta_pr[i]);
     beta[i] = Phi_approx(mu_pr[3] + sigma[3] * beta_pr[i]) * 10;
-    gamma[i] = Phi_approx(mu_pr[4] + sigma[4] * gamma_pr[i]) * 2;
+    gamma[i] = Phi_approx(mu_pr[4] + sigma[4] * gamma_pr[i]);
   }
   alpha = mu_pr[2] + sigma[2] * alpha_pr;
 }
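The rescaling is consistent because Phi_approx maps the whole real line into (0, 1): Stan implements it as a logistic approximation to the standard normal CDF, inv_logit(0.07056 * x^3 + 1.5976 * x). Dropping the trailing * 2 therefore shrinks gamma's support from (0, 2) to (0, 1), matching the new vector declaration above. A quick R check of that range claim (phi_approx here is a hand-rolled port, not a package function):

# Logistic approximation to the standard normal CDF, as used by Stan's Phi_approx.
phi_approx <- function(x) plogis(0.07056 * x^3 + 1.5976 * x)

x <- seq(-6, 6, by = 0.1)
stopifnot(all(phi_approx(x) > 0, phi_approx(x) < 1))          # new gamma support: (0, 1)
stopifnot(all(2 * phi_approx(x) > 0, 2 * phi_approx(x) < 2))  # old support: (0, 2)
max(abs(phi_approx(x) - pnorm(x)))  # approximation error is small (on the order of 1e-4)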
@@ -79,15 +79,19 @@ model {

     for (t in 1:(Tsubj[i])) {
       // Compute action probabilities with reward magnitude
-
       ev_diff = (ev[1] - ev[2]) + (gamma[i] * (rewardMagnitude[i,t,1] - rewardMagnitude[i,t,2]) / 99.0);
       prob_logits[1] = beta[i] * (alpha[i] - ev_diff);
       prob_logits[2] = 0;
       choice[i, t] ~ categorical_logit(prob_logits);

       // Prediction error
-      reward = outcome[i,t] * (rewardMagnitude[i,t,choice[i, t]] / 99.0);
-      reward_of_unchosen = (-outcome[i,t]) * (rewardMagnitude[i,t,3-choice[i, t]] / 99.0);
+      if (outcome[i,t] > 0) {
+        reward = rewardMagnitude[i, t, choice[i, t]] / 99.0;
+        reward_of_unchosen = 0;
+      } else {
+        reward = 0;
+        reward_of_unchosen = rewardMagnitude[i, t, 3-choice[i, t]] / 99.0;
+      }
       PE = reward - ev[choice[i, t]];
       PEnc = reward_of_unchosen - ev[3-choice[i, t]];
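This is the substantive behavioral change. The old code multiplied magnitudes by the signed outcome, so a loss produced a negative reward for the chosen option and a positive fictitious reward for the unchosen one, and a win pushed the unchosen option's reward negative. The new code clips both quantities at zero: a win credits only the chosen option, a loss credits only the unchosen option, and both stay in [0, 1]. A standalone R sketch of the two schemes (reward_old and reward_new are hypothetical names, written only to contrast the coding):

# outcome: 1 = reward, -1 = loss; magnitudes on the documented 0~99 scale.
reward_old <- function(outcome, mag_chosen, mag_unchosen) {
  c(chosen   =  outcome * mag_chosen   / 99,
    unchosen = -outcome * mag_unchosen / 99)
}
reward_new <- function(outcome, mag_chosen, mag_unchosen) {
  if (outcome > 0) c(chosen = mag_chosen / 99, unchosen = 0)
  else             c(chosen = 0,               unchosen = mag_unchosen / 99)
}
reward_old(-1, 50, 30)  # chosen -0.51, unchosen 0.30 (signed)
reward_new(-1, 50, 30)  # chosen  0.00, unchosen 0.30 (clipped at zero)

The same replacement is applied verbatim in the generated quantities block below, so the likelihood and the posterior predictions use identical reward coding.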

@@ -103,7 +107,7 @@ generated quantities {
   real<lower=0, upper=1> mu_eta;
   real mu_alpha;
   real<lower=0, upper=10> mu_beta;
-  real<lower=0, upper=2> mu_gamma;
+  real<lower=0, upper=1> mu_gamma;

   // For log likelihood calculation
   real log_lik[N];
@@ -136,7 +140,7 @@ generated quantities {
   mu_eta = Phi_approx(mu_pr[1]);
   mu_alpha = mu_pr[2];
   mu_beta = Phi_approx(mu_pr[3]) * 10;
-  mu_gamma = Phi_approx(mu_pr[4]) * 2;
+  mu_gamma = Phi_approx(mu_pr[4]);

   { // local section, this saves time and space
     for (i in 1:N) {
@@ -166,8 +170,13 @@ generated quantities {
        y_pred[i, t] = categorical_logit_rng(prob_logits);

        // prediction error
-       reward = outcome[i,t] * (rewardMagnitude[i,t,choice[i, t]] / 99.0);
-       reward_of_unchosen = (-outcome[i,t]) * (rewardMagnitude[i,t,3-choice[i, t]] / 99.0);
+       if (outcome[i,t] > 0) {
+         reward = rewardMagnitude[i, t, choice[i, t]] / 99.0;
+         reward_of_unchosen = 0;
+       } else {
+         reward = 0;
+         reward_of_unchosen = rewardMagnitude[i, t, 3-choice[i, t]] / 99.0;
+       }
        PE = reward - ev[choice[i, t]];
        PEnc = reward_of_unchosen - ev[3-choice[i, t]];
