Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions Python/hbayesdm/models/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,10 +40,13 @@
from ._peer_ocu import peer_ocu
from ._prl_ewa import prl_ewa
from ._prl_fictitious import prl_fictitious
from ._prl_fictitious_cnc import prl_fictitious_cnc
from ._prl_fictitious_cnc_multipleB import prl_fictitious_cnc_multipleB
from ._prl_fictitious_multipleB import prl_fictitious_multipleB
from ._prl_fictitious_rp import prl_fictitious_rp
from ._prl_fictitious_rp_woa import prl_fictitious_rp_woa
from ._prl_fictitious_woa import prl_fictitious_woa
from ._prl_fictitious_woa_multipleB import prl_fictitious_woa_multipleB
from ._prl_rp import prl_rp
from ._prl_rp_multipleB import prl_rp_multipleB
from ._pstRT_ddm import pstRT_ddm
Expand Down Expand Up @@ -106,10 +109,13 @@
'peer_ocu',
'prl_ewa',
'prl_fictitious',
'prl_fictitious_cnc',
'prl_fictitious_cnc_multipleB',
'prl_fictitious_multipleB',
'prl_fictitious_rp',
'prl_fictitious_rp_woa',
'prl_fictitious_woa',
'prl_fictitious_woa_multipleB',
'prl_rp',
'prl_rp_multipleB',
'pstRT_ddm',
Expand Down
244 changes: 244 additions & 0 deletions Python/hbayesdm/models/_prl_fictitious_cnc.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,244 @@
from typing import Sequence, Union, Any
from collections import OrderedDict

from numpy import Inf, exp
import pandas as pd

from hbayesdm.base import TaskModel
from hbayesdm.preprocess_funcs import prl_preprocess_func

__all__ = ['prl_fictitious_cnc']


class PrlFictitiousCnc(TaskModel):
    """Probabilistic Reversal Learning task, fictitious-update model with
    separate chosen/not-chosen learning rates.

    Thin configuration subclass of :class:`hbayesdm.base.TaskModel`: all
    fitting behavior lives in the base class; this class only declares the
    task/model names, required data columns, parameters (with lower bound,
    plausible value, upper bound), model-based regressors, posterior
    predictives, and the preprocessing function.
    """

    def __init__(self, **kwargs):
        # NOTE: ``float('inf')`` is used instead of ``numpy.Inf`` — the
        # ``Inf`` alias was removed in NumPy 2.0 and the value is
        # numerically identical, so this keeps the module importable on
        # modern NumPy without changing behavior.
        inf = float('inf')
        super().__init__(
            task_name='prl',
            model_name='fictitious_cnc',
            model_type='',
            data_columns=(
                'subjID',
                'choice',
                'outcome',
            ),
            # Each parameter maps to (lower bound, plausible init, upper bound).
            parameters=OrderedDict([
                ('eta_c', (0, 0.5, 1)),
                ('eta_nc', (0, 0.5, 1)),
                ('alpha', (-inf, 0, inf)),
                ('beta', (0, 1, 10)),
            ]),
            # Model-based regressors and the dimensionality of each.
            regressors=OrderedDict([
                ('ev_c', 2),
                ('ev_nc', 2),
                ('pe_c', 2),
                ('pe_nc', 2),
                ('dv', 2),
            ]),
            postpreds=['y_pred'],
            parameters_desc=OrderedDict([
                ('eta_c', 'learning rate for chosen option'),
                ('eta_nc', 'learning rate for non-chosen option'),
                ('alpha', 'indecision point'),
                ('beta', 'inverse temperature'),
            ]),
            # This model takes no model-specific additional arguments.
            additional_args=OrderedDict([]),
            additional_args_desc=OrderedDict([]),
            **kwargs,
        )

    _preprocess_func = prl_preprocess_func


def prl_fictitious_cnc(
        data: Union[pd.DataFrame, str, None] = None,
        niter: int = 4000,
        nwarmup: int = 1000,
        nchain: int = 4,
        ncore: int = 1,
        nthin: int = 1,
        inits: Union[str, Sequence[float]] = 'vb',
        ind_pars: str = 'mean',
        model_regressor: bool = False,
        vb: bool = False,
        inc_postpred: bool = False,
        adapt_delta: float = 0.95,
        stepsize: float = 1,
        max_treedepth: int = 10,
        **additional_args: Any) -> TaskModel:
    """Probabilistic Reversal Learning Task - Fictitious Update Model, with chosen/not-chosen learning rates

    Fit a hierarchical Bayesian fictitious-update model with separate
    chosen/not-chosen learning rates [Glascher2009]_ to Probabilistic
    Reversal Learning data. Model parameters: "eta_c" (learning rate for
    chosen option), "eta_nc" (learning rate for non-chosen option),
    "alpha" (indecision point), "beta" (inverse temperature).

    .. [Glascher2009] Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. https://doi.org/10.1093/cercor/bhn098

    .. codeauthor:: Jinwoo Jeong <jwjeong96@gmail.com>

    Data may be passed as a Pandas DataFrame, as a path to a
    **tab-delimited** text file (rows = trials, columns = variables), or as
    the string ``"example"`` for bundled example data. The following three
    columns are required (order does not matter; extra columns are ignored):

    - "subjID": A unique identifier for each subject in the data-set.
    - "choice": Integer value representing the option chosen on that trial: 1 or 2.
    - "outcome": Integer value representing the outcome of that trial (where reward == 1, and loss == -1).

    Parameters
    ----------
    data
        Behavioral data: DataFrame, filepath, or ``"example"``.
        Required columns: "subjID", "choice", "outcome".
    niter
        Total MCMC iterations per chain, including warm-up. Defaults to 4000.
    nwarmup
        Warm-up (burn-in) iterations discarded from the start of each
        chain; raise it to reduce the influence of initial values on the
        posteriors. Defaults to 1000.
    nchain
        Number of independent Markov chains. Running several chains allows
        a convergence check afterwards via
        ``output.plot(type='trace')``. Defaults to 4.
    ncore
        Number of CPU cores to use. Defaults to 1.
    nthin
        Keep only every ``nthin``-th sample; increase when MCMC
        auto-correlation is high. Defaults to 1.
    inits
        ``'fixed'``, ``'random'``, or an explicit sequence of initial values.
    ind_pars
        Summary statistic for individual parameters: ``'mean'``,
        ``'median'``, or ``'mode'``.
    model_regressor
        Whether to export model-based regressors. For this model they are: "ev_c", "ev_nc", "pe_c", "pe_nc", "dv".
    vb
        Use variational inference instead of full sampling. Defaults to
        ``False``.
    inc_postpred
        Include trial-level posterior predictive simulations (may greatly
        increase output size). Defaults to ``False``.
    adapt_delta
        Target acceptance probability for Stan's sampler, between 0 and 1.
        Advanced option — see [Hoffman2014]_ and the
        `Stan User's Guide and Reference Manual`__ before changing.
    stepsize
        Leapfrog step size for the sampler. Advanced option.
    max_treedepth
        Maximum number of leapfrog steps per iteration. Advanced option.
    **additional_args
        Not used for this model.

    .. [Hoffman2014]
       Hoffman, M. D., & Gelman, A. (2014).
       The No-U-Turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo.
       Journal of Machine Learning Research, 15(1), 1593-1623.

    __ https://mc-stan.org/users/documentation/

    Returns
    -------
    model_data
        An ``hbayesdm.TaskModel`` instance with the following components:

        - ``model``: String value that is the name of the model ('prl_fictitious_cnc').
        - ``all_ind_pars``: Pandas DataFrame containing the summarized parameter values
          (as specified by ``ind_pars``) for each subject.
        - ``par_vals``: OrderedDict holding the posterior samples over different parameters.
        - ``fit``: A PyStan StanFit object that contains the fitted Stan model.
        - ``raw_data``: Pandas DataFrame containing the raw data used to fit the model,
          as specified by the user.
        - ``model_regressor``: Dict holding the extracted model-based regressors.

    Examples
    --------

    .. code:: python

        from hbayesdm import rhat, print_fit
        from hbayesdm.models import prl_fictitious_cnc

        # Run the model and store results in "output"
        output = prl_fictitious_cnc(data='example', niter=2000, nwarmup=1000, nchain=4, ncore=4)

        # Visually check convergence of the sampling chains (should look like "hairy caterpillars")
        output.plot(type='trace')

        # Plot posterior distributions of the hyper-parameters (distributions should be unimodal)
        output.plot()

        # Check Rhat values (all Rhat values should be less than or equal to 1.1)
        rhat(output, less=1.1)

        # Show the LOOIC and WAIC model fit estimates
        print_fit(output)
    """
    # Collect the fitting options and forward them, together with any
    # model-specific extras, to the task-model class in one call.
    fit_kwargs = {
        'data': data,
        'niter': niter,
        'nwarmup': nwarmup,
        'nchain': nchain,
        'ncore': ncore,
        'nthin': nthin,
        'inits': inits,
        'ind_pars': ind_pars,
        'model_regressor': model_regressor,
        'vb': vb,
        'inc_postpred': inc_postpred,
        'adapt_delta': adapt_delta,
        'stepsize': stepsize,
        'max_treedepth': max_treedepth,
    }
    fit_kwargs.update(additional_args)
    return PrlFictitiousCnc(**fit_kwargs)
Loading