From b875f0f6781638bdeb3092669ea22028e0187052 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 08:57:37 -0500 Subject: [PATCH 1/7] ENH: Remove examples --- examples/dmri_camino_dti.py | 303 --- examples/dmri_connectivity.py | 613 ------ examples/dmri_connectivity_advanced.py | 641 ------ examples/dmri_dtk_dti.py | 197 -- examples/dmri_dtk_odf.py | 190 -- examples/dmri_fsl_dti.py | 254 --- examples/dmri_group_connectivity_camino.py | 168 -- examples/dmri_group_connectivity_mrtrix.py | 184 -- examples/dmri_mrtrix_dti.py | 271 --- examples/dmri_preprocessing.py | 171 -- examples/dmri_tbss_nki.py | 124 -- examples/fmri_ants_openfmri.py | 1140 ----------- examples/fmri_freesurfer_smooth.py | 616 ------ examples/fmri_fsl.py | 629 ------ examples/fmri_fsl_feeds.py | 160 -- examples/fmri_fsl_reuse.py | 254 --- examples/fmri_nipy_glm.py | 261 --- examples/fmri_slicer_coregistration.py | 115 -- examples/fmri_spm.py | 390 ---- examples/fmri_spm_auditory.py | 404 ---- examples/fmri_spm_dartel.py | 539 ----- examples/fmri_spm_face.py | 504 ----- examples/fmri_spm_nested.py | 489 ----- .../frontiers_paper/smoothing_comparison.py | 186 -- .../frontiers_paper/workflow_from_scratch.py | 143 -- examples/howto_caching_example.py | 51 - examples/nipype_tutorial.ipynb | 1750 ----------------- examples/rsfmri_vol_surface_preprocessing.py | 1082 ---------- .../rsfmri_vol_surface_preprocessing_nipy.py | 1083 ---------- examples/smri_ants_build_template.py | 152 -- examples/smri_ants_registration.py | 109 - .../smri_antsregistration_build_template.py | 222 --- examples/smri_cbs_skullstripping.py | 34 - examples/smri_freesurfer.py | 64 - examples/smri_fsreconall.py | 89 - examples/tessellation_tutorial.py | 111 -- examples/test_spm.py | 77 - examples/workshop_dartmouth_2010.py | 288 --- 38 files changed, 14058 deletions(-) delete mode 100755 examples/dmri_camino_dti.py delete mode 100755 examples/dmri_connectivity.py delete mode 100755 examples/dmri_connectivity_advanced.py delete mode 100755 examples/dmri_dtk_dti.py delete mode 100755 examples/dmri_dtk_odf.py delete mode 100755 examples/dmri_fsl_dti.py delete mode 100644 examples/dmri_group_connectivity_camino.py delete mode 100644 examples/dmri_group_connectivity_mrtrix.py delete mode 100755 examples/dmri_mrtrix_dti.py delete mode 100644 examples/dmri_preprocessing.py delete mode 100755 examples/dmri_tbss_nki.py delete mode 100755 examples/fmri_ants_openfmri.py delete mode 100755 examples/fmri_freesurfer_smooth.py delete mode 100755 examples/fmri_fsl.py delete mode 100755 examples/fmri_fsl_feeds.py delete mode 100755 examples/fmri_fsl_reuse.py delete mode 100755 examples/fmri_nipy_glm.py delete mode 100755 examples/fmri_slicer_coregistration.py delete mode 100755 examples/fmri_spm.py delete mode 100755 examples/fmri_spm_auditory.py delete mode 100755 examples/fmri_spm_dartel.py delete mode 100755 examples/fmri_spm_face.py delete mode 100755 examples/fmri_spm_nested.py delete mode 100644 examples/frontiers_paper/smoothing_comparison.py delete mode 100644 examples/frontiers_paper/workflow_from_scratch.py delete mode 100644 examples/howto_caching_example.py delete mode 100644 examples/nipype_tutorial.ipynb delete mode 100644 examples/rsfmri_vol_surface_preprocessing.py delete mode 100644 examples/rsfmri_vol_surface_preprocessing_nipy.py delete mode 100644 examples/smri_ants_build_template.py delete mode 100644 examples/smri_ants_registration.py delete mode 100644 examples/smri_antsregistration_build_template.py delete mode 100644 
examples/smri_cbs_skullstripping.py delete mode 100644 examples/smri_freesurfer.py delete mode 100644 examples/smri_fsreconall.py delete mode 100644 examples/tessellation_tutorial.py delete mode 100644 examples/test_spm.py delete mode 100644 examples/workshop_dartmouth_2010.py diff --git a/examples/dmri_camino_dti.py b/examples/dmri_camino_dti.py deleted file mode 100755 index 7928fd7cfa..0000000000 --- a/examples/dmri_camino_dti.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/env python -""" -================= -dMRI: Camino, DTI -================= - -Introduction -============ - -This script, camino_dti_tutorial.py, demonstrates the ability to perform basic diffusion analysis -in a Nipype pipeline:: - - python dmri_camino_dti.py - -We perform this analysis using the FSL course data, which can be acquired from here: -http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -Import necessary modules from nipype. -""" - -import os # system functions -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.interfaces.camino as camino -import nipype.interfaces.fsl as fsl -import nipype.interfaces.camino2trackvis as cam2trk -import nipype.algorithms.misc as misc -""" -We use the following functions to scrape the voxel and data dimensions of the input images. This allows the -pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial -(fmri_spm_face.py) also implements this inferral of voxel size from the data. -""" - - -def get_vox_dims(volume): - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - voxdims = hdr.get_zooms() - return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] - - -def get_data_dims(volume): - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - datadims = hdr.get_data_shape() - return [int(datadims[0]), int(datadims[1]), int(datadims[2])] - - -def get_affine(volume): - import nibabel as nb - nii = nb.load(volume) - return nii.affine - - -subject_list = ['subj1'] -fsl.FSLCommand.set_default_output_type('NIFTI') -""" -Map field names to individual subject runs -""" - -info = dict( - dwi=[['subject_id', 'data']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.engine.Node` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. 
-""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" - -# This needs to point to the fdt folder you can find after extracting -# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz -datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/') - -datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -An inputnode is used to pass the data obtained by the data grabber to the actual processing functions -""" - -inputnode = pe.Node( - interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), - name="inputnode") -""" -Setup for Diffusion Tensor Computation --------------------------------------- -In this section we create the nodes necessary for diffusion analysis. -First, the diffusion image is converted to voxel order. -""" - -image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") -fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") -fsl2scheme.inputs.usegradmod = True -""" -Second, diffusion tensors are fit to the voxel-order data. -""" - -dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit') -""" -Next, a lookup table is generated from the schemefile and the -signal-to-noise ratio (SNR) of the unweighted (q=0) data. -""" - -dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") -dtlutgen.inputs.snr = 16.0 -dtlutgen.inputs.inversion = 1 -""" -In this tutorial we implement probabilistic tractography using the PICo algorithm. -PICo tractography requires an estimate of the fibre direction and a model of its -uncertainty in each voxel; this is produced using the following node. -""" - -picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") -picopdfs.inputs.inputmodel = 'dt' -""" -An FSL BET node creates a brain mask is generated from the diffusion image for seeding the PICo tractography. -""" - -bet = pe.Node(interface=fsl.BET(), name="bet") -bet.inputs.mask = True -""" -Finally, tractography is performed. -First DT streamline tractography. -""" - -trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt") -""" -Now camino's Probablistic Index of connectivity algorithm. -In this tutorial, we will use only 1 iteration for time-saving purposes. -""" - -trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico") -trackpico.inputs.iterations = 1 -""" -Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to -convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. -""" - -cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt") -cam2trk_dt.inputs.min_length = 30 -cam2trk_dt.inputs.voxel_order = 'LAS' - -cam2trk_pico = pe.Node( - interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico") -cam2trk_pico.inputs.min_length = 30 -cam2trk_pico.inputs.voxel_order = 'LAS' - -trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino") -""" -Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, -using the following two nodes. For VTK use VtkStreamlines. -""" - -procstreamlines = pe.Node( - interface=camino.ProcStreamlines(), name="procstreamlines") -procstreamlines.inputs.outputtracts = 'oogl' -""" -We can also produce a variety of scalar values from our fitted tensors. 
The following nodes generate the -fractional anisotropy and diffusivity trace maps and their associated headers. -""" - -fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa') -trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace') -dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') - -analyzeheader_fa = pe.Node( - interface=camino.AnalyzeHeader(), name="analyzeheader_fa") -analyzeheader_fa.inputs.datatype = "double" -analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace') - -fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii') -trace2nii = fa2nii.clone("trace2nii") -""" -Since we have now created all our nodes, we can now define our workflow and start making connections. -""" - -tractography = pe.Workflow(name='tractography') - -tractography.connect([(inputnode, bet, [("dwi", "in_file")])]) -""" -File format conversion -""" - -tractography.connect([(inputnode, image2voxel, [("dwi", "in_file")]), - (inputnode, fsl2scheme, [("bvecs", "bvec_file"), - ("bvals", "bval_file")])]) -""" -Tensor fitting -""" - -tractography.connect([(image2voxel, dtifit, [['voxel_order', 'in_file']]), - (fsl2scheme, dtifit, [['scheme', 'scheme_file']])]) -""" -Workflow for applying DT streamline tractogpahy -""" - -tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])]) -tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])]) -""" -Workflow for applying PICo -""" - -tractography.connect([(bet, trackpico, [("mask_file", "seed_file")])]) -tractography.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])]) -tractography.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])]) -tractography.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])]) -tractography.connect([(picopdfs, trackpico, [("pdfs", "in_file")])]) - -# ProcStreamlines might throw memory errors - comment this line out in such case -tractography.connect([(trackdt, procstreamlines, [("tracked", "in_file")])]) -""" -Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the -tensor fitting. - -This is also where our voxel- and data-grabbing functions come in. We pass these functions, along with -the original DWI image from the input node, to the header-generating nodes. This ensures that the files -will be correct and readable. 
-""" - -tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])]) -tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])]) -tractography.connect([(inputnode, analyzeheader_fa, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) -tractography.connect([(fa, fa2nii, [('fa', 'data_file')])]) -tractography.connect([(inputnode, fa2nii, [(('dwi', get_affine), 'affine')])]) -tractography.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])]) - -tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])]) -tractography.connect([(trace, analyzeheader_trace, [("trace", "in_file")])]) -tractography.connect([(inputnode, analyzeheader_trace, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) -tractography.connect([(trace, trace2nii, [('trace', 'data_file')])]) -tractography.connect([(inputnode, trace2nii, [(('dwi', get_affine), - 'affine')])]) -tractography.connect([(analyzeheader_trace, trace2nii, [('header', - 'header_file')])]) - -tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])]) - -tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])]) -tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])]) -tractography.connect([(inputnode, cam2trk_pico, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) - -tractography.connect([(inputnode, cam2trk_dt, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) -""" -Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes -declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding -their names to the subject list and their data to the proper folders. -""" - -workflow = pe.Workflow(name="workflow") -workflow.base_dir = os.path.abspath('camino_dti_tutorial') -workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, tractography, - [('dwi', 'inputnode.dwi'), ('bvals', 'inputnode.bvals'), - ('bvecs', 'inputnode.bvecs')])]) -""" -The following functions run the whole workflow and produce a .dot and .png graph of the processing pipeline. -""" - -if __name__ == '__main__': - workflow.run() - workflow.write_graph() -""" -You can choose the format of the experted graph with the ``format`` option. For example ``workflow.write_graph(format='eps')`` - -""" diff --git a/examples/dmri_connectivity.py b/examples/dmri_connectivity.py deleted file mode 100755 index fff79bc550..0000000000 --- a/examples/dmri_connectivity.py +++ /dev/null @@ -1,613 +0,0 @@ -#!/usr/bin/env python -""" -============================================= -dMRI: Connectivity - Camino, CMTK, FreeSurfer -============================================= - -Introduction -============ - -This script, connectivity_tutorial.py, demonstrates the ability to perform connectivity mapping -using Nipype for pipelining, Freesurfer for Reconstruction / Parcellation, Camino for tensor-fitting -and tractography, and the Connectome Mapping Toolkit (CMTK) for connectivity analysis:: - - python connectivity_tutorial.py - -We perform this analysis using the FSL course data, which can be acquired from here: - - * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -This pipeline also requires the Freesurfer directory for 'subj1' from the FSL course data. 
-To save time, this data can be downloaded from here: - - * http://dl.dropbox.com/u/315714/subj1.zip?dl=1 - -A data package containing the outputs of this pipeline can be obtained from here: - - * http://db.tt/1vx4vLeP - -Along with Camino_, Camino2Trackvis_, FSL_, and FreeSurfer_, -you must also have the Connectome File Format -library installed as well as the Connectome Mapper. - -These are written by Stephan Gerhard and can be obtained from: - - http://www.cmtk.org/ - -Or on github at: - - CFFlib: https://github.com/LTS5/cfflib - CMP: https://github.com/LTS5/cmp - -Output data can be visualized in the ConnectomeViewer - - ConnectomeViewer: https://github.com/LTS5/connectomeviewer - -First, we import the necessary modules from nipype. -""" - -import inspect - -import os.path as op # system functions -import cmp # connectome mapper -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.interfaces.camino as camino -import nipype.interfaces.fsl as fsl -import nipype.interfaces.camino2trackvis as cam2trk -import nipype.interfaces.freesurfer as fs # freesurfer -import nipype.interfaces.cmtk as cmtk -import nipype.algorithms.misc as misc -""" -We define the following functions to scrape the voxel and data dimensions of the input images. This allows the -pipeline to be flexible enough to accept and process images of varying size. The SPM Face tutorial -(fmri_spm_face.py) also implements this inferral of voxel size from the data. We also define functions to -select the proper parcellation/segregation file from Freesurfer's output for each subject. For the mapping in -this tutorial, we use the aparc+seg.mgz file. While it is possible to change this to use the regions defined in -aparc.a2009s+aseg.mgz, one would also have to write/obtain a network resolution map defining the nodes based on those -regions. -""" - - -def get_vox_dims(volume): - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - voxdims = hdr.get_zooms() - return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] - - -def get_data_dims(volume): - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - datadims = hdr.get_data_shape() - return [int(datadims[0]), int(datadims[1]), int(datadims[2])] - - -def get_affine(volume): - import nibabel as nb - nii = nb.load(volume) - return nii.affine - - -def select_aparc(list_of_files): - for in_file in list_of_files: - if 'aparc+aseg.mgz' in in_file: - idx = list_of_files.index(in_file) - return list_of_files[idx] - - -def select_aparc_annot(list_of_files): - for in_file in list_of_files: - if '.aparc.annot' in in_file: - idx = list_of_files.index(in_file) - return list_of_files[idx] - - -""" -These need to point to the main Freesurfer directory as well as the freesurfer subjects directory. -No assumptions are made about where the directory of subjects is placed. -Recon-all must have been run on subj1 from the FSL course data. 
-""" - -fs_dir = op.abspath('/usr/local/freesurfer') -subjects_dir = op.abspath(op.join(op.curdir, './subjects')) -fsl.FSLCommand.set_default_output_type('NIFTI') -""" -This needs to point to the fdt folder you can find after extracting -http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz -""" - -data_dir = op.abspath('fsl_course_data/fdt/') -fs.FSCommand.set_default_subjects_dir(subjects_dir) -subject_list = ['subj1'] -""" -An infosource node is used to loop through the subject list and define the input files. -For our purposes, these are the diffusion-weighted MR image, b vectors, and b values. -The info dictionary is used to provide a template of the naming of these files. For instance, -the 4D nifti diffusion image is stored in the FSL course data as data.nii.gz. -""" - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -infosource.iterables = ('subject_id', subject_list) - -info = dict( - dwi=[['subject_id', 'data']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) -""" -A datasource node is used to perform the actual data grabbing. -Templates for the associated images are used to obtain the correct images. -The data are assumed to lie in data_dir/subject_id/. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" -datasource.inputs.base_directory = data_dir -datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') -datasource.inputs.template_args = info -datasource.inputs.base_directory = data_dir -datasource.inputs.sort_filelist = True -""" -FreeSurferSource nodes are used to retrieve a number of image -files that were automatically generated by the recon-all process. -Here we use three of these nodes, two of which are defined to return files for solely the left and right hemispheres. -""" - -FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') -FreeSurferSource.inputs.subjects_dir = subjects_dir - -FreeSurferSourceLH = pe.Node( - interface=nio.FreeSurferSource(), name='fssourceLH') -FreeSurferSourceLH.inputs.subjects_dir = subjects_dir -FreeSurferSourceLH.inputs.hemi = 'lh' - -FreeSurferSourceRH = pe.Node( - interface=nio.FreeSurferSource(), name='fssourceRH') -FreeSurferSourceRH.inputs.subjects_dir = subjects_dir -FreeSurferSourceRH.inputs.hemi = 'rh' -""" -Since the b values and b vectors come from the FSL course, we must convert it to a scheme file -for use in Camino. -""" - -fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") -fsl2scheme.inputs.usegradmod = True -""" -FSL's Brain Extraction tool is used to create a mask from the b0 image -""" - -b0Strip = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') -""" -FSL's FLIRT function is used to coregister the b0 mask and the structural image. -A convert_xfm node is then used to obtain the inverse of the transformation matrix. -FLIRT is used once again to apply the inverse transformation to the parcellated brain image. 
-""" - -coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') -coregister.inputs.cost = ('corratio') - -convertxfm = pe.Node(interface=fsl.ConvertXFM(), name='convertxfm') -convertxfm.inputs.invert_xfm = True - -inverse = pe.Node(interface=fsl.FLIRT(), name='inverse') -inverse.inputs.interp = ('nearestneighbour') - -inverse_AparcAseg = pe.Node(interface=fsl.FLIRT(), name='inverse_AparcAseg') -inverse_AparcAseg.inputs.interp = ('nearestneighbour') -""" -A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. -Nodes are used to convert the following: - - * Original structural image to NIFTI - * Parcellated white matter image to NIFTI - * Parcellated whole-brain image to NIFTI - * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres - are converted to GIFTI for visualization in ConnectomeViewer - * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI -""" - -mri_convert_Brain = pe.Node( - interface=fs.MRIConvert(), name='mri_convert_Brain') -mri_convert_Brain.inputs.out_type = 'nii' - -mri_convert_WMParc = mri_convert_Brain.clone('mri_convert_WMParc') -mri_convert_AparcAseg = mri_convert_Brain.clone('mri_convert_AparcAseg') - -mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') -mris_convertLH.inputs.out_datatype = 'gii' -mris_convertRH = mris_convertLH.clone('mris_convertRH') -mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') -mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') -mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') -mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') -mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') -mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') -mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') -mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') -""" -An inputnode is used to pass the data obtained by the data grabber to the actual processing functions -""" - -inputnode = pe.Node( - interface=util.IdentityInterface( - fields=["dwi", "bvecs", "bvals", "subject_id"]), - name="inputnode") -""" -In this section we create the nodes necessary for diffusion analysis. -First, the diffusion image is converted to voxel order, since this is the format in which Camino does -its processing. -""" - -image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel") -""" -Second, diffusion tensors are fit to the voxel-order data. -If desired, these tensors can be converted to a Nifti tensor image using the DT2NIfTI interface. -""" - -dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit') -""" -Next, a lookup table is generated from the schemefile and the -signal-to-noise ratio (SNR) of the unweighted (q=0) data. -""" - -dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen") -dtlutgen.inputs.snr = 16.0 -dtlutgen.inputs.inversion = 1 -""" -In this tutorial we implement probabilistic tractography using the PICo algorithm. -PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel; -this probabilitiy distribution map is produced using the following node. -""" - -picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs") -picopdfs.inputs.inputmodel = 'dt' -""" -Finally, tractography is performed. In this tutorial, we will use only one iteration for time-saving purposes. 
-It is important to note that we use the TrackPICo interface here. This interface now expects the files required -for PICo tracking (i.e. the output from picopdfs). Similar interfaces exist for alternative types of tracking, -such as Bayesian tracking with Dirac priors (TrackBayesDirac). -""" - -track = pe.Node(interface=camino.TrackPICo(), name="track") -track.inputs.iterations = 1 -""" -Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to -convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform the reverse. -""" - -camino2trackvis = pe.Node( - interface=cam2trk.Camino2Trackvis(), name="camino2trk") -camino2trackvis.inputs.min_length = 30 -camino2trackvis.inputs.voxel_order = 'LAS' -trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino") -""" -Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, -using the following two nodes. -""" - -vtkstreamlines = pe.Node( - interface=camino.VtkStreamlines(), name="vtkstreamlines") -procstreamlines = pe.Node( - interface=camino.ProcStreamlines(), name="procstreamlines") -procstreamlines.inputs.outputtracts = 'oogl' -""" -We can easily produce a variety of scalar values from our fitted tensors. The following nodes generate the -fractional anisotropy and diffusivity trace maps and their associated headers, and then merge them back -into a single .nii file. -""" - -fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa') -trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace') -dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') - -analyzeheader_fa = pe.Node( - interface=camino.AnalyzeHeader(), name='analyzeheader_fa') -analyzeheader_fa.inputs.datatype = 'double' -analyzeheader_trace = pe.Node( - interface=camino.AnalyzeHeader(), name='analyzeheader_trace') -analyzeheader_trace.inputs.datatype = 'double' - -fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii') -trace2nii = fa2nii.clone("trace2nii") -""" -This section adds the Connectome Mapping Toolkit (CMTK) nodes. -These interfaces are fairly experimental and may not function properly. -In order to perform connectivity mapping using CMTK, the parcellated structural data is rewritten -using the indices and parcellation scheme from the connectome mapper (CMP). This process has been -written into the ROIGen interface, which will output a remapped aparc+aseg image as well as a -dictionary of label information (i.e. name, display colours) pertaining to the original and remapped regions. -These label values are input from a user-input lookup table, if specified, and otherwise the default -Freesurfer LUT (/freesurfer/FreeSurferColorLUT.txt). -""" - -roigen = pe.Node(interface=cmtk.ROIGen(), name="ROIGen") -cmp_config = cmp.configuration.PipelineConfiguration( - parcellation_scheme="NativeFreesurfer") -cmp_config.parcellation_scheme = "NativeFreesurfer" -roigen.inputs.LUT_file = cmp_config.get_freeview_lut("NativeFreesurfer")[ - 'freesurferaparc'] -roigen_structspace = roigen.clone('ROIGen_structspace') -""" -The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts -and outputs a number of different files. The most important of which is the connectivity network itself, which is stored -as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). 
Also outputted are various -NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and -standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the -specific tracts that connect between user-selected regions. -""" - -creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") -creatematrix.inputs.count_region_intersections = True -createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") -createnodes.inputs.resolution_network_file = cmp_config.parcellation[ - 'freesurferaparc']['node_information_graphml'] -""" -Here we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use -the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. -""" - -CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") - -giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") -giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") -niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") -fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") -gpickledNetworks = pe.Node(interface=util.Merge(1), name="NetworkFiles") -""" -Since we have now created all our nodes, we can define our workflow and start making connections. -""" - -mapping = pe.Workflow(name='mapping') -""" -First, we connect the input node to the early conversion functions. -FreeSurfer input nodes: -""" - -mapping.connect([(inputnode, FreeSurferSource, [("subject_id", - "subject_id")])]) -mapping.connect([(inputnode, FreeSurferSourceLH, [("subject_id", - "subject_id")])]) -mapping.connect([(inputnode, FreeSurferSourceRH, [("subject_id", - "subject_id")])]) -""" -Required conversions for processing in Camino: -""" - -mapping.connect([(inputnode, image2voxel, [("dwi", "in_file")]), - (inputnode, fsl2scheme, - [("bvecs", "bvec_file"), - ("bvals", "bval_file")]), (image2voxel, dtifit, - [['voxel_order', 'in_file']]), - (fsl2scheme, dtifit, [['scheme', 'scheme_file']])]) -""" -Nifti conversions for the parcellated white matter image (used in Camino's conmap), -and the subject's stripped brain image from Freesurfer: -""" - -mapping.connect([(FreeSurferSource, mri_convert_WMParc, [('wmparc', - 'in_file')])]) -mapping.connect([(FreeSurferSource, mri_convert_Brain, [('brain', - 'in_file')])]) -""" -Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) -""" - -mapping.connect([(FreeSurferSourceLH, mris_convertLH, [('pial', 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRH, [('pial', 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite, [('white', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite, [('white', - 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated, [('inflated', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated, [('inflated', - 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere, [('sphere', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere, [('sphere', - 'in_file')])]) -""" -The annotation files are converted using the pial surface as a map via the MRIsConvert interface. -One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files -specifically (rather than i.e. 
rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. -""" - -mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [('pial', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [('pial', - 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) -""" -This section coregisters the diffusion-weighted and parcellated white-matter / whole brain images. -At present the conmap node connection is left commented, as there have been recent changes in Camino -code that have presented some users with errors. -""" - -mapping.connect([(inputnode, b0Strip, [('dwi', 'in_file')])]) -mapping.connect([(b0Strip, coregister, [('out_file', 'in_file')])]) -mapping.connect([(mri_convert_Brain, coregister, [('out_file', 'reference')])]) -mapping.connect([(coregister, convertxfm, [('out_matrix_file', 'in_file')])]) -mapping.connect([(b0Strip, inverse, [('out_file', 'reference')])]) -mapping.connect([(convertxfm, inverse, [('out_file', 'in_matrix_file')])]) -mapping.connect([(mri_convert_WMParc, inverse, [('out_file', 'in_file')])]) -""" -The tractography pipeline consists of the following nodes. Further information about the tractography -can be found in nipype/examples/dmri_camino_dti.py. -""" - -mapping.connect([(b0Strip, track, [("mask_file", "seed_file")])]) -mapping.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])]) -mapping.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])]) -mapping.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])]) -mapping.connect([(picopdfs, track, [("pdfs", "in_file")])]) -""" -Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the -tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, -along with the original DWI image from the input node, to the header-generating nodes. This ensures that the -files will be correct and readable. -""" - -mapping.connect([(dtifit, fa, [("tensor_fitted", "in_file")])]) -mapping.connect([(fa, analyzeheader_fa, [("fa", "in_file")])]) -mapping.connect([(inputnode, analyzeheader_fa, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) -mapping.connect([(fa, fa2nii, [('fa', 'data_file')])]) -mapping.connect([(inputnode, fa2nii, [(('dwi', get_affine), 'affine')])]) -mapping.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])]) - -mapping.connect([(dtifit, trace, [("tensor_fitted", "in_file")])]) -mapping.connect([(trace, analyzeheader_trace, [("trace", "in_file")])]) -mapping.connect([(inputnode, analyzeheader_trace, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) -mapping.connect([(trace, trace2nii, [('trace', 'data_file')])]) -mapping.connect([(inputnode, trace2nii, [(('dwi', get_affine), 'affine')])]) -mapping.connect([(analyzeheader_trace, trace2nii, [('header', - 'header_file')])]) - -mapping.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])]) -""" -The output tracts are converted to Trackvis format (and back). Here we also use the voxel- and data-grabbing -functions defined at the beginning of the pipeline. 
-""" - -mapping.connect([(track, camino2trackvis, [('tracked', 'in_file')]), - (track, vtkstreamlines, [['tracked', 'in_file']]), - (camino2trackvis, trk2camino, [['trackvis', 'in_file']])]) -mapping.connect([(inputnode, camino2trackvis, - [(('dwi', get_vox_dims), 'voxel_dims'), - (('dwi', get_data_dims), 'data_dims')])]) -""" -Here the CMTK connectivity mapping nodes are connected. -The original aparc+aseg image is converted to NIFTI, then registered to -the diffusion image and delivered to the ROIGen node. The remapped parcellation, -original tracts, and label file are then given to CreateMatrix. -""" - -mapping.connect(createnodes, 'node_network', creatematrix, - 'resolution_network_file') -mapping.connect([(FreeSurferSource, mri_convert_AparcAseg, - [(('aparc_aseg', select_aparc), 'in_file')])]) - -mapping.connect([(b0Strip, inverse_AparcAseg, [('out_file', 'reference')])]) -mapping.connect([(convertxfm, inverse_AparcAseg, [('out_file', - 'in_matrix_file')])]) -mapping.connect([(mri_convert_AparcAseg, inverse_AparcAseg, [('out_file', - 'in_file')])]) -mapping.connect([(mri_convert_AparcAseg, roigen_structspace, - [('out_file', 'aparc_aseg_file')])]) -mapping.connect([(roigen_structspace, createnodes, [("roi_file", - "roi_file")])]) - -mapping.connect([(inverse_AparcAseg, roigen, [("out_file", - "aparc_aseg_file")])]) -mapping.connect([(roigen, creatematrix, [("roi_file", "roi_file")])]) -mapping.connect([(camino2trackvis, creatematrix, [("trackvis", - "tract_file")])]) -mapping.connect([(inputnode, creatematrix, [("subject_id", - "out_matrix_file")])]) -mapping.connect([(inputnode, creatematrix, [("subject_id", - "out_matrix_mat_file")])]) -""" -The merge nodes defined earlier are used here to create lists of the files which are -destined for the CFFConverter. -""" - -mapping.connect([(creatematrix, gpickledNetworks, [("matrix_files", "in1")])]) - -mapping.connect([(mris_convertLH, giftiSurfaces, [("converted", "in1")])]) -mapping.connect([(mris_convertRH, giftiSurfaces, [("converted", "in2")])]) -mapping.connect([(mris_convertLHwhite, giftiSurfaces, [("converted", "in3")])]) -mapping.connect([(mris_convertRHwhite, giftiSurfaces, [("converted", "in4")])]) -mapping.connect([(mris_convertLHinflated, giftiSurfaces, [("converted", - "in5")])]) -mapping.connect([(mris_convertRHinflated, giftiSurfaces, [("converted", - "in6")])]) -mapping.connect([(mris_convertLHsphere, giftiSurfaces, [("converted", - "in7")])]) -mapping.connect([(mris_convertRHsphere, giftiSurfaces, [("converted", - "in8")])]) - -mapping.connect([(mris_convertLHlabels, giftiLabels, [("converted", "in1")])]) -mapping.connect([(mris_convertRHlabels, giftiLabels, [("converted", "in2")])]) - -mapping.connect([(roigen, niftiVolumes, [("roi_file", "in1")])]) -mapping.connect([(inputnode, niftiVolumes, [("dwi", "in2")])]) -mapping.connect([(mri_convert_Brain, niftiVolumes, [("out_file", "in3")])]) - -mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file", "in1")])]) -mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file_mm", - "in2")])]) -mapping.connect([(creatematrix, fiberDataArrays, [("fiber_length_file", - "in3")])]) -mapping.connect([(creatematrix, fiberDataArrays, [("fiber_label_file", - "in4")])]) -""" -This block actually connects the merged lists to the CFF converter. We pass the surfaces -and volumes that are to be included, as well as the tracts and the network itself. The currently -running pipeline (dmri_connectivity.py) is also scraped and included in the CFF file. 
This -makes it easy for the user to examine the entire processing pathway used to generate the end -product. -""" - -CFFConverter.inputs.script_files = op.abspath( - inspect.getfile(inspect.currentframe())) -mapping.connect([(giftiSurfaces, CFFConverter, [("out", "gifti_surfaces")])]) -mapping.connect([(giftiLabels, CFFConverter, [("out", "gifti_labels")])]) -mapping.connect([(gpickledNetworks, CFFConverter, [("out", - "gpickled_networks")])]) -mapping.connect([(niftiVolumes, CFFConverter, [("out", "nifti_volumes")])]) -mapping.connect([(fiberDataArrays, CFFConverter, [("out", "data_files")])]) -mapping.connect([(creatematrix, CFFConverter, [("filtered_tractographies", - "tract_files")])]) -mapping.connect([(inputnode, CFFConverter, [("subject_id", "title")])]) -""" -Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes -declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding -their names to the subject list and their data to the proper folders. -""" - -connectivity = pe.Workflow(name="connectivity") -connectivity.base_dir = op.abspath('dmri_connectivity') -connectivity.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, mapping, - [('dwi', 'inputnode.dwi'), ('bvals', 'inputnode.bvals'), - ('bvecs', 'inputnode.bvecs')]), - (infosource, mapping, [('subject_id', - 'inputnode.subject_id')])]) -""" -The following functions run the whole workflow and produce graphs describing the processing pipeline. -By default, write_graph outputs a .dot file and a .png image, but here we set it to output the image -as a vector graphic, by passing the format='eps' argument. -""" - -if __name__ == '__main__': - connectivity.run() - connectivity.write_graph(format='eps') -""" -The output CFF file of this pipeline can be loaded in the `Connectome Viewer `_. -After loading the network into memory it can be examined in 3D or as a connectivity matrix -using the default scripts produced by the Code Oracle. -To compare networks, one must use the MergeCNetworks interface to merge two networks into -a single CFF file. Statistics can then be run using the Network Brain Statistics (NBS) plugin -Surfaces can also be loaded along with their labels from the aparc+aseg file. The tractography -is included in the file so that region-to-region fibers can be individually plotted using the -Code Oracle. - -""" diff --git a/examples/dmri_connectivity_advanced.py b/examples/dmri_connectivity_advanced.py deleted file mode 100755 index c25f1fe488..0000000000 --- a/examples/dmri_connectivity_advanced.py +++ /dev/null @@ -1,641 +0,0 @@ -#!/usr/bin/env python -""" -============================================= -dMRI: Connectivity - MRtrix, CMTK, FreeSurfer -============================================= - -Introduction -============ - -This script, connectivity_tutorial_advanced.py, demonstrates the ability to perform connectivity mapping -using Nipype for pipelining, Freesurfer for Reconstruction / Segmentation, MRtrix for spherical deconvolution -and tractography, and the Connectome Mapping Toolkit (CMTK) for further parcellation and connectivity analysis:: - - python connectivity_tutorial_advanced.py - -We perform this analysis using the FSL course data, which can be acquired from here: - - * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -This pipeline also requires the Freesurfer directory for 'subj1' from the FSL course data. 
-To save time, this data can be downloaded from here: - - * http://dl.dropbox.com/u/315714/subj1.zip?dl=1 - -The result of this processing will be the connectome for subj1 as a Connectome File Format (CFF) File, using -the Lausanne2008 parcellation scheme. A data package containing the outputs of this pipeline can be obtained -from here: - - * http://db.tt/909Q3AC1 - -.. seealso:: - - connectivity_tutorial.py - Original tutorial using Camino and the NativeFreesurfer Parcellation Scheme - - www.cmtk.org - For more info about the parcellation scheme - -.. warning:: - - The ConnectomeMapper (https://github.com/LTS5/cmp or www.cmtk.org) must be installed for this tutorial to function! - -Packages and Data Setup -======================= - -Import necessary modules from nipype. -""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.interfaces.fsl as fsl -import nipype.interfaces.freesurfer as fs # freesurfer -import nipype.interfaces.mrtrix as mrtrix -import nipype.algorithms.misc as misc -import nipype.interfaces.cmtk as cmtk -import nipype.interfaces.dipy as dipy -import inspect -import os -import os.path as op # system functions -from niflow.nipype1.workflows.dmri.fsl.dti import create_eddy_correct_pipeline -from niflow.nipype1.workflows.dmri.camino.connectivity_mapping import select_aparc_annot -from nipype.utils.misc import package_check -import warnings -from niflow.nipype1.workflows.dmri.connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline -from niflow.nipype1.workflows.smri.freesurfer import create_tessellation_flow - -try: - package_check('cmp') -except Exception as e: - warnings.warn('cmp not installed') -else: - import cmp -""" -This needs to point to the freesurfer subjects directory (Recon-all must have been run on subj1 from the FSL course data) -Alternatively, the reconstructed subject data can be downloaded from: - - * http://dl.dropbox.com/u/315714/subj1.zip - -""" - -subjects_dir = op.abspath(op.join(op.curdir, './subjects')) -fs.FSCommand.set_default_subjects_dir(subjects_dir) -fsl.FSLCommand.set_default_output_type('NIFTI') - -fs_dir = os.environ['FREESURFER_HOME'] -lookup_file = op.join(fs_dir, 'FreeSurferColorLUT.txt') -""" -This needs to point to the fdt folder you can find after extracting - - * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -""" - -data_dir = op.abspath(op.join(op.curdir, 'exdata/')) -subject_list = ['subj1'] -""" -Use infosource node to loop through the subject list and define the input files. -For our purposes, these are the diffusion-weighted MR image, b vectors, and b values. -""" - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -infosource.iterables = ('subject_id', subject_list) - -info = dict( - dwi=[['subject_id', 'data']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) -""" -Use datasource node to perform the actual data grabbing. -Templates for the associated images are used to obtain the correct images. 
-""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" -datasource.inputs.base_directory = data_dir -datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -The input node and Freesurfer sources declared here will be the main -conduits for the raw data to the rest of the processing pipeline. -""" - -inputnode = pe.Node( - interface=util.IdentityInterface( - fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir"]), - name="inputnode") -inputnode.inputs.subjects_dir = subjects_dir - -FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') -FreeSurferSourceLH = FreeSurferSource.clone('fssourceLH') -FreeSurferSourceLH.inputs.hemi = 'lh' -FreeSurferSourceRH = FreeSurferSource.clone('fssourceRH') -FreeSurferSourceRH.inputs.hemi = 'rh' -""" -Creating the workflow's nodes -============================= - -Conversion nodes ----------------- - -A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. -Nodes are used to convert the following: - - * Original structural image to NIFTI - * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer - * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI - -""" - -mri_convert_Brain = pe.Node( - interface=fs.MRIConvert(), name='mri_convert_Brain') -mri_convert_Brain.inputs.out_type = 'nii' -mri_convert_ROI_scale500 = mri_convert_Brain.clone('mri_convert_ROI_scale500') - -mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') -mris_convertLH.inputs.out_datatype = 'gii' -mris_convertRH = mris_convertLH.clone('mris_convertRH') -mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') -mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') -mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') -mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') -mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') -mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') -mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') -mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') -""" -Diffusion processing nodes --------------------------- - -.. seealso:: - - dmri_mrtrix_dti.py - Tutorial that focuses solely on the MRtrix diffusion processing - - http://www.brain.org.au/software/mrtrix/index.html - MRtrix's online documentation - - - -b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. -""" - -fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') -""" -Distortions induced by eddy currents are corrected prior to fitting the tensors. -The first image is used as a reference for which to warp the others. 
-""" - -eddycorrect = create_eddy_correct_pipeline(name='eddycorrect') -eddycorrect.inputs.inputnode.ref_num = 1 -""" -Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: - - * Major eigenvector in each voxel - * Apparent diffusion coefficient - * Fractional anisotropy -""" - -dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') -tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(), name='tensor2vector') -tensor2adc = pe.Node( - interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') -tensor2fa = pe.Node( - interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') -MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert_fa') -MRconvert_fa.inputs.extension = 'nii' -""" - -These nodes are used to create a rough brain mask from the b0 image. -The b0 image is extracted from the original diffusion-weighted image, -put through a simple thresholding routine, and smoothed using a 3x3 median filter. -""" - -MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') -MRconvert.inputs.extract_at_axis = 3 -MRconvert.inputs.extract_at_coordinate = [0] -threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') -median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3d') -""" -The brain mask is also used to help identify single-fiber voxels. -This is done by passing the brain mask through two erosion steps, -multiplying the remaining mask with the fractional anisotropy map, and -thresholding the result to obtain some highly anisotropic within-brain voxels. -""" - -erode_mask_firstpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_firstpass') -erode_mask_secondpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_secondpass') -MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') -MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') -threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') -threshold_FA.inputs.absolute_threshold_value = 0.7 -""" -For whole-brain tracking we also require a broad white-matter seed mask. -This is created by generating a white matter mask, given a brainmask, and -thresholding it at a reasonably high level. -""" - -bet = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') -gen_WM_mask = pe.Node( - interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') -threshold_wmmask = pe.Node( - interface=mrtrix.Threshold(), name='threshold_wmmask') -threshold_wmmask.inputs.absolute_threshold_value = 0.4 -""" -The spherical deconvolution step depends on the estimate of the response function -in the highly anisotropic voxels we obtained above. - -.. warning:: - - For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. - -""" - -estimateresponse = pe.Node( - interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') -estimateresponse.inputs.maximum_harmonic_order = 6 -csdeconv = pe.Node( - interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') -csdeconv.inputs.maximum_harmonic_order = 6 -""" -Finally, we track probabilistically using the orientation distribution functions obtained earlier. -The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. 
-""" - -probCSDstreamtrack = pe.Node( - interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), - name='probCSDstreamtrack') -probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' -probCSDstreamtrack.inputs.desired_number_of_tracks = 150000 -tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') -tracks2prob.inputs.colour = True -MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob') -tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') -trk2tdi = pe.Node(interface=dipy.TrackDensityMap(), name='trk2tdi') -""" -Structural segmentation nodes ------------------------------ - -The following node identifies the transformation between the diffusion-weighted -image and the structural image. This transformation is then applied to the tracts -so that they are in the same space as the regions of interest. -""" - -coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') -coregister.inputs.cost = ('normmi') -""" -Parcellation is performed given the aparc+aseg image from Freesurfer. -The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme. -The parcellation used here is entitled "scale500" and returns 1015 regions. -""" - -parcellation_name = 'scale500' -parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate") -parcellate.inputs.parcellation_name = parcellation_name -""" -The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts -and outputs a number of different files. The most important of which is the connectivity network itself, which is stored -as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various -NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and -standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the -specific tracts that connect between user-selected regions. - -Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step. -""" - -parcellation_name = 'scale500' -cmp_config = cmp.configuration.PipelineConfiguration() -cmp_config.parcellation_scheme = "Lausanne2008" -createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") -createnodes.inputs.resolution_network_file = cmp_config._get_lausanne_parcellation( - 'Lausanne2008')[parcellation_name]['node_information_graphml'] - -creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") -creatematrix.inputs.count_region_intersections = True -""" -Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use -the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. -The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to -look back at the processing parameters that were used. 
-""" - -CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") -CFFConverter.inputs.script_files = op.abspath( - inspect.getfile(inspect.currentframe())) -giftiSurfaces = pe.Node(interface=util.Merge(9), name="GiftiSurfaces") -giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") -niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") -fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") -gpickledNetworks = pe.Node(interface=util.Merge(2), name="NetworkFiles") -""" -We also create a workflow to calculate several network metrics on our resulting file, and another CFF converter -which will be used to package these networks into a single file. -""" - -networkx = create_networkx_pipeline(name='networkx') -cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv') -NxStatsCFFConverter = pe.Node( - interface=cmtk.CFFConverter(), name="NxStatsCFFConverter") -NxStatsCFFConverter.inputs.script_files = op.abspath( - inspect.getfile(inspect.currentframe())) - -tessflow = create_tessellation_flow(name='tessflow', out_format='gii') -tessflow.inputs.inputspec.lookup_file = lookup_file -""" -Connecting the workflow -======================= -Here we connect our processing pipeline. - -Connecting the inputs, FreeSurfer nodes, and conversions --------------------------------------------------------- -""" - -mapping = pe.Workflow(name='mapping') -""" -First, we connect the input node to the FreeSurfer input nodes. -""" - -mapping.connect([(inputnode, FreeSurferSource, [("subjects_dir", - "subjects_dir")])]) -mapping.connect([(inputnode, FreeSurferSource, [("subject_id", - "subject_id")])]) - -mapping.connect([(inputnode, FreeSurferSourceLH, [("subjects_dir", - "subjects_dir")])]) -mapping.connect([(inputnode, FreeSurferSourceLH, [("subject_id", - "subject_id")])]) - -mapping.connect([(inputnode, FreeSurferSourceRH, [("subjects_dir", - "subjects_dir")])]) -mapping.connect([(inputnode, FreeSurferSourceRH, [("subject_id", - "subject_id")])]) - -mapping.connect([(inputnode, tessflow, [("subjects_dir", - "inputspec.subjects_dir")])]) -mapping.connect([(inputnode, tessflow, [("subject_id", - "inputspec.subject_id")])]) - -mapping.connect([(inputnode, parcellate, [("subjects_dir", "subjects_dir")])]) -mapping.connect([(inputnode, parcellate, [("subject_id", "subject_id")])]) -mapping.connect([(parcellate, mri_convert_ROI_scale500, [('roi_file', - 'in_file')])]) -""" -Nifti conversion for subject's stripped brain image from Freesurfer: -""" - -mapping.connect([(FreeSurferSource, mri_convert_Brain, [('brain', - 'in_file')])]) -""" -Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) -""" - -mapping.connect([(FreeSurferSourceLH, mris_convertLH, [('pial', 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRH, [('pial', 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite, [('white', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite, [('white', - 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated, [('inflated', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated, [('inflated', - 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere, [('sphere', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere, [('sphere', - 'in_file')])]) -""" -The annotation files are converted using the pial surface as a map via the MRIsConvert interface. 
-One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files -specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. -""" - -mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, [('pial', - 'in_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, [('pial', - 'in_file')])]) -mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) -mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, - [(('annot', select_aparc_annot), 'annot_file')])]) -""" -Diffusion Processing --------------------- -Now we connect the tensor computations: -""" - -mapping.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), - ("bvals", "bval_file")])]) -mapping.connect([(inputnode, eddycorrect, [("dwi", "inputnode.in_file")])]) -mapping.connect([(eddycorrect, dwi2tensor, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", - "encoding_file")])]) - -mapping.connect([ - (dwi2tensor, tensor2vector, [['tensor', 'in_file']]), - (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), - (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), -]) -mapping.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) -mapping.connect([(tensor2fa, MRconvert_fa, [("FA", "in_file")])]) -""" -This block creates the rough brain mask to be multiplied, mulitplies it with the -fractional anisotropy image, and thresholds it to get the single-fiber voxels. -""" - -mapping.connect([(eddycorrect, MRconvert, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) -mapping.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) -mapping.connect([(median3d, erode_mask_firstpass, [("out_file", "in_file")])]) -mapping.connect([(erode_mask_firstpass, erode_mask_secondpass, [("out_file", - "in_file")])]) -mapping.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", "in2")])]) -mapping.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) -mapping.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) -""" -Here the thresholded white matter mask is created for seeding the tractography. -""" - -mapping.connect([(eddycorrect, bet, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(eddycorrect, gen_WM_mask, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) -mapping.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", - "encoding_file")])]) -mapping.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", - "in_file")])]) -""" -Next we estimate the fiber response distribution. -""" - -mapping.connect([(eddycorrect, estimateresponse, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", - "encoding_file")])]) -mapping.connect([(threshold_FA, estimateresponse, [("out_file", - "mask_image")])]) -""" -Run constrained spherical deconvolution. -""" - -mapping.connect([(eddycorrect, csdeconv, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", - "mask_image")])]) -mapping.connect([(estimateresponse, csdeconv, [("response", - "response_file")])]) -mapping.connect([(fsl2mrtrix, csdeconv, [("encoding_file", "encoding_file")])]) -""" -Connect the tractography and compute the tract density image. 
-""" - -mapping.connect([(threshold_wmmask, probCSDstreamtrack, [("out_file", - "seed_file")])]) -mapping.connect([(csdeconv, probCSDstreamtrack, [("spherical_harmonics_image", - "in_file")])]) -mapping.connect([(probCSDstreamtrack, tracks2prob, [("tracked", "in_file")])]) -mapping.connect([(eddycorrect, tracks2prob, [("outputnode.eddy_corrected", - "template_file")])]) -mapping.connect([(tracks2prob, MRconvert_tracks2prob, [("tract_image", - "in_file")])]) -""" -Structural Processing ---------------------- -First, we coregister the diffusion image to the structural image -""" - -mapping.connect([(eddycorrect, coregister, [("outputnode.eddy_corrected", - "in_file")])]) -mapping.connect([(mri_convert_Brain, coregister, [('out_file', 'reference')])]) -""" -The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI). -The connectivity matrix is created with the transformed .trk fibers and the parcellation file. -""" - -mapping.connect([(eddycorrect, tck2trk, [("outputnode.eddy_corrected", - "image_file")])]) -mapping.connect([(mri_convert_Brain, tck2trk, [("out_file", - "registration_image_file")])]) -mapping.connect([(coregister, tck2trk, [("out_matrix_file", "matrix_file")])]) -mapping.connect([(probCSDstreamtrack, tck2trk, [("tracked", "in_file")])]) -mapping.connect([(tck2trk, creatematrix, [("out_file", "tract_file")])]) -mapping.connect([(tck2trk, trk2tdi, [("out_file", "in_file")])]) -mapping.connect([(inputnode, creatematrix, [("subject_id", - "out_matrix_file")])]) -mapping.connect([(inputnode, creatematrix, [("subject_id", - "out_matrix_mat_file")])]) -mapping.connect([(parcellate, creatematrix, [("roi_file", "roi_file")])]) -mapping.connect([(parcellate, createnodes, [("roi_file", "roi_file")])]) -mapping.connect([(createnodes, creatematrix, [("node_network", - "resolution_network_file")])]) -""" -The merge nodes defined earlier are used here to create lists of the files which are -destined for the CFFConverter. 
-""" - -mapping.connect([(mris_convertLH, giftiSurfaces, [("converted", "in1")])]) -mapping.connect([(mris_convertRH, giftiSurfaces, [("converted", "in2")])]) -mapping.connect([(mris_convertLHwhite, giftiSurfaces, [("converted", "in3")])]) -mapping.connect([(mris_convertRHwhite, giftiSurfaces, [("converted", "in4")])]) -mapping.connect([(mris_convertLHinflated, giftiSurfaces, [("converted", - "in5")])]) -mapping.connect([(mris_convertRHinflated, giftiSurfaces, [("converted", - "in6")])]) -mapping.connect([(mris_convertLHsphere, giftiSurfaces, [("converted", - "in7")])]) -mapping.connect([(mris_convertRHsphere, giftiSurfaces, [("converted", - "in8")])]) -mapping.connect([(tessflow, giftiSurfaces, [("outputspec.meshes", "in9")])]) - -mapping.connect([(mris_convertLHlabels, giftiLabels, [("converted", "in1")])]) -mapping.connect([(mris_convertRHlabels, giftiLabels, [("converted", "in2")])]) - -mapping.connect([(parcellate, niftiVolumes, [("roi_file", "in1")])]) -mapping.connect([(eddycorrect, niftiVolumes, [("outputnode.eddy_corrected", - "in2")])]) -mapping.connect([(mri_convert_Brain, niftiVolumes, [("out_file", "in3")])]) - -mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file", "in1")])]) -mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file_mm", - "in2")])]) -mapping.connect([(creatematrix, fiberDataArrays, [("fiber_length_file", - "in3")])]) -mapping.connect([(creatematrix, fiberDataArrays, [("fiber_label_file", - "in4")])]) -""" -This block actually connects the merged lists to the CFF converter. We pass the surfaces -and volumes that are to be included, as well as the tracts and the network itself. The currently -running pipeline (dmri_connectivity_advanced.py) is also scraped and included in the CFF file. This -makes it easy for the user to examine the entire processing pathway used to generate the end -product. 
-""" - -mapping.connect([(giftiSurfaces, CFFConverter, [("out", "gifti_surfaces")])]) -mapping.connect([(giftiLabels, CFFConverter, [("out", "gifti_labels")])]) -mapping.connect([(creatematrix, CFFConverter, [("matrix_files", - "gpickled_networks")])]) -mapping.connect([(niftiVolumes, CFFConverter, [("out", "nifti_volumes")])]) -mapping.connect([(fiberDataArrays, CFFConverter, [("out", "data_files")])]) -mapping.connect([(creatematrix, CFFConverter, [("filtered_tractographies", - "tract_files")])]) -mapping.connect([(inputnode, CFFConverter, [("subject_id", "title")])]) -""" -The graph theoretical metrics are computed using the networkx workflow and placed in another CFF file -""" - -mapping.connect([(inputnode, networkx, [("subject_id", - "inputnode.extra_field")])]) -mapping.connect([(creatematrix, networkx, [("intersection_matrix_file", - "inputnode.network_file")])]) - -mapping.connect([(networkx, NxStatsCFFConverter, [("outputnode.network_files", - "gpickled_networks")])]) -mapping.connect([(giftiSurfaces, NxStatsCFFConverter, [("out", - "gifti_surfaces")])]) -mapping.connect([(giftiLabels, NxStatsCFFConverter, [("out", - "gifti_labels")])]) -mapping.connect([(niftiVolumes, NxStatsCFFConverter, [("out", - "nifti_volumes")])]) -mapping.connect([(fiberDataArrays, NxStatsCFFConverter, [("out", - "data_files")])]) -mapping.connect([(inputnode, NxStatsCFFConverter, [("subject_id", "title")])]) - -mapping.connect([(inputnode, cmats_to_csv, [("subject_id", - "inputnode.extra_field")])]) -mapping.connect([(creatematrix, cmats_to_csv, - [("matlab_matrix_files", "inputnode.matlab_matrix_files")])]) -""" -Create a higher-level workflow ------------------------------- -Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes -declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding -their names to the subject list and their data to the proper folders. -""" - -connectivity = pe.Workflow(name="connectivity") - -connectivity.base_dir = op.abspath('dmri_connectivity_advanced') -connectivity.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, mapping, - [('dwi', 'inputnode.dwi'), ('bvals', 'inputnode.bvals'), - ('bvecs', 'inputnode.bvecs')]), - (infosource, mapping, [('subject_id', - 'inputnode.subject_id')])]) -""" -The following functions run the whole workflow and produce a .dot and .png graph of the processing pipeline. -""" - -if __name__ == '__main__': - connectivity.run() - connectivity.write_graph() diff --git a/examples/dmri_dtk_dti.py b/examples/dmri_dtk_dti.py deleted file mode 100755 index cd02d16391..0000000000 --- a/examples/dmri_dtk_dti.py +++ /dev/null @@ -1,197 +0,0 @@ -#!/usr/bin/env python -""" -================================== -dMRI: DTI - Diffusion Toolkit, FSL -================================== - -A pipeline example that uses several interfaces to perform analysis on -diffusion weighted images using Diffusion Toolkit tools. - -This tutorial is based on the 2010 FSL course and uses data freely available at -the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -More details can be found at -http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm - -In order to run this tutorial you need to have Diffusion Toolkit and FSL tools -installed and accessible from matlab/command line. Check by calling fslinfo and -dtk from the command line. - -Tell python where to find the appropriate functions. 
-""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.fsl as fsl # fsl -import nipype.interfaces.diffusion_toolkit as dtk -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import os # system functions -from niflow.nipype1.workflows.dmri.fsl.dti import create_eddy_correct_pipeline -""" -Confirm package dependencies are installed. (This is only for the -tutorial, rarely would you put this in your own code.) -""" - -from nipype.utils.misc import package_check - -package_check('numpy', '1.3', 'tutorial1') -package_check('scipy', '0.7', 'tutorial1') -package_check('IPython', '0.10', 'tutorial1') -""" -Setting up workflows --------------------- -This is a generic workflow for DTI data analysis using the FSL - - -Data specific components ------------------------- - -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``dwis1`` and ``dwis2``. Each subject directory -contains each of the following files: bvec, bval, diffusion weighted data, a -set of target masks, a seed file, and a transformation matrix. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``dwi`` or ``bvals``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -Specify the subject directories -""" - -subject_list = ['subj1'] -""" -Map field names to individual subject runs -""" - -info = dict( - dwi=[['subject_id', 'data']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.engine.Node` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. 
-""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" - -# This needs to point to the fdt folder you can find after extracting -# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz -datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/') - -datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -Setup for Diffusion Tensor Computation --------------------------------------- -Here we will create a generic workflow for DTI computation -""" - -computeTensor = pe.Workflow(name='computeTensor') -""" -extract the volume with b=0 (nodif_brain) -""" - -fslroi = pe.Node(interface=fsl.ExtractROI(), name='fslroi') -fslroi.inputs.t_min = 0 -fslroi.inputs.t_size = 1 -""" -create a brain mask from the nodif_brain -""" - -bet = pe.Node(interface=fsl.BET(), name='bet') -bet.inputs.mask = True -bet.inputs.frac = 0.34 -""" -correct the diffusion weighted images for eddy_currents -""" - -eddycorrect = create_eddy_correct_pipeline('eddycorrect') -eddycorrect.inputs.inputnode.ref_num = 0 -""" -compute the diffusion tensor in each voxel -""" - -dtifit = pe.Node(interface=dtk.DTIRecon(), name='dtifit') -""" -connect all the nodes for this workflow -""" - -computeTensor.connect([(fslroi, bet, [('roi_file', 'in_file')]), - (eddycorrect, dtifit, [('outputnode.eddy_corrected', - 'DWI')])]) -""" -Setup for Tracktography ------------------------ -Here we will create a workflow to enable deterministic tracktography -""" - -tractography = pe.Workflow(name='tractography') - -dtk_tracker = pe.Node(interface=dtk.DTITracker(), name="dtk_tracker") -dtk_tracker.inputs.invert_x = True - -smooth_trk = pe.Node(interface=dtk.SplineFilter(), name="smooth_trk") -smooth_trk.inputs.step_length = 0.5 -""" -connect all the nodes for this workflow -""" - -tractography.connect([(dtk_tracker, smooth_trk, [('track_file', - 'track_file')])]) -""" -Setup data storage area -""" - -datasink = pe.Node(interface=nio.DataSink(), name='datasink') -datasink.inputs.base_directory = os.path.abspath('dtiresults') - - -def getstripdir(subject_id): - return os.path.join( - os.path.abspath('data/workingdir/dwiproc'), - '_subject_id_%s' % subject_id) - - -""" -Setup the pipeline that combines the 2 workflows: tractography & computeTensor ------------------------------------------------------------------------------- -""" - -dwiproc = pe.Workflow(name="dwiproc") -dwiproc.base_dir = os.path.abspath('dtk_dti_tutorial') -dwiproc.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, computeTensor, - [('dwi', 'fslroi.in_file'), ('bvals', 'dtifit.bvals'), - ('bvecs', 'dtifit.bvecs'), - ('dwi', 'eddycorrect.inputnode.in_file')]), - (computeTensor, tractography, - [('bet.mask_file', 'dtk_tracker.mask1_file'), - ('dtifit.tensor', 'dtk_tracker.tensor_file')])]) - -if __name__ == '__main__': - dwiproc.run() - dwiproc.write_graph() diff --git a/examples/dmri_dtk_odf.py b/examples/dmri_dtk_odf.py deleted file mode 100755 index 42a3b0e03a..0000000000 --- a/examples/dmri_dtk_odf.py +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/env python -""" -==================================== -dMRI: HARDI - Diffusion Toolkit, FSL -==================================== - -A pipeline example that uses several interfaces to perform analysis on -diffusion weighted images using Diffusion Toolkit tools. 
- -This tutorial is based on the 2010 FSL course and uses data freely available at -the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -More details can be found at -http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm - -In order to run this tutorial you need to have Diffusion Toolkit and FSL tools -installed and accessible from matlab/command line. Check by calling fslinfo and -dtk from the command line. - -Tell python where to find the appropriate functions. -""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.fsl as fsl # fsl -import nipype.interfaces.diffusion_toolkit as dtk -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import os # system functions -from niflow.nipype1.workflows.dmri.fsl.dti import create_eddy_correct_pipeline -""" -Confirm package dependencies are installed. (This is only for the -tutorial, rarely would you put this in your own code.) -""" - -from nipype.utils.misc import package_check - -package_check('numpy', '1.3', 'tutorial1') -package_check('scipy', '0.7', 'tutorial1') -package_check('IPython', '0.10', 'tutorial1') -""" -Setting up workflows --------------------- -This is a generic workflow for DTI data analysis using the FSL - - -Data specific components ------------------------- - -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``dwis1`` and ``dwis2``. Each subject directory -contains each of the following files: bvec, bval, diffusion weighted data, a -set of target masks, a seed file, and a transformation matrix. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``dwi`` or ``bvals``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -Specify the subject directories -""" - -subject_list = ['siemens_hardi_test'] -""" -Map field names to individual subject runs -""" - -info = dict( - dwi=[['subject_id', 'siemens_hardi_test_data']], - bvecs=[['subject_id', 'siemens_hardi_test_data.bvec']], - bvals=[['subject_id', 'siemens_hardi_test_data.bval']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.engine.Node` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. 
-""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" - -# This needs to point to the fdt folder you can find after extracting -# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz -datasource.inputs.base_directory = os.path.abspath('data') - -datasource.inputs.field_template = dict(dwi='%s/%s.nii') -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -Setup for ODF Computation --------------------------------------- -Here we will create a generic workflow for ODF computation -""" - -compute_ODF = pe.Workflow(name='compute_ODF') -""" -extract the volume with b=0 (nodif_brain) -""" - -fslroi = pe.Node(interface=fsl.ExtractROI(), name='fslroi') -fslroi.inputs.t_min = 0 -fslroi.inputs.t_size = 1 -""" -create a brain mask from the nodif_brain -""" - -bet = pe.Node(interface=fsl.BET(), name='bet') -bet.inputs.mask = True -bet.inputs.frac = 0.34 -""" -correct the diffusion weighted images for eddy_currents -""" - -eddycorrect = create_eddy_correct_pipeline('eddycorrect') -eddycorrect.inputs.inputnode.ref_num = 0 - -hardi_mat = pe.Node(interface=dtk.HARDIMat(), name='hardi_mat') - -odf_recon = pe.Node(interface=dtk.ODFRecon(), name='odf_recon') -""" -connect all the nodes for this workflow -""" - -compute_ODF.connect( - [(fslroi, bet, [('roi_file', 'in_file')]), - (eddycorrect, odf_recon, [('outputnode.eddy_corrected', 'DWI')]), - (eddycorrect, hardi_mat, - [('outputnode.eddy_corrected', - 'reference_file')]), (hardi_mat, odf_recon, [('out_file', 'matrix')])]) -""" -Setup for Tracktography ------------------------ -Here we will create a workflow to enable deterministic tracktography -""" - -tractography = pe.Workflow(name='tractography') - -odf_tracker = pe.Node(interface=dtk.ODFTracker(), name="odf_tracker") - -smooth_trk = pe.Node(interface=dtk.SplineFilter(), name="smooth_trk") -smooth_trk.inputs.step_length = 1 -""" -connect all the nodes for this workflow -""" - -tractography.connect([(odf_tracker, smooth_trk, [('track_file', - 'track_file')])]) -""" -Setup the pipeline that combines the 2 workflows: tractography and compute_ODF ------------------------------------------------------------------------------- -""" - -dwiproc = pe.Workflow(name="dwiproc") -dwiproc.base_dir = os.path.abspath('dtk_odf_tutorial') -dwiproc.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, compute_ODF, - [('dwi', 'fslroi.in_file'), ('bvals', 'hardi_mat.bvals'), - ('bvecs', 'hardi_mat.bvecs'), - ('dwi', 'eddycorrect.inputnode.in_file')]), - (compute_ODF, tractography, - [('bet.mask_file', 'odf_tracker.mask1_file'), - ('odf_recon.ODF', 'odf_tracker.ODF'), - ('odf_recon.max', 'odf_tracker.max')])]) - -dwiproc.inputs.compute_ODF.hardi_mat.oblique_correction = True -dwiproc.inputs.compute_ODF.odf_recon.n_directions = 31 -dwiproc.inputs.compute_ODF.odf_recon.n_b0 = 5 -dwiproc.inputs.compute_ODF.odf_recon.n_output_directions = 181 - -if __name__ == '__main__': - dwiproc.run() - dwiproc.write_graph() diff --git a/examples/dmri_fsl_dti.py b/examples/dmri_fsl_dti.py deleted file mode 100755 index ffd114d2b3..0000000000 --- a/examples/dmri_fsl_dti.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env python -""" -============== -dMRI: DTI, FSL -============== - -A pipeline example that uses several interfaces to perform analysis on -diffusion weighted images using FSL FDT tools. 
- -This tutorial is based on the 2010 FSL course and uses data freely available at -the FSL website at: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -More details can be found at -http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/fdt/index.htm - -In order to run this tutorial you need to have fsl tools installed and -accessible from matlab/command line. Check by calling fslinfo from the command -line. - -Tell python where to find the appropriate functions. -""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.fsl as fsl # fsl -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import os # system functions -from niflow.nipype1.workflows.dmri.fsl.dti import create_eddy_correct_pipeline,\ - create_bedpostx_pipeline -""" -Confirm package dependencies are installed. (This is only for the -tutorial, rarely would you put this in your own code.) -""" - -from nipype.utils.misc import package_check - -package_check('numpy', '1.3', 'tutorial1') -package_check('scipy', '0.7', 'tutorial1') -package_check('IPython', '0.10', 'tutorial1') -""" -Setting up workflows --------------------- - -This is a generic workflow for DTI data analysis using the FSL - -Data specific components ------------------------- - -The nipype tutorial contains data for two subjects. Subject data is in two -subdirectories, ``dwis1`` and ``dwis2``. Each subject directory contains each -of the following files: bvec, bval, diffusion weighted data, a set of target -masks, a seed file, and a transformation matrix. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``dwi`` or ``bvals``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -Specify the subject directories -""" - -subject_list = ['subj1'] -""" -Map field names to individual subject runs -""" - -info = dict( - dwi=[['subject_id', 'data']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']], - seed_file=[['subject_id', 'MASK_average_thal_right']], - target_masks=[[ - 'subject_id', [ - 'MASK_average_M1_right', 'MASK_average_S1_right', - 'MASK_average_occipital_right', 'MASK_average_pfc_right', - 'MASK_average_pmc_right', 'MASK_average_ppc_right', - 'MASK_average_temporal_right' - ] - ]]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -""" -Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.engine.Node` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. 
-""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" - -# This needs to point to the fdt folder you can find after extracting -# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz -datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/') - -datasource.inputs.field_template = dict( - dwi='%s/%s.nii.gz', - seed_file="%s.bedpostX/%s.nii.gz", - target_masks="%s.bedpostX/%s.nii.gz") -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -Setup for Diffusion Tensor Computation --------------------------------------- - -Here we will create a generic workflow for DTI computation -""" - -computeTensor = pe.Workflow(name='computeTensor') -""" -extract the volume with b=0 (nodif_brain) -""" - -fslroi = pe.Node(interface=fsl.ExtractROI(), name='fslroi') -fslroi.inputs.t_min = 0 -fslroi.inputs.t_size = 1 -""" -create a brain mask from the nodif_brain -""" - -bet = pe.Node(interface=fsl.BET(), name='bet') -bet.inputs.mask = True -bet.inputs.frac = 0.34 -""" -correct the diffusion weighted images for eddy_currents -""" - -eddycorrect = create_eddy_correct_pipeline('eddycorrect') -eddycorrect.inputs.inputnode.ref_num = 0 -""" -compute the diffusion tensor in each voxel -""" - -dtifit = pe.Node(interface=fsl.DTIFit(), name='dtifit') -""" -connect all the nodes for this workflow -""" - -computeTensor.connect( - [(fslroi, bet, [('roi_file', 'in_file')]), - (eddycorrect, dtifit, [('outputnode.eddy_corrected', 'dwi')]), - (infosource, dtifit, - [['subject_id', 'base_name']]), (bet, dtifit, [('mask_file', 'mask')])]) -""" -Setup for Tracktography ------------------------ - -Here we will create a workflow to enable probabilistic tracktography -and hard segmentation of the seed region -""" - -tractography = pe.Workflow(name='tractography') -tractography.base_dir = os.path.abspath('fsl_dti_tutorial') -""" -estimate the diffusion parameters: phi, theta, and so on -""" - -bedpostx = create_bedpostx_pipeline() -bedpostx.get_node("xfibres").iterables = ("n_fibres", [1, 2]) - -flirt = pe.Node(interface=fsl.FLIRT(), name='flirt') -flirt.inputs.in_file = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz') -flirt.inputs.dof = 12 -""" -perform probabilistic tracktography -""" - -probtrackx = pe.Node(interface=fsl.ProbTrackX(), name='probtrackx') -probtrackx.inputs.mode = 'seedmask' -probtrackx.inputs.c_thresh = 0.2 -probtrackx.inputs.n_steps = 2000 -probtrackx.inputs.step_length = 0.5 -probtrackx.inputs.n_samples = 5000 -probtrackx.inputs.opd = True -probtrackx.inputs.os2t = True -probtrackx.inputs.loop_check = True -""" -perform hard segmentation on the output of probtrackx -""" - -findthebiggest = pe.Node(interface=fsl.FindTheBiggest(), name='findthebiggest') -""" -connect all the nodes for this workflow -""" - -tractography.add_nodes([bedpostx, flirt]) -tractography.connect([(bedpostx, probtrackx, - [('outputnode.thsamples', - 'thsamples'), ('outputnode.phsamples', 'phsamples'), - ('outputnode.fsamples', 'fsamples')]), - (probtrackx, findthebiggest, [('targets', 'in_files')]), - (flirt, probtrackx, [('out_matrix_file', 'xfm')])]) -""" -Setup data storage area -""" - -datasink = pe.Node(interface=nio.DataSink(), name='datasink') -datasink.inputs.base_directory = os.path.abspath('dtiresults') - - -def getstripdir(subject_id): - import os - return os.path.join( - os.path.abspath('data/workingdir/dwiproc'), - '_subject_id_%s' % subject_id) - - 
-""" -Setup the pipeline that combines the 2 workflows: tractography & computeTensor ------------------------------------------------------------------------------- -""" - -dwiproc = pe.Workflow(name="dwiproc") -dwiproc.base_dir = os.path.abspath('fsl_dti_tutorial') -dwiproc.connect( - [(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, computeTensor, - [('dwi', 'fslroi.in_file'), ('bvals', 'dtifit.bvals'), - ('bvecs', 'dtifit.bvecs'), ('dwi', 'eddycorrect.inputnode.in_file')]), - (datasource, tractography, - [('bvals', 'bedpostx.inputnode.bvals'), - ('bvecs', 'bedpostx.inputnode.bvecs'), ('seed_file', 'probtrackx.seed'), - ('target_masks', 'probtrackx.target_masks')]), - (computeTensor, tractography, - [('eddycorrect.outputnode.eddy_corrected', 'bedpostx.inputnode.dwi'), - ('bet.mask_file', 'bedpostx.inputnode.mask'), ('bet.mask_file', - 'probtrackx.mask'), - ('fslroi.roi_file', 'flirt.reference')]), (infosource, datasink, [ - ('subject_id', 'container'), (('subject_id', getstripdir), - 'strip_dir') - ]), (tractography, datasink, [('findthebiggest.out_file', - 'fbiggest.@biggestsegmentation')])]) - -if __name__ == '__main__': - dwiproc.run() - dwiproc.write_graph() diff --git a/examples/dmri_group_connectivity_camino.py b/examples/dmri_group_connectivity_camino.py deleted file mode 100644 index 8dbceb606c..0000000000 --- a/examples/dmri_group_connectivity_camino.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -================================================== -dMRI: Group connectivity - Camino, FSL, FreeSurfer -================================================== - -Introduction -============ - -This script, dmri_group_connectivity_camino.py, runs group-based connectivity analysis using -the dmri.camino.connectivity_mapping Nipype workflow. Further detail on the processing can be -found in :doc:`dmri_connectivity`. This tutorial can be run using:: - - python dmri_group_connectivity_camino.py - -We perform this analysis using one healthy subject and two subjects who suffer from Parkinson's disease. - -The whole package (960 mb as .tar.gz / 1.3 gb uncompressed) including the Freesurfer directories for these subjects, can be acquired from here: - - * http://db.tt/b6F1t0QV - -A data package containing the outputs of this pipeline can be obtained from here: - - * http://db.tt/kNvAI751 - -Along with Camino, Camino-Trackvis, FSL, and Freesurfer, you must also have the Connectome File Format -library installed as well as the Connectome Mapper. - - * Camino: http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Main.HomePage - * Camino-Trackvis: http://www.nitrc.org/projects/camino-trackvis/ - * FSL: http://www.fmrib.ox.ac.uk/fsl/ - * Freesurfer: http://surfer.nmr.mgh.harvard.edu/ - * CTMK: http://www.cmtk.org/ - * CFF: sudo apt-get install python-cfflib - -Or on github at: - - * CFFlib: https://github.com/LTS5/cfflib - * CMP: https://github.com/LTS5/cmp - -Output data can be visualized in ConnectomeViewer, TrackVis, -and anything that can view Nifti files. - - * ConnectomeViewer: https://github.com/LTS5/connectomeviewer - * TrackVis: http://trackvis.org/ - -The fiber data is available in Numpy arrays, and the connectivity matrix -is also produced as a MATLAB matrix. - - - -Import the workflows --------------------- -First, we import the necessary modules from nipype. 
-""" - -import nipype.interfaces.fsl as fsl -import nipype.interfaces.freesurfer as fs # freesurfer -import os.path as op # system functions -import cmp -from niflow.nipype1.workflows.dmri.camino.group_connectivity import create_group_connectivity_pipeline -from niflow.nipype1.workflows.dmri.connectivity.group_connectivity import ( - create_merge_networks_by_group_workflow, - create_merge_group_networks_workflow, - create_average_networks_by_group_workflow) -""" -Set the proper directories --------------------------- -First, we import the necessary modules from nipype. -""" - -fs_dir = op.abspath('/usr/local/freesurfer') -subjects_dir = op.abspath('groupcondatapackage/subjects/') -data_dir = op.abspath('groupcondatapackage/data/') -fs.FSCommand.set_default_subjects_dir(subjects_dir) -fsl.FSLCommand.set_default_output_type('NIFTI') -""" -Define the groups ------------------ -Here we define the groups for this study. We would like to search for differences between the healthy subject and the two -vegetative patients. The group list is defined as a Python dictionary (see http://docs.python.org/tutorial/datastructures.html), -with group IDs ('controls', 'parkinsons') as keys, and subject/patient names as values. We set the main output directory as 'groupcon'. -""" - -group_list = {} -group_list['controls'] = ['cont17'] -group_list['parkinsons'] = ['pat10', 'pat20'] -""" -The output directory must be named as well. -""" - -global output_dir -output_dir = op.abspath('dmri_group_connectivity_camino') -""" -Main processing loop -==================== -The title for the final grouped-network connectome file is dependent on the group names. The resulting file for this example -is 'parkinsons-controls.cff'. The following code implements the format a-b-c-...x.cff for an arbitary number of groups. - -.. warning:: - - The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dwi'. - The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme. - -This line creates the processing workflow given the information input about the groups and subjects. - -.. seealso:: - - * nipype/workflows/dmri/mrtrix/group_connectivity.py - * nipype/workflows/dmri/camino/connectivity_mapping.py - * :doc:`dmri_connectivity` - -The purpose of the second-level workflow is simple: It is used to merge each -subject's CFF file into one, so that there is a single file containing all of the -networks for each group. This can be useful for performing Network Brain Statistics -using the NBS plugin in ConnectomeViewer. - -.. 
seealso:: - - http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html - -""" - -title = '' -for idx, group_id in enumerate(group_list.keys()): - title += group_id - if not idx == len(list(group_list.keys())) - 1: - title += '-' - - info = dict( - dwi=[['subject_id', 'dti']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) - - l1pipeline = create_group_connectivity_pipeline( - group_list, group_id, data_dir, subjects_dir, output_dir, info) - - # Here we define the parcellation scheme and the number of tracks to produce - parcellation_scheme = 'NativeFreesurfer' - cmp_config = cmp.configuration.PipelineConfiguration() - cmp_config.parcellation_scheme = parcellation_scheme - l1pipeline.inputs.connectivity.inputnode.resolution_network_file = cmp_config._get_lausanne_parcellation( - parcellation_scheme)['freesurferaparc']['node_information_graphml'] - - l1pipeline.run() - l1pipeline.write_graph(format='eps', graph2use='flat') - - # The second-level pipeline is created here - l2pipeline = create_merge_networks_by_group_workflow( - group_list, group_id, data_dir, subjects_dir, output_dir) - l2pipeline.run() - l2pipeline.write_graph(format='eps', graph2use='flat') -""" -Now that the for loop is complete there are two grouped CFF files each containing the appropriate subjects. -It is also convenient to have every subject in a single CFF file, so that is what the third-level pipeline does. -""" - -l3pipeline = create_merge_group_networks_workflow( - group_list, data_dir, subjects_dir, output_dir, title) -l3pipeline.run() -l3pipeline.write_graph(format='eps', graph2use='flat') -""" -The fourth and final workflow averages the networks and saves them in another CFF file -""" - -l4pipeline = create_average_networks_by_group_workflow( - group_list, data_dir, subjects_dir, output_dir, title) -l4pipeline.run() -l4pipeline.write_graph(format='eps', graph2use='flat') diff --git a/examples/dmri_group_connectivity_mrtrix.py b/examples/dmri_group_connectivity_mrtrix.py deleted file mode 100644 index cbe7ef7003..0000000000 --- a/examples/dmri_group_connectivity_mrtrix.py +++ /dev/null @@ -1,184 +0,0 @@ -""" -================================================== -dMRI: Group connectivity - MRtrix, FSL, FreeSurfer -================================================== - -Introduction -============ - -This script, dmri_group_connectivity_mrtrix.py, runs group-based connectivity analysis using -the dmri.mrtrix.connectivity_mapping Nipype workflow. Further detail on the processing can be -found in :doc:`dmri_connectivity_advanced`. This tutorial can be run using:: - - python dmri_group_connectivity_mrtrix.py - -We perform this analysis using one healthy subject and two subjects who suffer from Parkinson's disease. - -The whole package (960 mb as .tar.gz / 1.3 gb uncompressed) including the Freesurfer directories for these subjects, can be acquired from here: - - * http://db.tt/b6F1t0QV - -A data package containing the outputs of this pipeline can be obtained from here: - - * http://db.tt/elmMnIt1 - -Along with MRtrix, FSL, and Freesurfer, you must also have the Connectome File Format -library installed as well as the Connectome Mapper (cmp). 
- - * MRtrix: http://www.brain.org.au/software/mrtrix/ - * FSL: http://www.fmrib.ox.ac.uk/fsl/ - * Freesurfer: http://surfer.nmr.mgh.harvard.edu/ - * CTMK: http://www.cmtk.org/ - * CFF: sudo apt-get install python-cfflib - -Or on github at: - - * CFFlib: https://github.com/LTS5/cfflib - * CMP: https://github.com/LTS5/cmp - -Output data can be visualized in ConnectomeViewer, TrackVis, Gephi, -the MRtrix Viewer (mrview), and anything that can view Nifti files. - - * ConnectomeViewer: https://github.com/LTS5/connectomeviewer - * TrackVis: http://trackvis.org/ - * Gephi: http://gephi.org/ - -The fiber data is available in Numpy arrays, and the connectivity matrix -is also produced as a MATLAB matrix. - - - -Import the workflows --------------------- -First, we import the necessary modules from nipype. -""" - -import nipype.interfaces.fsl as fsl -import nipype.interfaces.freesurfer as fs # freesurfer -import os.path as op # system functions -import cmp -from niflow.nipype1.workflows.dmri.mrtrix.group_connectivity import create_group_connectivity_pipeline -from niflow.nipype1.workflows.dmri.connectivity.group_connectivity import ( - create_merge_network_results_by_group_workflow, - create_merge_group_network_results_workflow, - create_average_networks_by_group_workflow) -""" -Set the proper directories --------------------------- -First, we import the necessary modules from nipype. -""" - -subjects_dir = op.abspath('groupcondatapackage/subjects/') -data_dir = op.abspath('groupcondatapackage/data/') -fs.FSCommand.set_default_subjects_dir(subjects_dir) -fsl.FSLCommand.set_default_output_type('NIFTI') -""" -Define the groups ------------------ -Here we define the groups for this study. We would like to search for differences between the healthy subject and the two -vegetative patients. The group list is defined as a Python dictionary (see http://docs.python.org/tutorial/datastructures.html), -with group IDs ('controls', 'parkinsons') as keys, and subject/patient names as values. We set the main output directory as 'groupcon'. -""" - -group_list = {} -group_list['controls'] = ['cont17'] -group_list['parkinsons'] = ['pat10', 'pat20'] -""" -The output directory must be named as well. -""" - -global output_dir -output_dir = op.abspath('dmri_group_connectivity_mrtrix') -""" -Main processing loop -==================== -The title for the final grouped-network connectome file is dependent on the group names. The resulting file for this example -is 'parkinsons-controls.cff'. The following code implements the format a-b-c-...x.cff for an arbitary number of groups. - -.. warning:: - - The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dti'. - The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme. - -The workflow is created given the information input about the groups and subjects. - -.. seealso:: - - * nipype/workflows/dmri/mrtrix/group_connectivity.py - * nipype/workflows/dmri/mrtrix/connectivity_mapping.py - * :doc:`dmri_connectivity_advanced` - -We set values for absolute threshold used on the fractional anisotropy map. This is done -in order to identify single-fiber voxels. In brains with more damage, however, it may be necessary -to reduce the threshold, since their brains are have lower average fractional anisotropy values. - -We invert the b-vectors in the encoding file, and set the maximum harmonic order -of the pre-tractography spherical deconvolution step. 
This is done to show -how to set inputs that will affect both groups. - -Next we create and run the second-level pipeline. The purpose of this workflow is simple: -It is used to merge each subject's CFF file into one, so that there is a single file containing -all of the networks for each group. This can be useful for performing Network Brain Statistics -using the NBS plugin in ConnectomeViewer. - -.. seealso:: - - http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html - -""" - -title = '' -for idx, group_id in enumerate(group_list.keys()): - title += group_id - if not idx == len(list(group_list.keys())) - 1: - title += '-' - - info = dict( - dwi=[['subject_id', 'dti']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) - - l1pipeline = create_group_connectivity_pipeline( - group_list, group_id, data_dir, subjects_dir, output_dir, info) - - # Here with invert the b-vectors in the Y direction and set the maximum harmonic order of the - # spherical deconvolution step - l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True - l1pipeline.inputs.connectivity.mapping.csdeconv.maximum_harmonic_order = 6 - - # Here we define the parcellation scheme and the number of tracks to produce - parcellation_name = 'scale500' - l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name - cmp_config = cmp.configuration.PipelineConfiguration() - cmp_config.parcellation_scheme = "Lausanne2008" - l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation( - 'Lausanne2008')[parcellation_name]['node_information_graphml'] - l1pipeline.inputs.connectivity.mapping.probCSDstreamtrack.desired_number_of_tracks = 100000 - - l1pipeline.run() - l1pipeline.write_graph(format='eps', graph2use='flat') - - # The second-level pipeline is created here - l2pipeline = create_merge_network_results_by_group_workflow( - group_list, group_id, data_dir, subjects_dir, output_dir) - l2pipeline.inputs.l2inputnode.network_file = cmp_config._get_lausanne_parcellation( - 'Lausanne2008')[parcellation_name]['node_information_graphml'] - l2pipeline.run() - l2pipeline.write_graph(format='eps', graph2use='flat') -""" -Now that the for loop is complete there are two grouped CFF files each containing the appropriate subjects. -It is also convenient to have every subject in a single CFF file, so that is what the third-level pipeline does. 
-""" - -l3pipeline = create_merge_group_network_results_workflow( - group_list, data_dir, subjects_dir, output_dir, title) -l3pipeline.run() -l3pipeline.write_graph(format='eps', graph2use='flat') -""" -The fourth and final workflow averages the networks and saves them in another CFF file -""" - -l4pipeline = create_average_networks_by_group_workflow( - group_list, data_dir, subjects_dir, output_dir, title) -l4pipeline.run() -l4pipeline.write_graph(format='eps', graph2use='flat') diff --git a/examples/dmri_mrtrix_dti.py b/examples/dmri_mrtrix_dti.py deleted file mode 100755 index 065c5ac99f..0000000000 --- a/examples/dmri_mrtrix_dti.py +++ /dev/null @@ -1,271 +0,0 @@ -#!/usr/bin/env python -""" -======================= -dMRI: DTI - MRtrix, FSL -======================= - -Introduction -============ - -This script, dmri_mrtrix_dti.py, demonstrates the ability to perform advanced diffusion analysis -in a Nipype pipeline:: - - python dmri_mrtrix_dti.py - -We perform this analysis using the FSL course data, which can be acquired from here: - - * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -Import necessary modules from nipype. -""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.interfaces.mrtrix as mrtrix # <---- The important new part! -import nipype.interfaces.fsl as fsl -import nipype.algorithms.misc as misc -import os -import os.path as op # system functions - -fsl.FSLCommand.set_default_output_type('NIFTI') -""" -This needs to point to the fdt folder you can find after extracting - - * http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -""" - -data_dir = op.abspath(op.join(op.curdir, 'exdata/')) -subject_list = ['subj1'] -""" -Use infosource node to loop through the subject list and define the input files. -For our purposes, these are the diffusion-weighted MR image, b vectors, and b values. -""" - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -infosource.iterables = ('subject_id', subject_list) - -info = dict( - dwi=[['subject_id', 'data']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']]) -""" -Use datasource node to perform the actual data grabbing. -Templates for the associated images are used to obtain the correct images. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" -datasource.inputs.base_directory = data_dir -datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz') -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -An inputnode is used to pass the data obtained by the data grabber to the actual processing functions -""" - -inputnode = pe.Node( - interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), - name="inputnode") -""" -Diffusion processing nodes --------------------------- - -.. seealso:: - - dmri_connectivity_advanced.py - Tutorial with further detail on using MRtrix tractography for connectivity analysis - - http://www.brain.org.au/software/mrtrix/index.html - MRtrix's online documentation - -b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. 
-""" - -fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') -""" -Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: - * Major eigenvector in each voxel - * Apparent diffusion coefficient - * Fractional anisotropy - -""" - -gunzip = pe.Node(interface=misc.Gunzip(), name='gunzip') -dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') -tensor2vector = pe.Node(interface=mrtrix.Tensor2Vector(), name='tensor2vector') -tensor2adc = pe.Node( - interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') -tensor2fa = pe.Node( - interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') -""" -These nodes are used to create a rough brain mask from the b0 image. -The b0 image is extracted from the original diffusion-weighted image, -put through a simple thresholding routine, and smoothed using a 3x3 median filter. -""" - -MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') -MRconvert.inputs.extract_at_axis = 3 -MRconvert.inputs.extract_at_coordinate = [0] -threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') -median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3d') -""" -The brain mask is also used to help identify single-fiber voxels. -This is done by passing the brain mask through two erosion steps, -multiplying the remaining mask with the fractional anisotropy map, and -thresholding the result to obtain some highly anisotropic within-brain voxels. -""" - -erode_mask_firstpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_firstpass') -erode_mask_secondpass = pe.Node( - interface=mrtrix.Erode(), name='erode_mask_secondpass') -MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') -MRmult_merge = pe.Node(interface=util.Merge(2), name="MRmultiply_merge") -threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') -threshold_FA.inputs.absolute_threshold_value = 0.7 -""" -For whole-brain tracking we also require a broad white-matter seed mask. -This is created by generating a white matter mask, given a brainmask, and -thresholding it at a reasonably high level. -""" - -bet = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') -gen_WM_mask = pe.Node( - interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') -threshold_wmmask = pe.Node( - interface=mrtrix.Threshold(), name='threshold_wmmask') -threshold_wmmask.inputs.absolute_threshold_value = 0.4 -""" -The spherical deconvolution step depends on the estimate of the response function -in the highly anisotropic voxels we obtained above. - -.. warning:: - - For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. - -""" - -estimateresponse = pe.Node( - interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') -estimateresponse.inputs.maximum_harmonic_order = 6 -csdeconv = pe.Node( - interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') -csdeconv.inputs.maximum_harmonic_order = 6 -""" -Finally, we track probabilistically using the orientation distribution functions obtained earlier. -The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. 
-""" - -probCSDstreamtrack = pe.Node( - interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), - name='probCSDstreamtrack') -probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' -probCSDstreamtrack.inputs.maximum_number_of_tracks = 150000 -tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') -tracks2prob.inputs.colour = True -tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') -""" -Creating the workflow ---------------------- -In this section we connect the nodes for the diffusion processing. -""" - -tractography = pe.Workflow(name='tractography') - -tractography.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), - ("bvals", "bval_file")])]) -tractography.connect([(inputnode, gunzip, [("dwi", "in_file")])]) -tractography.connect([(gunzip, dwi2tensor, [("out_file", "in_file")])]) -tractography.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", - "encoding_file")])]) - -tractography.connect([ - (dwi2tensor, tensor2vector, [['tensor', 'in_file']]), - (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), - (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), -]) -tractography.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) -""" -This block creates the rough brain mask to be multiplied, mulitplies it with the -fractional anisotropy image, and thresholds it to get the single-fiber voxels. -""" - -tractography.connect([(gunzip, MRconvert, [("out_file", "in_file")])]) -tractography.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) -tractography.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) -tractography.connect([(median3d, erode_mask_firstpass, [("out_file", - "in_file")])]) -tractography.connect([(erode_mask_firstpass, erode_mask_secondpass, - [("out_file", "in_file")])]) -tractography.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", - "in2")])]) -tractography.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) -tractography.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) -""" -Here the thresholded white matter mask is created for seeding the tractography. -""" - -tractography.connect([(gunzip, bet, [("out_file", "in_file")])]) -tractography.connect([(gunzip, gen_WM_mask, [("out_file", "in_file")])]) -tractography.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) -tractography.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", - "encoding_file")])]) -tractography.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", - "in_file")])]) -""" -Next we estimate the fiber response distribution. -""" - -tractography.connect([(gunzip, estimateresponse, [("out_file", "in_file")])]) -tractography.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", - "encoding_file")])]) -tractography.connect([(threshold_FA, estimateresponse, [("out_file", - "mask_image")])]) -""" -Run constrained spherical deconvolution. -""" - -tractography.connect([(gunzip, csdeconv, [("out_file", "in_file")])]) -tractography.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", - "mask_image")])]) -tractography.connect([(estimateresponse, csdeconv, [("response", - "response_file")])]) -tractography.connect([(fsl2mrtrix, csdeconv, [("encoding_file", - "encoding_file")])]) -""" -Connect the tractography and compute the tract density image. 
-""" - -tractography.connect([(threshold_wmmask, probCSDstreamtrack, [("out_file", - "seed_file")])]) -tractography.connect([(csdeconv, probCSDstreamtrack, - [("spherical_harmonics_image", "in_file")])]) -tractography.connect([(probCSDstreamtrack, tracks2prob, [("tracked", - "in_file")])]) -tractography.connect([(gunzip, tracks2prob, [("out_file", "template_file")])]) - -tractography.connect([(gunzip, tck2trk, [("out_file", "image_file")])]) -tractography.connect([(probCSDstreamtrack, tck2trk, [("tracked", "in_file")])]) -""" -Finally, we create another higher-level workflow to connect our tractography workflow with the info and datagrabbing nodes -declared at the beginning. Our tutorial is now extensible to any arbitrary number of subjects by simply adding -their names to the subject list and their data to the proper folders. -""" - -dwiproc = pe.Workflow(name="dwiproc") -dwiproc.base_dir = os.path.abspath('dmri_mrtrix_dti') -dwiproc.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, tractography, - [('dwi', 'inputnode.dwi'), ('bvals', 'inputnode.bvals'), - ('bvecs', 'inputnode.bvecs')])]) - -if __name__ == '__main__': - dwiproc.run() - dwiproc.write_graph() diff --git a/examples/dmri_preprocessing.py b/examples/dmri_preprocessing.py deleted file mode 100644 index 1efc4e2e05..0000000000 --- a/examples/dmri_preprocessing.py +++ /dev/null @@ -1,171 +0,0 @@ -# coding: utf-8 -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -=================== -dMRI: Preprocessing -=================== - -Introduction -============ - -This script, dmri_preprocessing.py, demonstrates how to prepare dMRI data -for tractography and connectivity analysis with nipype. - -We perform this analysis using the FSL course data, which can be acquired from -here: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz - -Can be executed in command line using ``python dmri_preprocessing.py`` - - -Import necessary modules from nipype. -""" - -import os # system functions -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as niu # utility -import nipype.algorithms.misc as misc - -import nipype.pipeline.engine as pe # pypeline engine - -from nipype.interfaces import fsl -from nipype.interfaces import ants -""" -Load specific nipype's workflows for preprocessing of dMRI data: -:class:`niflow.nipype1.workflows.dmri.preprocess.epi.all_peb_pipeline`, -as data include a *b0* volume with reverse encoding direction -(*P>>>A*, or *y*), in contrast with the general acquisition encoding -that is *A>>>P* or *-y* (in RAS systems). -""" - -from niflow.nipype1.workflows.dmri.fsl.artifacts import all_fsl_pipeline, remove_bias -""" -Map field names into individual subject runs -""" - -info = dict( - dwi=[['subject_id', 'dwidata']], - bvecs=[['subject_id', 'bvecs']], - bvals=[['subject_id', 'bvals']], - dwi_rev=[['subject_id', 'nodif_PA']]) - -infosource = pe.Node( - interface=niu.IdentityInterface(fields=['subject_id']), name="infosource") - -# Set the subject 1 identifier in subject_list, -# we choose the preproc dataset as it contains uncorrected files. -subject_list = ['subj1_preproc'] -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. 
In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`~nipype.pipeline.engine.Node` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - nio.DataGrabber(infields=['subject_id'], outfields=list(info.keys())), - name='datasource') - -datasource.inputs.template = "%s/%s" - -# This needs to point to the fdt folder you can find after extracting -# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz -datasource.inputs.base_directory = os.path.abspath('fdt1') -datasource.inputs.field_template = dict( - dwi='%s/%s.nii.gz', dwi_rev='%s/%s.nii.gz') -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -An inputnode is used to pass the data obtained by the data grabber to the -actual processing functions -""" - -inputnode = pe.Node( - niu.IdentityInterface(fields=["dwi", "bvecs", "bvals", "dwi_rev"]), - name="inputnode") -""" - -Setup for dMRI preprocessing -============================ - -In this section we initialize the appropriate workflow for preprocessing of -diffusion images. - -Artifacts correction --------------------- - -We will use the combination of ``topup`` and ``eddy`` as suggested by FSL. - -In order to configure the susceptibility distortion correction (SDC), we first -write the specific parameters of our echo-planar imaging (EPI) images. - -Particularly, we look into the ``acqparams.txt`` file of the selected subject -to gather the encoding direction, acceleration factor (in parallel sequences -it is > 1), and readout time or echospacing. - -""" - -epi_AP = {'echospacing': 66.5e-3, 'enc_dir': 'y-'} -epi_PA = {'echospacing': 66.5e-3, 'enc_dir': 'y'} -prep = all_fsl_pipeline(epi_params=epi_AP, altepi_params=epi_PA) -""" - -Bias field correction ---------------------- - -Finally, we set up a node to correct for a single multiplicative bias field -from computed on the *b0* image, as suggested in [Jeurissen2014]_. - -""" - -bias = remove_bias() - -""" -Connect nodes in workflow -========================= - -We create a higher level workflow to connect the nodes. Please excuse the -author for writing the arguments of the ``connect`` function in a not-standard -style with readability aims. -""" - -wf = pe.Workflow(name="dMRI_Preprocessing") -wf.base_dir = os.path.abspath('preprocessing_dmri_tutorial') -wf.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, prep, - [('dwi', 'inputnode.in_file'), ('dwi_rev', 'inputnode.alt_file'), - ('bvals', 'inputnode.in_bval'), ('bvecs', 'inputnode.in_bvec')]), - (prep, bias, [('outputnode.out_file', 'inputnode.in_file'), - ('outputnode.out_mask', 'inputnode.in_mask')]), - (datasource, bias, [('bvals', 'inputnode.in_bval')])]) - -""" -Run the workflow as command line executable -""" - -if __name__ == '__main__': - wf.run() - wf.write_graph() - -""" -References ----------- - -.. [Jeurissen2014] Jeurissen et al., Multi-tissue constrained spherical deconvolution - for improved analysis of multi-shell diffusion MRI data. - NeuroImage 103:411--426. 2014. - doi:`10.1016/j.neuroimage.2014.07.061 - `__. 
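Returning to the susceptibility-correction parameters defined earlier: the two dictionaries correspond to the information found in the subject's ``acqparams.txt``, the file consumed by FSL ``topup``, whose rows follow the ``x y z readout_time`` convention. Below is a minimal sketch of deriving such a dictionary directly from that file. It assumes, as in the hand-written dictionaries above, that the readout value is stored under the ``echospacing`` key; the helper name and the path shown in the comment are hypothetical::

    import numpy as np

    def epi_params_from_acqparams(acqparams_file, row):
        # One acqparams.txt row: phase-encode vector (x, y, z) plus readout time.
        x, y, z, readout = np.loadtxt(acqparams_file, ndmin=2)[row]
        vec = [x, y, z]
        idx = int(np.flatnonzero(vec)[0])
        enc_dir = 'xyz'[idx] + ('-' if vec[idx] < 0 else '')
        return {'echospacing': float(readout), 'enc_dir': enc_dir}

    # e.g. epi_params_from_acqparams('fdt1/subj1_preproc/acqparams.txt', 0)
    # would return {'echospacing': 0.0665, 'enc_dir': 'y-'} for a row "0 -1 0 0.0665"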
- -""" diff --git a/examples/dmri_tbss_nki.py b/examples/dmri_tbss_nki.py deleted file mode 100755 index d14b74dda9..0000000000 --- a/examples/dmri_tbss_nki.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -========================= -dMRI: TBSS on NKI RS data -========================= - -A pipeline to do a TBSS analysis on the NKI rockland sample data - -""" - -from niflow.nipype1.workflows.dmri.fsl.dti import create_eddy_correct_pipeline -from niflow.nipype1.workflows.dmri.fsl.tbss import create_tbss_non_FA, create_tbss_all -""" -Tell python where to find the appropriate functions. -""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.fsl as fsl # fsl -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import os # system functions - -fsl.FSLCommand.set_default_output_type('NIFTI') -""" -You can get the data from: - -http://fcon_1000.projects.nitrc.org/indi/pro/eNKI_RS_TRT/FrontPage.html -""" - -dataDir = os.path.abspath('nki_rs_data') -workingdir = './tbss_example' -subjects_list = [ - '2475376', '3313349', '3808535', '3893245', '8735778', '9630905' -] - -gen_fa = pe.Workflow(name="gen_fa") -gen_fa.base_dir = os.path.join(os.path.abspath(workingdir), 'l1') - -subject_id_infosource = pe.Node( - util.IdentityInterface(fields=['subject_id']), - name='subject_id_infosource') -subject_id_infosource.iterables = ('subject_id', subjects_list) - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['dwi', 'bvec', 'bval']), - name='datasource') -datasource.inputs.base_directory = os.path.abspath(dataDir) -datasource.inputs.template = '%s/session2/DTI_mx_137/dti.%s' -datasource.inputs.template_args = dict( - dwi=[['subject_id', 'nii.gz']], - bvec=[['subject_id', 'bvec']], - bval=[['subject_id', 'bval']]) -datasource.inputs.sort_filelist = True -gen_fa.connect(subject_id_infosource, 'subject_id', datasource, 'subject_id') - -eddy_correct = create_eddy_correct_pipeline() -eddy_correct.inputs.inputnode.ref_num = 0 -gen_fa.connect(datasource, 'dwi', eddy_correct, 'inputnode.in_file') - -bet = pe.Node(interface=fsl.BET(), name='bet') -bet.inputs.mask = True -bet.inputs.frac = 0.34 -gen_fa.connect(eddy_correct, 'pick_ref.out', bet, 'in_file') - -dtifit = pe.Node(interface=fsl.DTIFit(), name='dtifit') -gen_fa.connect(eddy_correct, 'outputnode.eddy_corrected', dtifit, 'dwi') -gen_fa.connect(subject_id_infosource, 'subject_id', dtifit, 'base_name') -gen_fa.connect(bet, 'mask_file', dtifit, 'mask') -gen_fa.connect(datasource, 'bvec', dtifit, 'bvecs') -gen_fa.connect(datasource, 'bval', dtifit, 'bvals') - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.join( - os.path.abspath(workingdir), 'l1_results') -datasink.inputs.parameterization = False -gen_fa.connect(dtifit, 'FA', datasink, 'FA') -gen_fa.connect(dtifit, 'MD', datasink, 'MD') - -if __name__ == '__main__': - gen_fa.write_graph() - gen_fa.run() -""" -Here we get the FA list including all the subjects. 
-""" - -tbss_source = pe.Node( - interface=nio.DataGrabber(outfiles=['fa_list', 'md_list']), - name='tbss_source') -tbss_source.inputs.base_directory = datasink.inputs.base_directory -tbss_source.inputs.template = '%s/%s_%s.nii' -tbss_source.inputs.template_args = dict( - fa_list=[['FA', subjects_list, 'FA']], - md_list=[['MD', subjects_list, 'MD']]) -tbss_source.inputs.sort_filelist = True -""" -TBSS analysis -""" - -tbss_all = create_tbss_all() -tbss_all.inputs.inputnode.skeleton_thresh = 0.2 - -tbssproc = pe.Workflow(name="tbssproc") -tbssproc.base_dir = os.path.join(os.path.abspath(workingdir), 'l2') -tbssproc.connect(tbss_source, 'fa_list', tbss_all, 'inputnode.fa_list') - -tbss_MD = create_tbss_non_FA(name='tbss_MD') -tbss_MD.inputs.inputnode.skeleton_thresh = tbss_all.inputs.inputnode.skeleton_thresh - -tbssproc.connect([ - (tbss_all, tbss_MD, - [('tbss2.outputnode.field_list', 'inputnode.field_list'), - ('tbss3.outputnode.groupmask', 'inputnode.groupmask'), - ('tbss3.outputnode.meanfa_file', - 'inputnode.meanfa_file'), ('tbss4.outputnode.distance_map', - 'inputnode.distance_map')]), - (tbss_source, tbss_MD, [('md_list', 'inputnode.file_list')]), -]) - -if __name__ == '__main__': - tbssproc.write_graph() - tbssproc.run() diff --git a/examples/fmri_ants_openfmri.py b/examples/fmri_ants_openfmri.py deleted file mode 100755 index 4852f03e1c..0000000000 --- a/examples/fmri_ants_openfmri.py +++ /dev/null @@ -1,1140 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -============================================= -fMRI: OpenfMRI.org data, FSL, ANTS, c3daffine -============================================= - -A growing number of datasets are available on `OpenfMRI `_. -This script demonstrates how to use nipype to analyze a data set:: - - python fmri_ants_openfmri.py --datasetdir ds107 - -This workflow also requires 2mm subcortical templates that are available from -`MindBoggle `_. -Specifically the 2mm version of the `MNI template `_. - -Import necessary modules from nipype. 
-""" - -from __future__ import division, unicode_literals -from builtins import open, range, str, bytes - -from glob import glob -import os - -from nipype import config -from nipype import LooseVersion -from nipype import Workflow, Node, MapNode -from nipype.utils.filemanip import filename_to_list -import nipype.pipeline.engine as pe -import nipype.algorithms.modelgen as model -import nipype.algorithms.rapidart as ra -from nipype.algorithms.misc import TSNR, CalculateMedian -from nipype.interfaces.c3 import C3dAffineTool -from nipype.interfaces import fsl, Function, ants, freesurfer as fs -import nipype.interfaces.io as nio -from nipype.interfaces.io import FreeSurferSource -import nipype.interfaces.utility as niu -from nipype.interfaces.utility import Merge, IdentityInterface -from niflow.nipype1.workflows.fmri.fsl import (create_featreg_preproc, - create_modelfit_workflow, - create_fixed_effects_flow) - -config.enable_provenance() -version = 0 -if (fsl.Info.version() - and LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6')): - version = 507 - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') - -imports = ['import os', - 'import nibabel as nb', - 'import numpy as np', - 'import scipy as sp', - 'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename', - 'from scipy.special import legendre' - ] - - -def create_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - name : name of workflow (default: 'registration') - - Inputs: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.anatomical_image : anatomical image to coregister to - inputspec.target_image : registration target - - Outputs: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - - Example - ------- - See code below - """ - - register = pe.Workflow(name=name) - - inputnode = pe.Node( - interface=niu.IdentityInterface(fields=[ - 'source_files', 'mean_image', 'anatomical_image', 'target_image', - 'target_image_brain', 'config_file' - ]), - name='inputspec') - outputnode = pe.Node( - interface=niu.IdentityInterface(fields=[ - 'func2anat_transform', 'anat2target_transform', - 'transformed_files', 'transformed_mean', 'anat2target', - 'mean2anat_mask' - ]), - name='outputspec') - """ - Estimate the tissue classes from the anatomical image. But use spm's segment - as FSL appears to be breaking. 
- """ - - stripper = pe.Node(fsl.BET(), name='stripper') - register.connect(inputnode, 'anatomical_image', stripper, 'in_file') - fast = pe.Node(fsl.FAST(), name='fast') - register.connect(stripper, 'out_file', fast, 'in_files') - """ - Binarize the segmentation - """ - - binarize = pe.Node( - fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'), name='binarize') - pickindex = lambda x, i: x[i] - register.connect(fast, ('partial_volume_files', pickindex, 2), binarize, - 'in_file') - """ - Calculate rigid transform from mean image to anatomical image - """ - - mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat') - mean2anat.inputs.dof = 6 - register.connect(inputnode, 'mean_image', mean2anat, 'in_file') - register.connect(stripper, 'out_file', mean2anat, 'reference') - """ - Now use bbr cost function to improve the transform - """ - - mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr') - mean2anatbbr.inputs.dof = 6 - mean2anatbbr.inputs.cost = 'bbr' - mean2anatbbr.inputs.schedule = os.path.join( - os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch') - register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file') - register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg') - register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference') - register.connect(mean2anat, 'out_matrix_file', mean2anatbbr, - 'in_matrix_file') - """ - Create a mask of the median image coregistered to the anatomical image - """ - - mean2anat_mask = Node(fsl.BET(mask=True), name='mean2anat_mask') - register.connect(mean2anatbbr, 'out_file', mean2anat_mask, 'in_file') - """ - Convert the BBRegister transformation to ANTS ITK format - """ - - convert2itk = pe.Node(C3dAffineTool(), name='convert2itk') - convert2itk.inputs.fsl2ras = True - convert2itk.inputs.itk_transform = True - register.connect(mean2anatbbr, 'out_matrix_file', convert2itk, - 'transform_file') - register.connect(inputnode, 'mean_image', convert2itk, 'source_file') - register.connect(stripper, 'out_file', convert2itk, 'reference_file') - """ - Compute registration between the subject's structural and MNI template - - * All parameters are set using the example from: - #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh - * This is currently set to perform a very quick registration. However, - the registration can be made significantly more accurate for cortical - structures by increasing the number of iterations. 
- """ - - reg = pe.Node(ants.Registration(), name='antsRegister') - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[ - 100, 30, 20 - ]] - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = True - reg.inputs.initial_moving_transform_com = True - reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01] - reg.inputs.convergence_window_size = [20] * 2 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 3 - reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 3 - reg.inputs.use_histogram_matching = [False] * 2 + [True] - reg.inputs.winsorize_lower_quantile = 0.005 - reg.inputs.winsorize_upper_quantile = 0.995 - reg.inputs.args = '--float' - reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - reg.inputs.num_threads = 4 - reg.plugin_args = { - 'qsub_args': '-pe orte 4', - 'sbatch_args': '--mem=6G -c 4' - } - register.connect(stripper, 'out_file', reg, 'moving_image') - register.connect(inputnode, 'target_image_brain', reg, 'fixed_image') - - """ - Concatenate the affine and ants transforms into a list - """ - - merge = pe.Node(niu.Merge(2), iterfield=['in2'], name='mergexfm') - register.connect(convert2itk, 'itk_transform', merge, 'in2') - register.connect(reg, 'composite_transform', merge, 'in1') - - """ - Transform the mean image. First to anatomical and then to target - """ - - warpmean = pe.Node(ants.ApplyTransforms(), name='warpmean') - warpmean.inputs.input_image_type = 0 - warpmean.inputs.interpolation = 'Linear' - warpmean.inputs.invert_transform_flags = [False, False] - warpmean.terminal_output = 'file' - - register.connect(inputnode, 'target_image_brain', warpmean, - 'reference_image') - register.connect(inputnode, 'mean_image', warpmean, 'input_image') - register.connect(merge, 'out', warpmean, 'transforms') - """ - Transform the remaining images. 
First to anatomical and then to target - """ - - warpall = pe.MapNode( - ants.ApplyTransforms(), iterfield=['input_image'], name='warpall') - warpall.inputs.input_image_type = 0 - warpall.inputs.interpolation = 'Linear' - warpall.inputs.invert_transform_flags = [False, False] - warpall.terminal_output = 'file' - - register.connect(inputnode, 'target_image_brain', warpall, - 'reference_image') - register.connect(inputnode, 'source_files', warpall, 'input_image') - register.connect(merge, 'out', warpall, 'transforms') - """ - Assign all the output files - """ - - register.connect(reg, 'warped_image', outputnode, 'anat2target') - register.connect(warpmean, 'output_image', outputnode, 'transformed_mean') - register.connect(warpall, 'output_image', outputnode, 'transformed_files') - register.connect(mean2anatbbr, 'out_matrix_file', outputnode, - 'func2anat_transform') - register.connect(mean2anat_mask, 'mask_file', outputnode, 'mean2anat_mask') - register.connect(reg, 'composite_transform', outputnode, - 'anat2target_transform') - - return register - - -def get_aparc_aseg(files): - """Return the aparc+aseg.mgz file""" - - for name in files: - if 'aparc+aseg.mgz' in name: - return name - raise ValueError('aparc+aseg.mgz not found') - - -def create_fs_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - - name : name of workflow (default: 'registration') - - Inputs: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.target_image : registration target - - Outputs: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - - Example - ------- - See code below - """ - - register = Workflow(name=name) - - inputnode = Node( - interface=IdentityInterface(fields=[ - 'source_files', 'mean_image', 'subject_id', 'subjects_dir', - 'target_image' - ]), - name='inputspec') - - outputnode = Node( - interface=IdentityInterface(fields=[ - 'func2anat_transform', 'out_reg_file', 'anat2target_transform', - 'transforms', 'transformed_mean', 'transformed_files', - 'min_cost_file', 'anat2target', 'aparc', 'mean2anat_mask' - ]), - name='outputspec') - - # Get the subject's freesurfer source directory - fssource = Node(FreeSurferSource(), name='fssource') - fssource.run_without_submitting = True - register.connect(inputnode, 'subject_id', fssource, 'subject_id') - register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir') - - convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert") - register.connect(fssource, 'T1', convert, 'in_file') - - # Coregister the median to the surface - bbregister = Node( - freesurfer.BBRegister(registered_file=True), name='bbregister') - bbregister.inputs.init = 'fsl' - bbregister.inputs.contrast_type = 't2' - bbregister.inputs.out_fsl_file = True - bbregister.inputs.epi_mask = True - register.connect(inputnode, 'subject_id', bbregister, 'subject_id') - register.connect(inputnode, 'mean_image', bbregister, 'source_file') - register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir') - - # Create a mask of the median coregistered to the anatomical image - mean2anat_mask = Node(fsl.BET(mask=True), name='mean2anat_mask') - register.connect(bbregister, 'registered_file', mean2anat_mask, 'in_file') - """ - use 
aparc+aseg's brain mask - """ - - binarize = Node( - fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), - name="binarize_aparc") - register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, - "in_file") - - stripper = Node(fsl.ApplyMask(), name='stripper') - register.connect(binarize, "binary_file", stripper, "mask_file") - register.connect(convert, 'out_file', stripper, 'in_file') - """ - Apply inverse transform to aparc file - """ - - aparcxfm = Node( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - name='aparc_inverse_transform') - register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file') - register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm, - 'target_file') - register.connect(inputnode, 'mean_image', aparcxfm, 'source_file') - """ - Convert the BBRegister transformation to ANTS ITK format - """ - - convert2itk = Node(C3dAffineTool(), name='convert2itk') - convert2itk.inputs.fsl2ras = True - convert2itk.inputs.itk_transform = True - register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file') - register.connect(inputnode, 'mean_image', convert2itk, 'source_file') - register.connect(stripper, 'out_file', convert2itk, 'reference_file') - """ - Compute registration between the subject's structural and MNI template - - * All parameters are set using the example from: - #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh - * This is currently set to perform a very quick registration. However, - the registration can be made significantly more accurate for cortical - structures by increasing the number of iterations. - """ - - reg = Node(ants.Registration(), name='antsRegister') - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[ - 100, 30, 20 - ]] - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = True - reg.inputs.initial_moving_transform_com = True - reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01] - reg.inputs.convergence_window_size = [20] * 2 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 3 - reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 3 - reg.inputs.use_histogram_matching = [False] * 2 + [True] - reg.inputs.winsorize_lower_quantile = 0.005 - reg.inputs.winsorize_upper_quantile = 0.995 - reg.inputs.float = True - reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - reg.inputs.num_threads = 4 - reg.plugin_args = { - 'qsub_args': '-pe orte 4', - 'sbatch_args': '--mem=6G -c 4' - } - register.connect(stripper, 'out_file', reg, 'moving_image') - register.connect(inputnode, 'target_image', reg, 'fixed_image') - - """ - Concatenate the affine and ants transforms into a list - """ - - merge = Node(Merge(2), iterfield=['in2'], name='mergexfm') - register.connect(convert2itk, 'itk_transform', merge, 'in2') - register.connect(reg, 'composite_transform', 
merge, 'in1') - - """ - Transform the mean image. First to anatomical and then to target - """ - - warpmean = Node(ants.ApplyTransforms(), name='warpmean') - warpmean.inputs.input_image_type = 0 - warpmean.inputs.interpolation = 'Linear' - warpmean.inputs.invert_transform_flags = [False, False] - warpmean.terminal_output = 'file' - warpmean.inputs.args = '--float' - # warpmean.inputs.num_threads = 4 - # warpmean.plugin_args = {'sbatch_args': '--mem=4G -c 4'} - - """ - Transform the remaining images. First to anatomical and then to target - """ - - warpall = pe.MapNode( - ants.ApplyTransforms(), iterfield=['input_image'], name='warpall') - warpall.inputs.input_image_type = 0 - warpall.inputs.interpolation = 'Linear' - warpall.inputs.invert_transform_flags = [False, False] - warpall.terminal_output = 'file' - warpall.inputs.args = '--float' - warpall.inputs.num_threads = 2 - warpall.plugin_args = {'sbatch_args': '--mem=6G -c 2'} - """ - Assign all the output files - """ - - register.connect(warpmean, 'output_image', outputnode, 'transformed_mean') - register.connect(warpall, 'output_image', outputnode, 'transformed_files') - - register.connect(inputnode, 'target_image', warpmean, 'reference_image') - register.connect(inputnode, 'mean_image', warpmean, 'input_image') - register.connect(merge, 'out', warpmean, 'transforms') - register.connect(inputnode, 'target_image', warpall, 'reference_image') - register.connect(inputnode, 'source_files', warpall, 'input_image') - register.connect(merge, 'out', warpall, 'transforms') - """ - Assign all the output files - """ - - register.connect(reg, 'warped_image', outputnode, 'anat2target') - register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc') - register.connect(bbregister, 'out_fsl_file', outputnode, - 'func2anat_transform') - register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file') - register.connect(bbregister, 'min_cost_file', outputnode, 'min_cost_file') - register.connect(mean2anat_mask, 'mask_file', outputnode, 'mean2anat_mask') - register.connect(reg, 'composite_transform', outputnode, - 'anat2target_transform') - register.connect(merge, 'out', outputnode, 'transforms') - - return register - - -""" -Get info for a given subject -""" - - -def get_subjectinfo(subject_id, base_dir, task_id, model_id): - """Get info for a given subject - - Parameters - ---------- - subject_id : string - Subject identifier (e.g., sub001) - base_dir : string - Path to base directory of the dataset - task_id : int - Which task to process - model_id : int - Which model to process - - Returns - ------- - run_ids : list of ints - Run numbers - conds : list of str - Condition names - TR : float - Repetition time - """ - - from glob import glob - import os - import numpy as np - condition_info = [] - cond_file = os.path.join(base_dir, 'models', 'model%03d' % model_id, - 'condition_key.txt') - with open(cond_file, 'rt') as fp: - for line in fp: - info = line.strip().split() - condition_info.append([info[0], info[1], ' '.join(info[2:])]) - if len(condition_info) == 0: - raise ValueError('No condition info found in %s' % cond_file) - taskinfo = np.array(condition_info) - n_tasks = len(np.unique(taskinfo[:, 0])) - conds = [] - run_ids = [] - if task_id > n_tasks: - raise ValueError('Task id %d does not exist' % task_id) - for idx in range(n_tasks): - taskidx = np.where(taskinfo[:, 0] == 'task%03d' % (idx + 1)) - conds.append([ - condition.replace(' ', '_') - for condition in taskinfo[taskidx[0], 2] - ]) # if 'junk' not in condition]) - files = 
sorted( - glob( - os.path.join(base_dir, subject_id, 'BOLD', - 'task%03d_run*' % (idx + 1)))) - runs = [int(val[-3:]) for val in files] - run_ids.insert(idx, runs) - json_info = os.path.join(base_dir, subject_id, 'BOLD', 'task%03d_run%03d' % - (task_id, - run_ids[task_id - 1][0]), 'bold_scaninfo.json') - if os.path.exists(json_info): - import json - with open(json_info, 'rt') as fp: - data = json.load(fp) - TR = data['global']['const']['RepetitionTime'] / 1000. - else: - task_scan_key = os.path.join( - base_dir, subject_id, 'BOLD', 'task%03d_run%03d' % - (task_id, run_ids[task_id - 1][0]), 'scan_key.txt') - if os.path.exists(task_scan_key): - TR = np.genfromtxt(task_scan_key)[1] - else: - TR = np.genfromtxt(os.path.join(base_dir, 'scan_key.txt'))[1] - return run_ids[task_id - 1], conds[task_id - 1], TR - - -""" -Analyzes an open fmri dataset -""" - - -def analyze_openfmri_dataset(data_dir, - subject=None, - model_id=None, - task_id=None, - output_dir=None, - subj_prefix='*', - hpcutoff=120., - use_derivatives=True, - fwhm=6.0, - subjects_dir=None, - target=None): - """Analyzes an open fmri dataset - - Parameters - ---------- - - data_dir : str - Path to the base data directory - - work_dir : str - Nipype working directory (defaults to cwd) - """ - """ - Load nipype workflows - """ - - preproc = create_featreg_preproc(whichvol='first') - modelfit = create_modelfit_workflow() - fixed_fx = create_fixed_effects_flow() - if subjects_dir: - registration = create_fs_reg_workflow() - else: - registration = create_reg_workflow() - """ - Remove the plotting connection so that plot iterables don't propagate - to the model stage - """ - - preproc.disconnect( - preproc.get_node('plot_motion'), 'out_file', - preproc.get_node('outputspec'), 'motion_plots') - """ - Set up openfmri data specific components - """ - - subjects = sorted([ - path.split(os.path.sep)[-1] - for path in glob(os.path.join(data_dir, subj_prefix)) - ]) - - infosource = pe.Node( - niu.IdentityInterface(fields=['subject_id', 'model_id', 'task_id']), - name='infosource') - if len(subject) == 0: - infosource.iterables = [('subject_id', subjects), - ('model_id', [model_id]), ('task_id', task_id)] - else: - infosource.iterables = [('subject_id', [ - subjects[subjects.index(subj)] for subj in subject - ]), ('model_id', [model_id]), ('task_id', task_id)] - - subjinfo = pe.Node( - niu.Function( - input_names=['subject_id', 'base_dir', 'task_id', 'model_id'], - output_names=['run_id', 'conds', 'TR'], - function=get_subjectinfo), - name='subjectinfo') - subjinfo.inputs.base_dir = data_dir - """ - Return data components as anat, bold and behav - """ - - contrast_file = os.path.join(data_dir, 'models', 'model%03d' % model_id, - 'task_contrasts.txt') - has_contrast = os.path.exists(contrast_file) - if has_contrast: - datasource = pe.Node( - nio.DataGrabber( - infields=['subject_id', 'run_id', 'task_id', 'model_id'], - outfields=['anat', 'bold', 'behav', 'contrasts']), - name='datasource') - else: - datasource = pe.Node( - nio.DataGrabber( - infields=['subject_id', 'run_id', 'task_id', 'model_id'], - outfields=['anat', 'bold', 'behav']), - name='datasource') - datasource.inputs.base_directory = data_dir - datasource.inputs.template = '*' - - if has_contrast: - datasource.inputs.field_template = { - 'anat': '%s/anatomy/T1_001.nii.gz', - 'bold': '%s/BOLD/task%03d_r*/bold.nii.gz', - 'behav': ('%s/model/model%03d/onsets/task%03d_' - 'run%03d/cond*.txt'), - 'contrasts': ('models/model%03d/' - 'task_contrasts.txt') - } - datasource.inputs.template_args = 
{ - 'anat': [['subject_id']], - 'bold': [['subject_id', 'task_id']], - 'behav': [['subject_id', 'model_id', 'task_id', 'run_id']], - 'contrasts': [['model_id']] - } - else: - datasource.inputs.field_template = { - 'anat': '%s/anatomy/T1_001.nii.gz', - 'bold': '%s/BOLD/task%03d_r*/bold.nii.gz', - 'behav': ('%s/model/model%03d/onsets/task%03d_' - 'run%03d/cond*.txt') - } - datasource.inputs.template_args = { - 'anat': [['subject_id']], - 'bold': [['subject_id', 'task_id']], - 'behav': [['subject_id', 'model_id', 'task_id', 'run_id']] - } - - datasource.inputs.sort_filelist = True - """ - Create meta workflow - """ - - wf = pe.Workflow(name='openfmri') - wf.connect(infosource, 'subject_id', subjinfo, 'subject_id') - wf.connect(infosource, 'model_id', subjinfo, 'model_id') - wf.connect(infosource, 'task_id', subjinfo, 'task_id') - wf.connect(infosource, 'subject_id', datasource, 'subject_id') - wf.connect(infosource, 'model_id', datasource, 'model_id') - wf.connect(infosource, 'task_id', datasource, 'task_id') - wf.connect(subjinfo, 'run_id', datasource, 'run_id') - wf.connect([ - (datasource, preproc, [('bold', 'inputspec.func')]), - ]) - - def get_highpass(TR, hpcutoff): - return hpcutoff / (2. * TR) - - gethighpass = pe.Node( - niu.Function( - input_names=['TR', 'hpcutoff'], - output_names=['highpass'], - function=get_highpass), - name='gethighpass') - wf.connect(subjinfo, 'TR', gethighpass, 'TR') - wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass') - """ - Setup a basic set of contrasts, a t-test per condition - """ - - def get_contrasts(contrast_file, task_id, conds): - import numpy as np - import os - contrast_def = [] - if os.path.exists(contrast_file): - with open(contrast_file, 'rt') as fp: - contrast_def.extend([ - np.array(row.split()) for row in fp.readlines() - if row.strip() - ]) - contrasts = [] - for row in contrast_def: - if row[0] != 'task%03d' % task_id: - continue - con = [ - row[1], 'T', ['cond%03d' % (i + 1) for i in range(len(conds))], - row[2:].astype(float).tolist() - ] - contrasts.append(con) - # add auto contrasts for each column - for i, cond in enumerate(conds): - con = [cond, 'T', ['cond%03d' % (i + 1)], [1]] - contrasts.append(con) - return contrasts - - contrastgen = pe.Node( - niu.Function( - input_names=['contrast_file', 'task_id', 'conds'], - output_names=['contrasts'], - function=get_contrasts), - name='contrastgen') - - art = pe.MapNode( - interface=ra.ArtifactDetect( - use_differences=[True, False], - use_norm=True, - norm_threshold=1, - zintensity_threshold=3, - parameter_source='FSL', - mask_type='file'), - iterfield=['realigned_files', 'realignment_parameters', 'mask_file'], - name="art") - - modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") - modelspec.inputs.input_units = 'secs' - - def check_behav_list(behav, run_id, conds): - import numpy as np - num_conds = len(conds) - if isinstance(behav, (str, bytes)): - behav = [behav] - behav_array = np.array(behav).flatten() - num_elements = behav_array.shape[0] - return behav_array.reshape(int(num_elements / num_conds), - num_conds).tolist() - - reshape_behav = pe.Node( - niu.Function( - input_names=['behav', 'run_id', 'conds'], - output_names=['behav'], - function=check_behav_list), - name='reshape_behav') - - wf.connect(subjinfo, 'TR', modelspec, 'time_repetition') - wf.connect(datasource, 'behav', reshape_behav, 'behav') - wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id') - wf.connect(subjinfo, 'conds', reshape_behav, 'conds') - wf.connect(reshape_behav, 'behav', 
modelspec, 'event_files') - - wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval') - wf.connect(subjinfo, 'conds', contrastgen, 'conds') - if has_contrast: - wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file') - else: - contrastgen.inputs.contrast_file = '' - wf.connect(infosource, 'task_id', contrastgen, 'task_id') - wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts') - - wf.connect([(preproc, art, - [('outputspec.motion_parameters', 'realignment_parameters'), - ('outputspec.realigned_files', - 'realigned_files'), ('outputspec.mask', 'mask_file')]), - (preproc, modelspec, - [('outputspec.highpassed_files', 'functional_runs'), - ('outputspec.motion_parameters', 'realignment_parameters')]), - (art, modelspec, - [('outlier_files', 'outlier_files')]), (modelspec, modelfit, [ - ('session_info', 'inputspec.session_info') - ]), (preproc, modelfit, [('outputspec.highpassed_files', - 'inputspec.functional_data')])]) - - # Comute TSNR on realigned data regressing polynomials upto order 2 - tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') - wf.connect(preproc, "outputspec.realigned_files", tsnr, "in_file") - - # Compute the median image across runs - calc_median = Node(CalculateMedian(), name='median') - wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') - """ - Reorder the copes so that now it combines across runs - """ - - def sort_copes(copes, varcopes, contrasts): - import numpy as np - if not isinstance(copes, list): - copes = [copes] - varcopes = [varcopes] - num_copes = len(contrasts) - n_runs = len(copes) - all_copes = np.array(copes).flatten() - all_varcopes = np.array(varcopes).flatten() - outcopes = all_copes.reshape( - int(len(all_copes) / num_copes), num_copes).T.tolist() - outvarcopes = all_varcopes.reshape( - int(len(all_varcopes) / num_copes), num_copes).T.tolist() - return outcopes, outvarcopes, n_runs - - cope_sorter = pe.Node( - niu.Function( - input_names=['copes', 'varcopes', 'contrasts'], - output_names=['copes', 'varcopes', 'n_runs'], - function=sort_copes), - name='cope_sorter') - - pickfirst = lambda x: x[0] - - wf.connect(contrastgen, 'contrasts', cope_sorter, 'contrasts') - wf.connect([(preproc, fixed_fx, - [(('outputspec.mask', pickfirst), - 'flameo.mask_file')]), (modelfit, cope_sorter, - [('outputspec.copes', 'copes')]), - (modelfit, cope_sorter, [('outputspec.varcopes', 'varcopes')]), - (cope_sorter, fixed_fx, - [('copes', 'inputspec.copes'), ('varcopes', - 'inputspec.varcopes'), - ('n_runs', 'l2model.num_copes')]), (modelfit, fixed_fx, [ - ('outputspec.dof_file', 'inputspec.dof_files'), - ])]) - - wf.connect(calc_median, 'median_file', registration, - 'inputspec.mean_image') - if subjects_dir: - wf.connect(infosource, 'subject_id', registration, - 'inputspec.subject_id') - registration.inputs.inputspec.subjects_dir = subjects_dir - registration.inputs.inputspec.target_image = fsl.Info.standard_image( - 'MNI152_T1_2mm_brain.nii.gz') - if target: - registration.inputs.inputspec.target_image = target - else: - wf.connect(datasource, 'anat', registration, - 'inputspec.anatomical_image') - registration.inputs.inputspec.target_image = fsl.Info.standard_image( - 'MNI152_T1_2mm.nii.gz') - registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image( - 'MNI152_T1_2mm_brain.nii.gz') - registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm' - - def merge_files(copes, varcopes, zstats): - out_files = [] - splits = [] - out_files.extend(copes) - splits.append(len(copes)) - 
out_files.extend(varcopes) - splits.append(len(varcopes)) - out_files.extend(zstats) - splits.append(len(zstats)) - return out_files, splits - - mergefunc = pe.Node( - niu.Function( - input_names=['copes', 'varcopes', 'zstats'], - output_names=['out_files', 'splits'], - function=merge_files), - name='merge_files') - wf.connect([(fixed_fx.get_node('outputspec'), mergefunc, [ - ('copes', 'copes'), - ('varcopes', 'varcopes'), - ('zstats', 'zstats'), - ])]) - wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files') - - def split_files(in_files, splits): - copes = in_files[:splits[0]] - varcopes = in_files[splits[0]:(splits[0] + splits[1])] - zstats = in_files[(splits[0] + splits[1]):] - return copes, varcopes, zstats - - splitfunc = pe.Node( - niu.Function( - input_names=['in_files', 'splits'], - output_names=['copes', 'varcopes', 'zstats'], - function=split_files), - name='split_files') - wf.connect(mergefunc, 'splits', splitfunc, 'splits') - wf.connect(registration, 'outputspec.transformed_files', splitfunc, - 'in_files') - - if subjects_dir: - get_roi_mean = pe.MapNode( - fs.SegStats(default_color_table=True), - iterfield=['in_file'], - name='get_aparc_means') - get_roi_mean.inputs.avgwf_txt_file = True - wf.connect( - fixed_fx.get_node('outputspec'), 'copes', get_roi_mean, 'in_file') - wf.connect(registration, 'outputspec.aparc', get_roi_mean, - 'segmentation_file') - - get_roi_tsnr = pe.MapNode( - fs.SegStats(default_color_table=True), - iterfield=['in_file'], - name='get_aparc_tsnr') - get_roi_tsnr.inputs.avgwf_txt_file = True - wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file') - wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, - 'segmentation_file') - """ - Connect to a datasink - """ - - def get_subs(subject_id, conds, run_id, model_id, task_id): - subs = [('_subject_id_%s_' % subject_id, '')] - subs.append(('_model_id_%d' % model_id, 'model%03d' % model_id)) - subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id)) - subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp', - 'mean')) - subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_flirt', - 'affine')) - - for i in range(len(conds)): - subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1))) - subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1))) - subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1))) - subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1))) - subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1))) - subs.append(('_warpall%d/cope1_warp.' % i, 'cope%02d.' % (i + 1))) - subs.append(('_warpall%d/varcope1_warp.' % (len(conds) + i), - 'varcope%02d.' % (i + 1))) - subs.append(('_warpall%d/zstat1_warp.' % (2 * len(conds) + i), - 'zstat%02d.' % (i + 1))) - subs.append(('_warpall%d/cope1_trans.' % i, 'cope%02d.' % (i + 1))) - subs.append(('_warpall%d/varcope1_trans.' % (len(conds) + i), - 'varcope%02d.' % (i + 1))) - subs.append(('_warpall%d/zstat1_trans.' % (2 * len(conds) + i), - 'zstat%02d.' 
% (i + 1))) - subs.append(('__get_aparc_means%d/' % i, '/cope%02d_' % (i + 1))) - - for i, run_num in enumerate(run_id): - subs.append(('__get_aparc_tsnr%d/' % i, '/run%02d_' % run_num)) - subs.append(('__art%d/' % i, '/run%02d_' % run_num)) - subs.append(('__dilatemask%d/' % i, '/run%02d_' % run_num)) - subs.append(('__realign%d/' % i, '/run%02d_' % run_num)) - subs.append(('__modelgen%d/' % i, '/run%02d_' % run_num)) - subs.append(('/model%03d/task%03d/' % (model_id, task_id), '/')) - subs.append(('/model%03d/task%03d_' % (model_id, task_id), '/')) - subs.append(('_bold_dtype_mcf_bet_thresh_dil', '_mask')) - subs.append(('_output_warped_image', '_anat2target')) - subs.append(('median_flirt_brain_mask', 'median_brain_mask')) - subs.append(('median_bbreg_brain_mask', 'median_brain_mask')) - return subs - - subsgen = pe.Node( - niu.Function( - input_names=[ - 'subject_id', 'conds', 'run_id', 'model_id', 'task_id' - ], - output_names=['substitutions'], - function=get_subs), - name='subsgen') - wf.connect(subjinfo, 'run_id', subsgen, 'run_id') - - datasink = pe.Node(interface=nio.DataSink(), name="datasink") - wf.connect(infosource, 'subject_id', datasink, 'container') - wf.connect(infosource, 'subject_id', subsgen, 'subject_id') - wf.connect(infosource, 'model_id', subsgen, 'model_id') - wf.connect(infosource, 'task_id', subsgen, 'task_id') - wf.connect(contrastgen, 'contrasts', subsgen, 'conds') - wf.connect(subsgen, 'substitutions', datasink, 'substitutions') - wf.connect([(fixed_fx.get_node('outputspec'), datasink, - [('res4d', 'res4d'), ('copes', 'copes'), ('varcopes', - 'varcopes'), - ('zstats', 'zstats'), ('tstats', 'tstats')])]) - wf.connect([(modelfit.get_node('modelgen'), datasink, [ - ('design_cov', 'qa.model'), - ('design_image', 'qa.model.@matrix_image'), - ('design_file', 'qa.model.@matrix'), - ])]) - wf.connect([(preproc, datasink, [('outputspec.motion_parameters', - 'qa.motion'), ('outputspec.motion_plots', - 'qa.motion.plots'), - ('outputspec.mask', 'qa.mask')])]) - wf.connect(registration, 'outputspec.mean2anat_mask', datasink, - 'qa.mask.mean2anat') - wf.connect(art, 'norm_files', datasink, 'qa.art.@norm') - wf.connect(art, 'intensity_files', datasink, 'qa.art.@intensity') - wf.connect(art, 'outlier_files', datasink, 'qa.art.@outlier_files') - wf.connect(registration, 'outputspec.anat2target', datasink, - 'qa.anat2target') - wf.connect(tsnr, 'tsnr_file', datasink, 'qa.tsnr.@map') - if subjects_dir: - wf.connect(registration, 'outputspec.min_cost_file', datasink, - 'qa.mincost') - wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'qa.tsnr'), - ('summary_file', - 'qa.tsnr.@summary')])]) - wf.connect([(get_roi_mean, datasink, [('avgwf_txt_file', 'copes.roi'), - ('summary_file', - 'copes.roi.@summary')])]) - wf.connect([(splitfunc, datasink, [ - ('copes', 'copes.mni'), - ('varcopes', 'varcopes.mni'), - ('zstats', 'zstats.mni'), - ])]) - wf.connect(calc_median, 'median_file', datasink, 'mean') - wf.connect(registration, 'outputspec.transformed_mean', datasink, - 'mean.mni') - wf.connect(registration, 'outputspec.func2anat_transform', datasink, - 'xfm.mean2anat') - wf.connect(registration, 'outputspec.anat2target_transform', datasink, - 'xfm.anat2target') - """ - Set processing parameters - """ - - preproc.inputs.inputspec.fwhm = fwhm - gethighpass.inputs.hpcutoff = hpcutoff - modelspec.inputs.high_pass_filter_cutoff = hpcutoff - modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': use_derivatives}} - modelfit.inputs.inputspec.model_serial_correlations = True - 
modelfit.inputs.inputspec.film_threshold = 1000 - - datasink.inputs.base_directory = output_dir - return wf - - -""" -The following functions run the whole workflow. -""" - -if __name__ == '__main__': - import argparse - defstr = ' (default %(default)s)' - parser = argparse.ArgumentParser( - prog='fmri_openfmri.py', description=__doc__) - parser.add_argument('-d', '--datasetdir', required=True) - parser.add_argument( - '-s', - '--subject', - default=[], - nargs='+', - type=str, - help="Subject name (e.g. 'sub001')") - parser.add_argument( - '-m', '--model', default=1, help="Model index" + defstr) - parser.add_argument( - '-x', - '--subjectprefix', - default='sub*', - help="Subject prefix" + defstr) - parser.add_argument( - '-t', - '--task', - default=1, # nargs='+', - type=int, - help="Task index" + defstr) - parser.add_argument( - '--hpfilter', - default=120., - type=float, - help="High pass filter cutoff (in secs)" + defstr) - parser.add_argument( - '--fwhm', default=6., type=float, help="Spatial FWHM" + defstr) - parser.add_argument( - '--derivatives', action="store_true", help="Use derivatives" + defstr) - parser.add_argument( - "-o", "--output_dir", dest="outdir", help="Output directory base") - parser.add_argument( - "-w", "--work_dir", dest="work_dir", help="Output directory base") - parser.add_argument( - "-p", - "--plugin", - dest="plugin", - default='Linear', - help="Plugin to use") - parser.add_argument( - "--plugin_args", dest="plugin_args", help="Plugin arguments") - parser.add_argument( - "--sd", - dest="subjects_dir", - help="FreeSurfer subjects directory (if available)") - parser.add_argument( - "--target", - dest="target_file", - help=("Target in MNI space. Best to use the MindBoggle " - "template - only used with FreeSurfer" - "OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz")) - args = parser.parse_args() - outdir = args.outdir - work_dir = os.getcwd() - if args.work_dir: - work_dir = os.path.abspath(args.work_dir) - if outdir: - outdir = os.path.abspath(outdir) - else: - outdir = os.path.join(work_dir, 'output') - outdir = os.path.join(outdir, 'model%02d' % int(args.model), - 'task%03d' % int(args.task)) - derivatives = args.derivatives - if derivatives is None: - derivatives = False - wf = analyze_openfmri_dataset( - data_dir=os.path.abspath(args.datasetdir), - subject=args.subject, - model_id=int(args.model), - task_id=[int(args.task)], - subj_prefix=args.subjectprefix, - output_dir=outdir, - hpcutoff=args.hpfilter, - use_derivatives=derivatives, - fwhm=args.fwhm, - subjects_dir=args.subjects_dir, - target=args.target_file) - # wf.config['execution']['remove_unnecessary_outputs'] = False - - wf.base_dir = work_dir - if args.plugin_args: - wf.run(args.plugin, plugin_args=eval(args.plugin_args)) - else: - wf.run(args.plugin) diff --git a/examples/fmri_freesurfer_smooth.py b/examples/fmri_freesurfer_smooth.py deleted file mode 100755 index 69545b59c0..0000000000 --- a/examples/fmri_freesurfer_smooth.py +++ /dev/null @@ -1,616 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -====================================== -fMRI: surface smooth - FreeSurfer, SPM -====================================== - -This tutorial illustrates how to perform surface-based smoothing of -cortical data using FreeSurfer_ and then perform firstlevel model and -contrast estimation using SPM_. 
A surface-based second level glm -illustrates the use of spherical registration and freesurfer's glm -functions. - -Preparing environment -===================== - -Step 0 ------- - -In order to run this tutorial you need to have SPM_ and FreeSurfer_ -tools installed and accessible from matlab/command line. Check by -calling mri_info from the command line. - -Step 1 ------- -Link the *fsaverage* directory for your freesurfer distribution. To do -this type: - -:: - - cd nipype-tutorial/fsdata - ln -s $FREESURFER_HOME/subjects/fsaverage - cd .. - - -Defining the workflow -===================== - -""" - -from __future__ import print_function -from builtins import str -from builtins import range - -import os # system functions - -import nipype.algorithms.modelgen as model # model generation -import nipype.algorithms.rapidart as ra # artifact detection -import nipype.interfaces.freesurfer as fs # freesurfer -import nipype.interfaces.io as nio # i/o routines -import nipype.interfaces.matlab as mlab # how to run matlab -import nipype.interfaces.spm as spm # spm -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -""" -Preliminaries -------------- - -Set any package specific configuration. - -Setting the subjects directory and the appropriate matlab command to use. if -you want to use a different spm version/path, it should also be entered here. - -These are currently being set at the class level, so every node will inherit -these settings. However, these can also be changed or set for an individual -node. -""" - -# Tell freesurfer what subjects directory to use -subjects_dir = os.path.abspath('fsdata') -fs.FSCommand.set_default_subjects_dir(subjects_dir) - -# Set the way matlab should be called -mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") -# If SPM is not in your MATLAB path you should add it here -mlab.MatlabCommand.set_default_paths('/software/spm8') -""" -Setup preprocessing workflow ----------------------------- - - -""" - -preproc = pe.Workflow(name='preproc') -""" -Use :class:`nipype.interfaces.spm.Realign` for motion correction -and register all images to the mean image. -""" - -realign = pe.Node(interface=spm.Realign(), name="realign") -realign.inputs.register_to_mean = True -""" -Use :class:`nipype.algorithms.rapidart` to determine which of the -images in the functional series are outliers based on deviations in -intensity or movement. -""" - -art = pe.Node(interface=ra.ArtifactDetect(), name="art") -art.inputs.use_differences = [True, False] -art.inputs.use_norm = True -art.inputs.norm_threshold = 1 -art.inputs.zintensity_threshold = 3 -art.inputs.mask_type = 'file' -art.inputs.parameter_source = 'SPM' -""" -Use :class:`nipype.interfaces.freesurfer.BBRegister` to coregister the mean -functional image generated by realign to the subjects' surfaces. -""" - -surfregister = pe.Node(interface=fs.BBRegister(), name='surfregister') -surfregister.inputs.init = 'fsl' -surfregister.inputs.contrast_type = 't2' -""" -Use :class:`nipype.interfaces.io.FreeSurferSource` to retrieve various image -files that are automatically generated by the recon-all process. -""" - -FreeSurferSource = pe.Node(interface=nio.FreeSurferSource(), name='fssource') -""" -Use :class:`nipype.interfaces.freesurfer.ApplyVolTransform` to convert the -brainmask generated by freesurfer into the realigned functional space. 
-""" - -ApplyVolTransform = pe.Node(interface=fs.ApplyVolTransform(), name='applyreg') -ApplyVolTransform.inputs.inverse = True -""" -Use :class:`nipype.interfaces.freesurfer.Binarize` to extract a binary brain -mask. -""" - -Threshold = pe.Node(interface=fs.Binarize(), name='threshold') -Threshold.inputs.min = 10 -Threshold.inputs.out_type = 'nii' -""" -Two different types of functional data smoothing are performed in this -workflow. The volume smoothing option performs a standard SPM smoothin. using -:class:`nipype.interfaces.spm.Smooth`. In addition, we use a smoothing routine -from freesurfer (:class:`nipype.interfaces.freesurfer.Binarize`) to project the -functional data from the volume to the subjects' surface, smooth it on the -surface and fit it back into the volume forming the cortical ribbon. The -projection uses the average value along a "cortical column". In addition to the -surface smoothing, the rest of the volume is smoothed with a 3d gaussian kernel. - -.. note:: - - It is very important to note that the projection to the surface takes a 3d - manifold to a 2d manifold. Hence the reverse projection, simply fills the - thickness of cortex with the smoothed data. The smoothing is not performed - in a depth specific manner. The output of this branch should only be used - for surface-based analysis and visualization. - -""" - -volsmooth = pe.Node(interface=spm.Smooth(), name="volsmooth") -surfsmooth = pe.MapNode( - interface=fs.Smooth(proj_frac_avg=(0, 1, 0.1)), - name="surfsmooth", - iterfield=['in_file']) -""" -We connect up the different nodes to implement the preprocessing workflow. -""" - -preproc.connect([ - (realign, surfregister, [('mean_image', 'source_file')]), - (FreeSurferSource, ApplyVolTransform, [('brainmask', 'target_file')]), - (surfregister, ApplyVolTransform, [('out_reg_file', 'reg_file')]), - (realign, ApplyVolTransform, [('mean_image', 'source_file')]), - (ApplyVolTransform, Threshold, [('transformed_file', 'in_file')]), - (realign, art, [('realignment_parameters', 'realignment_parameters'), - ('realigned_files', 'realigned_files')]), - (Threshold, art, [('binary_file', 'mask_file')]), - (realign, volsmooth, [('realigned_files', 'in_files')]), - (realign, surfsmooth, [('realigned_files', 'in_file')]), - (surfregister, surfsmooth, [('out_reg_file', 'reg_file')]), -]) -""" -Set up volume analysis workflow -------------------------------- - -""" - -volanalysis = pe.Workflow(name='volanalysis') -""" -Generate SPM-specific design information using -:class:`nipype.interfaces.spm.SpecifyModel`. -""" - -modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec") -modelspec.inputs.concatenate_runs = True -""" -Generate a first level SPM.mat file for analysis -:class:`nipype.interfaces.spm.Level1Design`. -""" - -level1design = pe.Node(interface=spm.Level1Design(), name="level1design") -level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} -""" -Use :class:`nipype.interfaces.spm.EstimateModel` to determine the -parameters of the model. -""" - -level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} -""" -Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the -first level contrasts specified in a few steps above. 
-""" - -contrastestimate = pe.Node( - interface=spm.EstimateContrast(), name="contrastestimate") - -volanalysis.connect([ - (modelspec, level1design, [('session_info', 'session_info')]), - (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), - (level1estimate, contrastestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), -]) -""" -Set up surface analysis workflow --------------------------------- - -We simply clone the volume analysis workflow. -""" - -surfanalysis = volanalysis.clone(name='surfanalysis') -""" -Set up volume normalization workflow ------------------------------------- - -The volume analysis is performed in individual space. Therefore, post analysis -we normalize the contrast images to MNI space. -""" - -volnorm = pe.Workflow(name='volnormconimages') -""" -Use :class:`nipype.interfaces.freesurfer.MRIConvert` to convert the brainmask, -an mgz file and the contrast images (nifti-1 img/hdr pairs), to single volume -nifti images. -""" - -convert = pe.Node(interface=fs.MRIConvert(out_type='nii'), name='convert2nii') -convert2 = pe.MapNode( - interface=fs.MRIConvert(out_type='nii'), - iterfield=['in_file'], - name='convertimg2nii') -""" -Use :class:`nipype.interfaces.spm.Segment` to segment the structural image and -generate the transformation file to MNI space. - -.. note:: - - Segment takes longer than usual because the nose is wrapped behind - the head in the structural image. -""" - -segment = pe.Node(interface=spm.Segment(), name='segment') -""" -Use :class:`nipype.interfaces.freesurfer.ApplyVolTransform` to convert contrast -images into freesurfer space. -""" - -normwreg = pe.MapNode( - interface=fs.ApplyVolTransform(), - iterfield=['source_file'], - name='applyreg2con') -""" -Use :class:`nipype.interfaces.spm.Normalize` to normalize the contrast images -to MNI space -""" - -normalize = pe.Node(interface=spm.Normalize(jobtype='write'), name='norm2mni') -""" -Connect up the volume normalization components -""" - -volnorm.connect([ - (convert, segment, [('out_file', 'data')]), - (convert2, normwreg, [('out_file', 'source_file')]), - (segment, normalize, [('transformation_mat', 'parameter_file')]), - (normwreg, normalize, [('transformed_file', 'apply_to_files')]), -]) -""" -Preproc + Analysis + VolumeNormalization workflow -------------------------------------------------- - -Connect up the lower level workflows into an integrated analysis. In addition, -we add an input node that specifies all the inputs needed for this -workflow. Thus, one can import this workflow and connect it to their own data -sources. An example with the nifti-tutorial data is provided below. - -For this workflow the only necessary inputs are the functional images, a -freesurfer subject id corresponding to recon-all processed data, the session -information for the functional runs and the contrasts to be evaluated. -""" - -inputnode = pe.Node( - interface=util.IdentityInterface( - fields=['func', 'subject_id', 'session_info', 'contrasts']), - name='inputnode') -""" -Connect the components into an integrated workflow. 
-""" - -l1pipeline = pe.Workflow(name='firstlevel') -l1pipeline.connect([ - (inputnode, preproc, [ - ('func', 'realign.in_files'), - ('subject_id', 'surfregister.subject_id'), - ('subject_id', 'fssource.subject_id'), - ]), - (inputnode, volanalysis, [('session_info', 'modelspec.subject_info'), - ('contrasts', 'contrastestimate.contrasts')]), - (inputnode, surfanalysis, [('session_info', 'modelspec.subject_info'), - ('contrasts', 'contrastestimate.contrasts')]), -]) - -# attach volume and surface model specification and estimation components -l1pipeline.connect( - [(preproc, volanalysis, - [('realign.realignment_parameters', 'modelspec.realignment_parameters'), - ('volsmooth.smoothed_files', 'modelspec.functional_runs'), - ('art.outlier_files', - 'modelspec.outlier_files'), ('threshold.binary_file', - 'level1design.mask_image')]), - (preproc, surfanalysis, - [('realign.realignment_parameters', 'modelspec.realignment_parameters'), - ('surfsmooth.smoothed_file', 'modelspec.functional_runs'), - ('art.outlier_files', - 'modelspec.outlier_files'), ('threshold.binary_file', - 'level1design.mask_image')])]) - -# attach volume contrast normalization components -l1pipeline.connect([(preproc, volnorm, - [('fssource.orig', 'convert2nii.in_file'), - ('surfregister.out_reg_file', 'applyreg2con.reg_file'), - ('fssource.orig', 'applyreg2con.target_file')]), - (volanalysis, volnorm, [ - ('contrastestimate.con_images', - 'convertimg2nii.in_file'), - ])]) -""" -Data specific components ------------------------- - -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -# Specify the location of the data. -data_dir = os.path.abspath('data') -# Specify the subject directories -subject_list = ['s1', 's3'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. 
-""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -Set preprocessing parameters ----------------------------- -""" - -l1pipeline.inputs.preproc.fssource.subjects_dir = subjects_dir -l1pipeline.inputs.preproc.volsmooth.fwhm = 4 -l1pipeline.inputs.preproc.surfsmooth.surface_fwhm = 5 -l1pipeline.inputs.preproc.surfsmooth.vol_fwhm = 4 -""" -Experimental paradigm specific components ------------------------------------------ - -Here we create a function that returns subject-specific information -about the experimental paradigm. This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information -necessary to generate an SPM design matrix. In this tutorial, the same -paradigm was used for every participant. -""" - - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names], - )) - return output - - -"""Setup the contrast structure that needs to be evaluated. This is a -list of lists. The inner list specifies the contrasts and has the -following format - [Name,Stat,[list of condition names],[weights on -those conditions]. The condition names must match the `names` listed -in the `subjectinfo` function described above. -""" - -cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]) -cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]) -contrasts = [cont1, cont2] -""" -Set up node specific inputs ---------------------------- - -We replicate the modelspec parameters separately for the surface- and -volume-based analysis. -""" - -modelspecref = l1pipeline.inputs.volanalysis.modelspec -modelspecref.input_units = 'secs' -modelspecref.time_repetition = 3. -modelspecref.high_pass_filter_cutoff = 120 - -modelspecref = l1pipeline.inputs.surfanalysis.modelspec -modelspecref.input_units = 'secs' -modelspecref.time_repetition = 3. -modelspecref.high_pass_filter_cutoff = 120 - -l1designref = l1pipeline.inputs.volanalysis.level1design -l1designref.timing_units = modelspecref.output_units -l1designref.interscan_interval = modelspecref.time_repetition - -l1designref = l1pipeline.inputs.surfanalysis.level1design -l1designref.timing_units = modelspecref.output_units -l1designref.interscan_interval = modelspecref.time_repetition - -l1pipeline.inputs.inputnode.contrasts = contrasts -""" -Setup the pipeline ------------------- - -The nodes created above do not describe the flow of data. They merely -describe the parameters used for each function. In this section we -setup the connections between the nodes such that appropriate outputs -from nodes are piped into appropriate inputs of other nodes. - -Use the :class:`nipype.pipeline.engine.Workfow` to create a -graph-based execution pipeline for first level analysis. 
-""" - -level1 = pe.Workflow(name="level1") -level1.base_dir = os.path.abspath('volsurf_tutorial/workingdir') - -level1.connect([ - (infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, l1pipeline, [('func', 'inputnode.func')]), - (infosource, l1pipeline, [('subject_id', 'inputnode.subject_id'), - (('subject_id', subjectinfo), - 'inputnode.session_info')]), -]) -""" -Store the output ----------------- - -Create a datasink node to store the contrast images and registration info -""" - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.abspath('volsurf_tutorial/l1out') -datasink.inputs.substitutions = [] - - -def getsubs(subject_id): - subs = [('_subject_id_%s/' % subject_id, '')] - return subs - - -# store relevant outputs from various stages of the 1st level analysis -level1.connect([(infosource, datasink, [('subject_id', 'container'), - (('subject_id', getsubs), - 'substitutions')]), - (l1pipeline, datasink, [ - ('surfanalysis.contrastestimate.con_images', 'contrasts'), - ('preproc.surfregister.out_reg_file', 'registrations'), - ])]) -""" -Run the analysis pipeline and also create a dot+png (if graphviz is available) -that visually represents the workflow. -""" - -if __name__ == '__main__': - level1.run() - level1.write_graph(graph2use='flat') -""" -Level2 surface-based pipeline ------------------------------ - -Create a level2 workflow -""" - -l2flow = pe.Workflow(name='l2out') -l2flow.base_dir = os.path.abspath('volsurf_tutorial') -""" -Setup a dummy node to iterate over contrasts and hemispheres -""" - -l2inputnode = pe.Node( - interface=util.IdentityInterface(fields=['contrasts', 'hemi']), - name='inputnode') -l2inputnode.iterables = [('contrasts', list(range(1, - len(contrasts) + 1))), - ('hemi', ['lh', 'rh'])] -""" -Use a datagrabber node to collect contrast images and registration files -""" - -l2source = pe.Node( - interface=nio.DataGrabber(infields=['con_id'], outfields=['con', 'reg']), - name='l2source') -l2source.inputs.base_directory = os.path.abspath('volsurf_tutorial/l1out') -l2source.inputs.template = '*' -l2source.inputs.field_template = dict( - con='*/contrasts/con_%04d.img', reg='*/registrations/*.dat') -l2source.inputs.template_args = dict(con=[['con_id']], reg=[[]]) -l2source.inputs.sort_filelist = True - -l2flow.connect(l2inputnode, 'contrasts', l2source, 'con_id') -""" -Merge contrast images and registration files -""" - -mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') - - -def ordersubjects(files, subj_list): - outlist = [] - for s in subj_list: - for f in files: - if '/%s/' % s in f: - outlist.append(f) - continue - print(outlist) - return outlist - - -l2flow.connect(l2source, ('con', ordersubjects, subject_list), mergenode, - 'in1') -l2flow.connect(l2source, ('reg', ordersubjects, subject_list), mergenode, - 'in2') -""" -Concatenate contrast images projected to fsaverage -""" - -l2concat = pe.Node(interface=fs.MRISPreproc(), name='concat') -l2concat.inputs.target = 'fsaverage' -l2concat.inputs.fwhm = 5 - - -def list2tuple(listoflist): - return [tuple(x) for x in listoflist] - - -l2flow.connect(l2inputnode, 'hemi', l2concat, 'hemi') -l2flow.connect(mergenode, ('out', list2tuple), l2concat, 'vol_measure_file') -""" -Perform a one sample t-test -""" - -l2ttest = pe.Node(interface=fs.OneSampleTTest(), name='onesample') -l2flow.connect(l2concat, 'out_file', l2ttest, 'in_file') -""" -Run the analysis pipeline and also create a dot+png (if graphviz is available) -that 
visually represents the workflow. -""" - -if __name__ == '__main__': - l2flow.run() - l2flow.write_graph(graph2use='flat') diff --git a/examples/fmri_fsl.py b/examples/fmri_fsl.py deleted file mode 100755 index 13ce9fa8da..0000000000 --- a/examples/fmri_fsl.py +++ /dev/null @@ -1,629 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -========= -fMRI: FSL -========= - -A workflow that uses fsl to perform a first level analysis on the nipype -tutorial data set:: - - python fmri_fsl.py - - -First tell python where to find the appropriate functions. -""" - -from __future__ import print_function -from __future__ import division -from builtins import str -from builtins import range - -import os # system functions - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.fsl as fsl # fsl -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.modelgen as model # model generation -import nipype.algorithms.rapidart as ra # artifact detection -""" -Preliminaries -------------- - -Setup any package specific configuration. The output file format for FSL -routines is being set to compressed NIFTI. -""" - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') -""" -Setting up workflows --------------------- - -In this tutorial we will be setting up a hierarchical workflow for fsl -analysis. This will demonstrate how pre-defined workflows can be setup and -shared across users, projects and labs. - - -Setup preprocessing workflow ----------------------------- - -This is a generic fsl feat preprocessing workflow encompassing skull stripping, -motion correction and smoothing operations. - -""" - -preproc = pe.Workflow(name='preproc') -""" -Set up a node to define all inputs required for the preprocessing workflow -""" - -inputnode = pe.Node( - interface=util.IdentityInterface(fields=[ - 'func', - 'struct', - ]), - name='inputspec') -""" -Convert functional images to float representation. Since there can be more than -one functional run we use a MapNode to convert each run. 
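A ``MapNode`` runs one copy of its interface per element of each ``iterfield``
input and gathers the results back into a list. A minimal sketch of that
behaviour with a toy ``Function`` interface (the names here are purely
illustrative and not part of this workflow)::

    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function

    def square(x):
        return x * x

    squarer = pe.MapNode(
        Function(input_names=['x'], output_names=['y'], function=square),
        iterfield=['x'],
        name='squarer')
    squarer.inputs.x = [1, 2, 3]
    result = squarer.run()  # the function runs once per element
    # result.outputs.y -> [1, 4, 9]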
-""" - -img2float = pe.MapNode( - interface=fsl.ImageMaths( - out_data_type='float', op_string='', suffix='_dtype'), - iterfield=['in_file'], - name='img2float') -preproc.connect(inputnode, 'func', img2float, 'in_file') -""" -Extract the middle volume of the first run as the reference -""" - -extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1), name='extractref') -""" -Define a function to pick the first file from a list of files -""" - - -def pickfirst(files): - if isinstance(files, list): - return files[0] - else: - return files - - -preproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file') -""" -Define a function to return the 1 based index of the middle volume -""" - - -def getmiddlevolume(func): - from nibabel import load - funcfile = func - if isinstance(func, list): - funcfile = func[0] - _, _, _, timepoints = load(funcfile).shape - return int(timepoints / 2) - 1 - - -preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min') -""" -Realign the functional runs to the middle volume of the first run -""" - -motion_correct = pe.MapNode( - interface=fsl.MCFLIRT(save_mats=True, save_plots=True), - name='realign', - iterfield=['in_file']) -preproc.connect(img2float, 'out_file', motion_correct, 'in_file') -preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') -""" -Plot the estimated motion parameters -""" - -plot_motion = pe.MapNode( - interface=fsl.PlotMotionParams(in_source='fsl'), - name='plot_motion', - iterfield=['in_file']) -plot_motion.iterables = ('plot_type', ['rotations', 'translations']) -preproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') -""" -Extract the mean volume of the first functional run -""" - -meanfunc = pe.Node( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - name='meanfunc') -preproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file') -""" -Strip the skull from the mean functional to generate a mask -""" - -meanfuncmask = pe.Node( - interface=fsl.BET(mask=True, no_output=True, frac=0.3), - name='meanfuncmask') -preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') -""" -Mask the functional runs with the extracted mask -""" - -maskfunc = pe.MapNode( - interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), - iterfield=['in_file'], - name='maskfunc') -preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') -preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') -""" -Determine the 2nd and 98th percentile intensities of each functional run -""" - -getthresh = pe.MapNode( - interface=fsl.ImageStats(op_string='-p 2 -p 98'), - iterfield=['in_file'], - name='getthreshold') -preproc.connect(maskfunc, 'out_file', getthresh, 'in_file') -""" -Threshold the first run of the functional data at 10% of the 98th percentile -""" - -threshold = pe.Node( - interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), - name='threshold') -preproc.connect(maskfunc, ('out_file', pickfirst), threshold, 'in_file') -""" -Define a function to get 10% of the intensity -""" - - -def getthreshop(thresh): - return '-thr %.10f -Tmin -bin' % (0.1 * thresh[0][1]) - - -preproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string') -""" -Determine the median value of the functional runs using the mask -""" - -medianval = pe.MapNode( - interface=fsl.ImageStats(op_string='-k %s -p 50'), - iterfield=['in_file'], - name='medianval') -preproc.connect(motion_correct, 'out_file', medianval, 'in_file') -preproc.connect(threshold, 'out_file', medianval, 
'mask_file') -""" -Dilate the mask -""" - -dilatemask = pe.Node( - interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), - name='dilatemask') -preproc.connect(threshold, 'out_file', dilatemask, 'in_file') -""" -Mask the motion corrected functional runs with the dilated mask -""" - -maskfunc2 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file'], - name='maskfunc2') -preproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') -preproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') -""" -Determine the mean image from each functional run -""" - -meanfunc2 = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc2') -preproc.connect(maskfunc2, 'out_file', meanfunc2, 'in_file') -""" -Merge the median values with the mean functional images into a coupled list -""" - -mergenode = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge') -preproc.connect(meanfunc2, 'out_file', mergenode, 'in1') -preproc.connect(medianval, 'out_stat', mergenode, 'in2') -""" -Smooth each run using SUSAN with the brightness threshold set to 75% of the -median value for each run and a mask constituting the mean functional -""" - -smooth = pe.MapNode( - interface=fsl.SUSAN(), - iterfield=['in_file', 'brightness_threshold', 'usans'], - name='smooth') -""" -Define a function to get the brightness threshold for SUSAN -""" - - -def getbtthresh(medianvals): - return [0.75 * val for val in medianvals] - - -def getusans(x): - return [[tuple([val[0], 0.75 * val[1]])] for val in x] - - -preproc.connect(maskfunc2, 'out_file', smooth, 'in_file') -preproc.connect(medianval, ('out_stat', getbtthresh), smooth, - 'brightness_threshold') -preproc.connect(mergenode, ('out', getusans), smooth, 'usans') -""" -Mask the smoothed data with the dilated mask -""" - -maskfunc3 = pe.MapNode( - interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), - iterfield=['in_file'], - name='maskfunc3') -preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file') -preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') -""" -Scale each volume of the run so that the median value of the run is set to 10000 -""" - -intnorm = pe.MapNode( - interface=fsl.ImageMaths(suffix='_intnorm'), - iterfield=['in_file', 'op_string'], - name='intnorm') -preproc.connect(maskfunc3, 'out_file', intnorm, 'in_file') -""" -Define a function to get the scaling factor for intensity normalization -""" - - -def getinormscale(medianvals): - return ['-mul %.10f' % (10000. 
/ val) for val in medianvals] - - -preproc.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string') -""" -Perform temporal highpass filtering on the data -""" - -highpass = pe.MapNode( - interface=fsl.ImageMaths(suffix='_tempfilt'), - iterfield=['in_file'], - name='highpass') -preproc.connect(intnorm, 'out_file', highpass, 'in_file') -""" -Generate a mean functional image from the first run -""" - -meanfunc3 = pe.MapNode( - interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), - iterfield=['in_file'], - name='meanfunc3') -preproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file') -""" -Strip the structural image and coregister the mean functional image to the -structural image -""" - -nosestrip = pe.Node(interface=fsl.BET(frac=0.3), name='nosestrip') -skullstrip = pe.Node(interface=fsl.BET(mask=True), name='stripstruct') - -coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') -""" -Use :class:`nipype.algorithms.rapidart` to determine which of the -images in the functional series are outliers based on deviations in -intensity and/or movement. -""" - -art = pe.MapNode( - interface=ra.ArtifactDetect( - use_differences=[True, False], - use_norm=True, - norm_threshold=1, - zintensity_threshold=3, - parameter_source='FSL', - mask_type='file'), - iterfield=['realigned_files', 'realignment_parameters'], - name="art") - -preproc.connect([ - (inputnode, nosestrip, [('struct', 'in_file')]), - (nosestrip, skullstrip, [('out_file', 'in_file')]), - (skullstrip, coregister, [('out_file', 'in_file')]), - (meanfunc2, coregister, [(('out_file', pickfirst), 'reference')]), - (motion_correct, art, [('par_file', 'realignment_parameters')]), - (maskfunc2, art, [('out_file', 'realigned_files')]), - (dilatemask, art, [('out_file', 'mask_file')]), -]) -""" -Set up model fitting workflow ------------------------------ - -""" - -modelfit = pe.Workflow(name='modelfit') -""" -Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information. 
-""" - -modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") -""" -Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf -file for analysis -""" - -level1design = pe.Node(interface=fsl.Level1Design(), name="level1design") -""" -Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat -file for use by FILMGLS -""" - -modelgen = pe.MapNode( - interface=fsl.FEATModel(), - name='modelgen', - iterfield=['fsf_file', 'ev_files']) -""" -Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a -mat file and a functional run -""" - -modelestimate = pe.MapNode( - interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5, threshold=1000), - name='modelestimate', - iterfield=['design_file', 'in_file']) -""" -Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates -""" - -conestimate = pe.MapNode( - interface=fsl.ContrastMgr(), - name='conestimate', - iterfield=[ - 'tcon_file', 'param_estimates', 'sigmasquareds', 'corrections', - 'dof_file' - ]) - -modelfit.connect([ - (modelspec, level1design, [('session_info', 'session_info')]), - (level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files', - 'ev_files')]), - (modelgen, modelestimate, [('design_file', 'design_file')]), - (modelgen, conestimate, [('con_file', 'tcon_file')]), - (modelestimate, conestimate, - [('param_estimates', 'param_estimates'), ('sigmasquareds', - 'sigmasquareds'), - ('corrections', 'corrections'), ('dof_file', 'dof_file')]), -]) -""" -Set up fixed-effects workflow ------------------------------ - -""" - -fixed_fx = pe.Workflow(name='fixedfx') -""" -Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and -varcopes for each condition -""" - -copemerge = pe.MapNode( - interface=fsl.Merge(dimension='t'), - iterfield=['in_files'], - name="copemerge") - -varcopemerge = pe.MapNode( - interface=fsl.Merge(dimension='t'), - iterfield=['in_files'], - name="varcopemerge") -""" -Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition -specific level 2 model design files -""" - -level2model = pe.Node(interface=fsl.L2Model(), name='l2model') -""" -Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model -""" - -flameo = pe.MapNode( - interface=fsl.FLAMEO(run_mode='fe'), - name="flameo", - iterfield=['cope_file', 'var_cope_file']) - -fixed_fx.connect([ - (copemerge, flameo, [('merged_file', 'cope_file')]), - (varcopemerge, flameo, [('merged_file', 'var_cope_file')]), - (level2model, flameo, [('design_mat', 'design_file'), - ('design_con', 't_con_file'), ('design_grp', - 'cov_split_file')]), -]) -""" -Set up first-level workflow ---------------------------- - -""" - - -def sort_copes(files): - numelements = len(files[0]) - outfiles = [] - for i in range(numelements): - outfiles.insert(i, []) - for j, elements in enumerate(files): - outfiles[i].append(elements[i]) - return outfiles - - -def num_copes(files): - return len(files) - - -firstlevel = pe.Workflow(name='firstlevel') -firstlevel.connect( - [(preproc, modelfit, [('highpass.out_file', 'modelspec.functional_runs'), - ('art.outlier_files', 'modelspec.outlier_files'), - ('highpass.out_file', 'modelestimate.in_file')]), - (preproc, fixed_fx, - [('coregister.out_file', 'flameo.mask_file')]), (modelfit, fixed_fx, [ - (('conestimate.copes', sort_copes), 'copemerge.in_files'), - (('conestimate.varcopes', sort_copes), 'varcopemerge.in_files'), - (('conestimate.copes', num_copes), 'l2model.num_copes'), - ])]) -""" -Experiment specific components 
------------------------------- - -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -# Specify the location of the data. -data_dir = os.path.abspath('data') -# Specify the subject directories -subject_list = ['s1'] # , 's3'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataSource` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -Use the get_node function to retrieve an internal node by name. Then set the -iterables on this node to perform two different extents of smoothing. -""" - -smoothnode = firstlevel.get_node('preproc.smooth') -assert (str(smoothnode) == 'preproc.smooth') -smoothnode.iterables = ('fwhm', [5., 10.]) - -hpcutoff = 120 -TR = 3. # ensure float -firstlevel.inputs.preproc.highpass.suffix = '_hpf' -firstlevel.inputs.preproc.highpass.op_string = '-bptf %d -1' % (hpcutoff / TR) -""" -Setup a function that returns subject-specific information about the -experimental paradigm. This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information necessary -to generate an SPM design matrix. In this tutorial, the same paradigm was used -for every participant. Other examples of this function are available in the -`doc/examples` folder. Note: Python knowledge required here. 
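For reference, the onsets produced by the function below work out to the same
two vectors for each of the four runs::

    list(range(15, 240, 60))  # 'Task-Odd' onsets:  [15, 75, 135, 195]
    list(range(45, 240, 60))  # 'Task-Even' onsets: [45, 105, 165, 225]
    # each block lasts 15 s, hence durations=[[15], [15]]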
-""" - - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names], - amplitudes=None, - tmod=None, - pmod=None, - regressor_names=None, - regressors=None)) - return output - - -""" -Setup the contrast structure that needs to be evaluated. This is a list of -lists. The inner list specifies the contrasts and has the following format - -[Name,Stat,[list of condition names],[weights on those conditions]. The -condition names must match the `names` listed in the `subjectinfo` function -described above. -""" - -cont1 = ['Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]] -cont2 = ['Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]] -cont3 = ['Task', 'F', [cont1, cont2]] -contrasts = [cont1, cont2] - -firstlevel.inputs.modelfit.modelspec.input_units = 'secs' -firstlevel.inputs.modelfit.modelspec.time_repetition = TR -firstlevel.inputs.modelfit.modelspec.high_pass_filter_cutoff = hpcutoff - -firstlevel.inputs.modelfit.level1design.interscan_interval = TR -firstlevel.inputs.modelfit.level1design.bases = {'dgamma': {'derivs': False}} -firstlevel.inputs.modelfit.level1design.contrasts = contrasts -firstlevel.inputs.modelfit.level1design.model_serial_correlations = True -""" -Set up complete workflow -======================== -""" - -l1pipeline = pe.Workflow(name="level1") -l1pipeline.base_dir = os.path.abspath('./fsl/workingdir') -l1pipeline.config = { - "execution": { - "crashdump_dir": os.path.abspath('./fsl/crashdumps') - } -} - -l1pipeline.connect([ - (infosource, datasource, [('subject_id', 'subject_id')]), - (infosource, firstlevel, [(('subject_id', subjectinfo), - 'modelfit.modelspec.subject_info')]), - (datasource, firstlevel, [ - ('struct', 'preproc.inputspec.struct'), - ('func', 'preproc.inputspec.func'), - ]), -]) -""" -Execute the pipeline --------------------- - -The code discussed above sets up all the necessary data structures with -appropriate parameters and the connectivity between the processes, but does not -generate any output. To actually run the analysis on the data the -``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. -""" - -if __name__ == '__main__': - l1pipeline.write_graph() - outgraph = l1pipeline.run() - # l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs':2}) diff --git a/examples/fmri_fsl_feeds.py b/examples/fmri_fsl_feeds.py deleted file mode 100755 index 5a90bf9213..0000000000 --- a/examples/fmri_fsl_feeds.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -================= -fMRI: FEEDS - FSL -================= - -A pipeline example that data from the FSL FEEDS set. Single subject, two -stimuli. 
- -You can find it at http://www.fmrib.ox.ac.uk/fsl/feeds/doc/index.html - -""" - -from __future__ import division -from builtins import range - -import os # system functions -from nipype.interfaces import io as nio # Data i/o -from nipype.interfaces import utility as niu # Utilities -from nipype.interfaces import fsl # fsl -from nipype.pipeline import engine as pe # pypeline engine -from nipype.algorithms import modelgen as model # model generation -from niflow.nipype1.workflows.fmri.fsl import ( - create_featreg_preproc, create_modelfit_workflow, create_reg_workflow) -from nipype.interfaces.base import Bunch -""" -Preliminaries -------------- - -Setup any package specific configuration. The output file format for FSL -routines is being set to compressed NIFTI. -""" - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') -""" -Experiment specific components ------------------------------- - -This tutorial does a single subject analysis so we are not using infosource and -iterables -""" - -# Specify the location of the FEEDS data. You can find it at http://www.fmrib.ox.ac.uk/fsl/feeds/doc/index.html - -inputnode = pe.Node( - niu.IdentityInterface(fields=['in_data']), name='inputnode') -# Specify the subject directories -# Map field names to individual subject runs. -info = dict(func=[['fmri']], struct=[['structural']]) -""" -Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the -information from above about the layout of our data. The -:class:`nipype.pipeline.Node` module wraps the interface object and provides -additional housekeeping and pipeline specific functionality. -""" - -datasource = pe.Node( - interface=nio.DataGrabber(outfields=['func', 'struct']), name='datasource') -datasource.inputs.template = 'feeds/data/%s.nii.gz' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True - -preproc = create_featreg_preproc(whichvol='first') -TR = 3. -preproc.inputs.inputspec.fwhm = 5 -preproc.inputs.inputspec.highpass = 100. 
/ TR - -modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec") -modelspec.inputs.input_units = 'secs' -modelspec.inputs.time_repetition = TR -modelspec.inputs.high_pass_filter_cutoff = 100 -modelspec.inputs.subject_info = [ - Bunch( - conditions=['Visual', 'Auditory'], - onsets=[ - list(range(0, int(180 * TR), 60)), - list(range(0, int(180 * TR), 90)) - ], - durations=[[30], [45]], - amplitudes=None, - tmod=None, - pmod=None, - regressor_names=None, - regressors=None) -] - -modelfit = create_modelfit_workflow(f_contrasts=True) -modelfit.inputs.inputspec.interscan_interval = TR -modelfit.inputs.inputspec.model_serial_correlations = True -modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}} -cont1 = ['Visual>Baseline', 'T', ['Visual', 'Auditory'], [1, 0]] -cont2 = ['Auditory>Baseline', 'T', ['Visual', 'Auditory'], [0, 1]] -cont3 = ['Task', 'F', [cont1, cont2]] -modelfit.inputs.inputspec.contrasts = [cont1, cont2, cont3] - -registration = create_reg_workflow() -registration.inputs.inputspec.target_image = fsl.Info.standard_image( - 'MNI152_T1_2mm.nii.gz') -registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image( - 'MNI152_T1_2mm_brain.nii.gz') -registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm' -""" -Set up complete workflow -======================== -""" - -l1pipeline = pe.Workflow(name="level1") -l1pipeline.base_dir = os.path.abspath('./fsl_feeds/workingdir') -l1pipeline.config = { - "execution": { - "crashdump_dir": os.path.abspath('./fsl_feeds/crashdumps') - } -} - -l1pipeline.connect(inputnode, 'in_data', datasource, 'base_directory') -l1pipeline.connect(datasource, 'func', preproc, 'inputspec.func') -l1pipeline.connect(preproc, 'outputspec.highpassed_files', modelspec, - 'functional_runs') -l1pipeline.connect(preproc, 'outputspec.motion_parameters', modelspec, - 'realignment_parameters') -l1pipeline.connect(modelspec, 'session_info', modelfit, - 'inputspec.session_info') -l1pipeline.connect(preproc, 'outputspec.highpassed_files', modelfit, - 'inputspec.functional_data') -l1pipeline.connect(preproc, 'outputspec.mean', registration, - 'inputspec.mean_image') -l1pipeline.connect(datasource, 'struct', registration, - 'inputspec.anatomical_image') -l1pipeline.connect(modelfit, 'outputspec.zfiles', registration, - 'inputspec.source_files') -""" -Setup the datasink -""" - -datasink = pe.Node( - interface=nio.DataSink(parameterization=False), name="datasink") -datasink.inputs.base_directory = os.path.abspath('./fsl_feeds/l1out') -datasink.inputs.substitutions = [ - ('fmri_dtype_mcf_mask_smooth_mask_gms_mean_warp', 'meanfunc') -] -# store relevant outputs from various stages of the 1st level analysis -l1pipeline.connect(registration, 'outputspec.transformed_files', datasink, - 'level1.@Z') -l1pipeline.connect(registration, 'outputspec.transformed_mean', datasink, - 'meanfunc') -""" -Execute the pipeline --------------------- - -The code discussed above sets up all the necessary data structures with -appropriate parameters and the connectivity between the processes, but does not -generate any output. To actually run the analysis on the data the -``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. 
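The same call optionally accepts an execution plugin; mirroring the
commented-out variant in the other FSL examples of this set, the node
executions can be distributed over local cores with::

    l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 2})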
-""" - -if __name__ == '__main__': - l1pipeline.inputs.inputnode.in_data = os.path.abspath('feeds/data') - l1pipeline.run() diff --git a/examples/fmri_fsl_reuse.py b/examples/fmri_fsl_reuse.py deleted file mode 100755 index 5375f8a780..0000000000 --- a/examples/fmri_fsl_reuse.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -========================= -fMRI: FSL reuse workflows -========================= - -A workflow that uses fsl to perform a first level analysis on the nipype -tutorial data set:: - - python fmri_fsl_reuse.py - - -First tell python where to find the appropriate functions. -""" - -from __future__ import print_function -from __future__ import division -from builtins import str -from builtins import range - -import os # system functions -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.fsl as fsl # fsl -from nipype.interfaces import utility as niu # Utilities -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.modelgen as model # model generation -import nipype.algorithms.rapidart as ra # artifact detection - -from niflow.nipype1.workflows.fmri.fsl import (create_featreg_preproc, - create_modelfit_workflow, - create_fixed_effects_flow) -""" -Preliminaries -------------- - -Setup any package specific configuration. The output file format for FSL -routines is being set to compressed NIFTI. -""" - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') - -level1_workflow = pe.Workflow(name='level1flow') - -preproc = create_featreg_preproc(whichvol='first') - -modelfit = create_modelfit_workflow() - -fixed_fx = create_fixed_effects_flow() -""" -Add artifact detection and model specification nodes between the preprocessing -and modelfitting workflows. 
-""" - -art = pe.MapNode( - ra.ArtifactDetect( - use_differences=[True, False], - use_norm=True, - norm_threshold=1, - zintensity_threshold=3, - parameter_source='FSL', - mask_type='file'), - iterfield=['realigned_files', 'realignment_parameters', 'mask_file'], - name="art") - -modelspec = pe.Node(model.SpecifyModel(), name="modelspec") - -level1_workflow.connect( - [(preproc, art, - [('outputspec.motion_parameters', 'realignment_parameters'), - ('outputspec.realigned_files', 'realigned_files'), ('outputspec.mask', - 'mask_file')]), - (preproc, modelspec, [('outputspec.highpassed_files', 'functional_runs'), - ('outputspec.motion_parameters', - 'realignment_parameters')]), (art, modelspec, - [('outlier_files', - 'outlier_files')]), - (modelspec, modelfit, [('session_info', 'inputspec.session_info')]), - (preproc, modelfit, [('outputspec.highpassed_files', - 'inputspec.functional_data')])]) -""" -Set up first-level workflow ---------------------------- - -""" - - -def sort_copes(files): - numelements = len(files[0]) - outfiles = [] - for i in range(numelements): - outfiles.insert(i, []) - for j, elements in enumerate(files): - outfiles[i].append(elements[i]) - return outfiles - - -def num_copes(files): - return len(files) - - -pickfirst = lambda x: x[0] - -level1_workflow.connect( - [(preproc, fixed_fx, [(('outputspec.mask', pickfirst), - 'flameo.mask_file')]), - (modelfit, fixed_fx, [ - (('outputspec.copes', sort_copes), 'inputspec.copes'), - ('outputspec.dof_file', 'inputspec.dof_files'), - (('outputspec.varcopes', sort_copes), 'inputspec.varcopes'), - (('outputspec.copes', num_copes), 'l2model.num_copes'), - ])]) -""" -Experiment specific components ------------------------------- - -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -inputnode = pe.Node( - niu.IdentityInterface(fields=['in_data']), name='inputnode') - -# Specify the subject directories -subject_list = ['s1'] # , 's3'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - niu.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Now we create a :class:`nipype.interfaces.io.DataSource` object and -fill in the information from above about the layout of our data. 
The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.template = 'nipype-tutorial/data/%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -""" -Use the get_node function to retrieve an internal node by name. Then set the -iterables on this node to perform two different extents of smoothing. -""" - -featinput = level1_workflow.get_node('featpreproc.inputspec') -featinput.iterables = ('fwhm', [5., 10.]) - -hpcutoff = 120. -TR = 3. -featinput.inputs.highpass = hpcutoff / (2. * TR) -""" -Setup a function that returns subject-specific information about the -experimental paradigm. This is used by the -:class:`nipype.modelgen.SpecifyModel` to create the information necessary -to generate an SPM design matrix. In this tutorial, the same paradigm was used -for every participant. Other examples of this function are available in the -`doc/examples` folder. Note: Python knowledge required here. -""" - - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names])) - return output - - -""" -Setup the contrast structure that needs to be evaluated. This is a list of -lists. The inner list specifies the contrasts and has the following format - -[Name,Stat,[list of condition names],[weights on those conditions]. The -condition names must match the `names` listed in the `subjectinfo` function -described above. -""" - -cont1 = ['Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]] -cont2 = ['Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]] -cont3 = ['Task', 'F', [cont1, cont2]] -contrasts = [cont1, cont2] - -modelspec.inputs.input_units = 'secs' -modelspec.inputs.time_repetition = TR -modelspec.inputs.high_pass_filter_cutoff = hpcutoff - -modelfit.inputs.inputspec.interscan_interval = TR -modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': False}} -modelfit.inputs.inputspec.contrasts = contrasts -modelfit.inputs.inputspec.model_serial_correlations = True -modelfit.inputs.inputspec.film_threshold = 1000 - -level1_workflow.base_dir = os.path.abspath('./fsl/workingdir') -level1_workflow.config['execution'] = dict( - crashdump_dir=os.path.abspath('./fsl/crashdumps')) - -level1_workflow.connect([ - (inputnode, datasource, [('in_data', 'base_directory')]), - (infosource, datasource, [('subject_id', 'subject_id')]), - (infosource, modelspec, [(('subject_id', subjectinfo), 'subject_info')]), - (datasource, preproc, [('func', 'inputspec.func')]), -]) -""" -Execute the pipeline --------------------- - -The code discussed above sets up all the necessary data structures with -appropriate parameters and the connectivity between the processes, but does not -generate any output. To actually run the analysis on the data the -``nipype.pipeline.engine.Pipeline.Run`` function needs to be called. 
-""" - -if __name__ == '__main__': - # level1_workflow.write_graph() - level1_workflow.run() - # level1_workflow.run(plugin='MultiProc', plugin_args={'n_procs':2}) diff --git a/examples/fmri_nipy_glm.py b/examples/fmri_nipy_glm.py deleted file mode 100755 index 0439050c7a..0000000000 --- a/examples/fmri_nipy_glm.py +++ /dev/null @@ -1,261 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -=================== -fMRI: NiPy GLM, SPM -=================== - -The fmri_nipy_glm.py integrates several interfaces to perform a first level -analysis on a two-subject data set. It is very similar to the spm_tutorial with -the difference of using nipy for fitting GLM model and estimating contrasts. -The tutorial can be found in the examples folder. Run the tutorial from inside -the nipype tutorial directory:: - - python fmri_nipy_glm.py - -""" - -from __future__ import print_function -from builtins import str -from builtins import range - -from nipype.interfaces.nipy.model import FitGLM, EstimateContrast -from nipype.interfaces.nipy.preprocess import ComputeMask -"""Import necessary modules from nipype.""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.spm as spm # spm -import nipype.interfaces.matlab as mlab # how to run matlab -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.rapidart as ra # artifact detection -import nipype.algorithms.modelgen as model # model specification -import os # system functions -""" - -Preliminaries -------------- - -Set any package specific configuration. The output file format -for FSL routines is being set to uncompressed NIFTI and a specific -version of matlab is being used. The uncompressed format is required -because SPM does not handle compressed NIFTI. -""" - -# Set the way matlab should be called -mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") -"""The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -# Specify the location of the data. -data_dir = os.path.abspath('data') -# Specify the subject directories -subject_list = ['s1'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. 
In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Preprocessing pipeline nodes ----------------------------- - -Now we create a :class:`nipype.interfaces.io.DataSource` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -"""Use :class:`nipype.interfaces.spm.Realign` for motion correction -and register all images to the mean image. -""" - -realign = pe.Node(interface=spm.Realign(), name="realign") -realign.inputs.register_to_mean = True - -compute_mask = pe.Node(interface=ComputeMask(), name="compute_mask") -"""Use :class:`nipype.algorithms.rapidart` to determine which of the -images in the functional series are outliers based on deviations in -intensity or movement. -""" - -art = pe.Node(interface=ra.ArtifactDetect(), name="art") -art.inputs.use_differences = [True, False] -art.inputs.use_norm = True -art.inputs.norm_threshold = 1 -art.inputs.zintensity_threshold = 3 -art.inputs.mask_type = 'file' -art.inputs.parameter_source = 'SPM' -"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid -body registration of the functional data to the structural data. -""" - -coregister = pe.Node(interface=spm.Coregister(), name="coregister") -coregister.inputs.jobtype = 'estimate' -"""Smooth the functional data using -:class:`nipype.interfaces.spm.Smooth`. -""" - -smooth = pe.Node(interface=spm.Smooth(), name="smooth") -smooth.inputs.fwhm = 4 -""" -Set up analysis components --------------------------- - -Here we create a function that returns subject-specific information -about the experimental paradigm. This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information -necessary to generate an SPM design matrix. In this tutorial, the same -paradigm was used for every participant. -""" - - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names], - amplitudes=None, - tmod=None, - pmod=None, - regressor_names=None, - regressors=None)) - return output - - -"""Setup the contrast structure that needs to be evaluated. This is a -list of lists. The inner list specifies the contrasts and has the -following format - [Name,Stat,[list of condition names],[weights on -those conditions]. The condition names must match the `names` listed -in the `subjectinfo` function described above. -""" - -cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]) -cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]) -contrasts = [cont1, cont2] -"""Generate design information using -:class:`nipype.interfaces.spm.SpecifyModel`. 
nipy accepts only design specified
-in seconds so "output_units" always has to be set to "secs".
-"""
-
-modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec")
-modelspec.inputs.concatenate_runs = True
-modelspec.inputs.input_units = 'secs'
-modelspec.inputs.output_units = 'secs'
-modelspec.inputs.time_repetition = 3.
-modelspec.inputs.high_pass_filter_cutoff = 120
-"""Fit the GLM model using nipy and the ordinary least squares method.
-"""
-
-model_estimate = pe.Node(interface=FitGLM(), name="model_estimate")
-model_estimate.inputs.TR = 3.
-model_estimate.inputs.model = "spherical"
-model_estimate.inputs.method = "ols"
-"""Estimate the contrasts. The format of the contrasts definition is the same as
-for FSL and SPM.
-"""
-
-contrast_estimate = pe.Node(
-    interface=EstimateContrast(), name="contrast_estimate")
-cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5])
-cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1])
-contrast_estimate.inputs.contrasts = [cont1, cont2]
-"""
-Setup the pipeline
-------------------
-
-The nodes created above do not describe the flow of data. They merely
-describe the parameters used for each function. In this section we
-set up the connections between the nodes such that appropriate outputs
-from nodes are piped into appropriate inputs of other nodes.
-
-Use the :class:`nipype.pipeline.engine.Pipeline` to create a
-graph-based execution pipeline for first level analysis. The config
-options tell the pipeline engine to use `workdir` as the disk
-location to use when running the processes and keeping their
-outputs. The `use_parameterized_dirs` tells the engine to create
-sub-directories under `workdir` corresponding to the iterables in the
-pipeline. Thus for this pipeline there will be subject specific
-sub-directories.
-
-The ``nipype.pipeline.engine.Pipeline.connect`` function creates the
-links between the processes, i.e., how data should flow in and out of
-the processing nodes.
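``connect`` accepts either one link at a time or a list of
``(source, destination, [(output, input), ...])`` tuples. As an illustrative
sketch (using nodes defined above, once ``l1pipeline`` is created below), the
two forms are equivalent::

    # simple form: one link
    l1pipeline.connect(realign, 'mean_image', coregister, 'source')
    # list form, as used below (do not issue both for the same link):
    # l1pipeline.connect([(realign, coregister, [('mean_image', 'source')])])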
-""" - -l1pipeline = pe.Workflow(name="level1") -l1pipeline.base_dir = os.path.abspath('nipy_tutorial/workingdir') - -l1pipeline.connect( - [(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, realign, [('func', 'in_files')]), (realign, compute_mask, [ - ('mean_image', 'mean_volume') - ]), (realign, coregister, [('mean_image', 'source'), - ('realigned_files', - 'apply_to_files')]), (datasource, coregister, - [('struct', 'target')]), - (coregister, smooth, - [('coregistered_files', 'in_files')]), (realign, modelspec, [ - ('realignment_parameters', 'realignment_parameters') - ]), (smooth, modelspec, - [('smoothed_files', 'functional_runs')]), (realign, art, [ - ('realignment_parameters', 'realignment_parameters') - ]), (coregister, art, [('coregistered_files', 'realigned_files')]), - (compute_mask, art, [('brain_mask', 'mask_file')]), (art, modelspec, [ - ('outlier_files', 'outlier_files') - ]), (infosource, modelspec, [ - (("subject_id", subjectinfo), "subject_info") - ]), (modelspec, model_estimate, - [('session_info', 'session_info')]), (compute_mask, model_estimate, - [('brain_mask', 'mask')]), - (model_estimate, contrast_estimate, - [("beta", "beta"), ("nvbeta", "nvbeta"), ("s2", "s2"), ("dof", "dof"), - ("axis", "axis"), ("constants", "constants"), ("reg_names", - "reg_names")])]) - -if __name__ == '__main__': - l1pipeline.run() diff --git a/examples/fmri_slicer_coregistration.py b/examples/fmri_slicer_coregistration.py deleted file mode 100755 index 461fedabdf..0000000000 --- a/examples/fmri_slicer_coregistration.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -===================================== -fMRI: Coregistration - Slicer, BRAINS -===================================== - -This is currently not working and will raise an exception in release 0.3. It -will be fixed in a later release:: - - python fmri_slicer_coregistration.py - -""" - -# raise RuntimeWarning, 'Slicer not fully implmented' -from nipype.interfaces.slicer import BRAINSFit, BRAINSResample -"""Import necessary modules from nipype.""" - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import os # system functions -""" - -Preliminaries -------------- - -Confirm package dependencies are installed. (This is only for the -tutorial, rarely would you put this in your own code.) -""" - -from nipype.utils.misc import package_check - -package_check('numpy', '1.3', 'tutorial1') -package_check('scipy', '0.7', 'tutorial1') -package_check('IPython', '0.10', 'tutorial1') -"""The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -# Specify the location of the data. 
-data_dir = os.path.abspath('data') -# Specify the subject directories -subject_list = ['s1', 's3'] -# Map field names to individual subject runs. -info = dict(func=[['subject_id', 'f3']], struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Preprocessing pipeline nodes ----------------------------- - -Now we create a :class:`nipype.interfaces.io.DataSource` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True - -coregister = pe.Node(interface=BRAINSFit(), name="coregister") -coregister.inputs.outputTransform = True -coregister.inputs.outputVolume = True -coregister.inputs.transformType = ["Affine"] - -reslice = pe.Node(interface=BRAINSResample(), name="reslice") -reslice.inputs.outputVolume = True - -pipeline = pe.Workflow(name="pipeline") -pipeline.base_dir = os.path.abspath('slicer_tutorial/workingdir') - -pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, coregister, [('func', 'movingVolume')]), - (datasource, coregister, - [('struct', 'fixedVolume')]), (coregister, reslice, [ - ('outputTransform', 'warpTransform') - ]), (datasource, reslice, [('func', 'inputVolume')]), - (datasource, reslice, [('struct', 'referenceVolume')])]) - -if __name__ == '__main__': - pipeline.run() - pipeline.write_graph() diff --git a/examples/fmri_spm.py b/examples/fmri_spm.py deleted file mode 100755 index 23a16a36fe..0000000000 --- a/examples/fmri_spm.py +++ /dev/null @@ -1,390 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -============== -fMRI: SPM, FSL -============== - -The fmri_spm.py integrates several interfaces to perform a first -and second level analysis on a two-subject data set. The tutorial can -be found in the examples folder. 
Run the tutorial from inside the -nipype tutorial directory:: - - python fmri_spm.py - -Import necessary modules from nipype.""" - -from __future__ import print_function -from builtins import str -from builtins import range - -import os # system functions - -from nipype import config -# config.enable_provenance() - -from nipype.interfaces import spm, fsl - -# In order to use this example with SPM's matlab common runtime -# matlab_cmd = ('/Users/satra/Downloads/spm8/run_spm8.sh ' -# '/Applications/MATLAB/MATLAB_Compiler_Runtime/v713/ script') -# spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True) - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.rapidart as ra # artifact detection -import nipype.algorithms.modelgen as model # model specification -import nipype.interfaces.matlab as mlab -""" - -Preliminaries -------------- - -Set any package specific configuration. The output file format -for FSL routines is being set to uncompressed NIFTI and a specific -version of matlab is being used. The uncompressed format is required -because SPM does not handle compressed NIFTI. -""" - -# Tell fsl to generate all output in uncompressed nifti format -fsl.FSLCommand.set_default_output_type('NIFTI') - -# Set the way matlab should be called -# import nipype.interfaces.matlab as mlab # how to run matlab -# mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") - -# In case a different path is required -# mlab.MatlabCommand.set_default_paths('/software/matlab/spm12b/spm12b_r5918') -"""The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -# Specify the location of the data. -data_dir = os.path.abspath('data') -# Specify the subject directories -subject_list = ['s1', 's3'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) -""" -Preprocessing pipeline nodes ----------------------------- - -Now we create a :class:`nipype.interfaces.io.DataSource` object and -fill in the information from above about the layout of our data. 
The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True -"""Use :class:`nipype.interfaces.spm.Realign` for motion correction -and register all images to the mean image. -""" - -realign = pe.Node(interface=spm.Realign(), name="realign") -realign.inputs.register_to_mean = True -"""Use :class:`nipype.algorithms.rapidart` to determine which of the -images in the functional series are outliers based on deviations in -intensity or movement. -""" - -art = pe.Node(interface=ra.ArtifactDetect(), name="art") -art.inputs.use_differences = [True, False] -art.inputs.use_norm = True -art.inputs.norm_threshold = 1 -art.inputs.zintensity_threshold = 3 -art.inputs.mask_type = 'file' -art.inputs.parameter_source = 'SPM' -"""Skull strip structural images using -:class:`nipype.interfaces.fsl.BET`. -""" - -skullstrip = pe.Node(interface=fsl.BET(), name="skullstrip") -skullstrip.inputs.mask = True -"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid -body registration of the functional data to the structural data. -""" - -coregister = pe.Node(interface=spm.Coregister(), name="coregister") -coregister.inputs.jobtype = 'estimate' -"""Warp functional and structural data to SPM's T1 template using -:class:`nipype.interfaces.spm.Normalize`. The tutorial data set -includes the template image, T1.nii. -""" - -normalize = pe.Node(interface=spm.Normalize(), name="normalize") -normalize.inputs.template = os.path.abspath('data/T1.nii') -"""Smooth the functional data using -:class:`nipype.interfaces.spm.Smooth`. -""" - -smooth = pe.Node(interface=spm.Smooth(), name="smooth") -fwhmlist = [4] -smooth.iterables = ('fwhm', fwhmlist) -""" -Set up analysis components --------------------------- - -Here we create a function that returns subject-specific information -about the experimental paradigm. This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information -necessary to generate an SPM design matrix. In this tutorial, the same -paradigm was used for every participant. -""" - - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names])) - return output - - -"""Setup the contrast structure that needs to be evaluated. This is a -list of lists. The inner list specifies the contrasts and has the -following format - [Name,Stat,[list of condition names],[weights on -those conditions]. The condition names must match the `names` listed -in the `subjectinfo` function described above. -""" - -cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]) -cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]) -contrasts = [cont1, cont2] -"""Generate SPM-specific design information using -:class:`nipype.interfaces.spm.SpecifyModel`. 
-""" - -modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec") -modelspec.inputs.concatenate_runs = False -modelspec.inputs.input_units = 'secs' -modelspec.inputs.output_units = 'secs' -modelspec.inputs.time_repetition = 3. -modelspec.inputs.high_pass_filter_cutoff = 120 -"""Generate a first level SPM.mat file for analysis -:class:`nipype.interfaces.spm.Level1Design`. -""" - -level1design = pe.Node(interface=spm.Level1Design(), name="level1design") -level1design.inputs.timing_units = modelspec.inputs.output_units -level1design.inputs.interscan_interval = modelspec.inputs.time_repetition -level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} -"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the -parameters of the model. -""" - -level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} -"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the -first level contrasts specified in a few steps above. -""" - -contrastestimate = pe.Node( - interface=spm.EstimateContrast(), name="contrastestimate") -contrastestimate.inputs.contrasts = contrasts -contrastestimate.overwrite = True -contrastestimate.config = {'execution': {'remove_unnecessary_outputs': False}} -""" -Setup the pipeline ------------------- - -The nodes created above do not describe the flow of data. They merely -describe the parameters used for each function. In this section we -setup the connections between the nodes such that appropriate outputs -from nodes are piped into appropriate inputs of other nodes. - -Use the :class:`nipype.pipeline.engine.Pipeline` to create a -graph-based execution pipeline for first level analysis. The config -options tells the pipeline engine to use `workdir` as the disk -location to use when running the processes and keeping their -outputs. The `use_parameterized_dirs` tells the engine to create -sub-directories under `workdir` corresponding to the iterables in the -pipeline. Thus for this pipeline there will be subject specific -sub-directories. - -The ``nipype.pipeline.engine.Pipeline.connect`` function creates the -links between the processes, i.e., how data should flow in and out of -the processing nodes. 
-""" - -l1pipeline = pe.Workflow(name="level1") -l1pipeline.base_dir = os.path.abspath('spm_tutorial/workingdir') - -l1pipeline.connect([ - (infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, realign, [('func', 'in_files')]), - (realign, coregister, [('mean_image', 'source'), ('realigned_files', - 'apply_to_files')]), - (datasource, coregister, [('struct', 'target')]), - (datasource, normalize, [('struct', 'source')]), - (coregister, normalize, [('coregistered_files', 'apply_to_files')]), - (normalize, smooth, [('normalized_files', 'in_files')]), - (infosource, modelspec, [(('subject_id', subjectinfo), 'subject_info')]), - (realign, modelspec, [('realignment_parameters', - 'realignment_parameters')]), - (smooth, modelspec, [('smoothed_files', 'functional_runs')]), - (normalize, skullstrip, [('normalized_source', 'in_file')]), - (realign, art, [('realignment_parameters', 'realignment_parameters')]), - (normalize, art, [('normalized_files', 'realigned_files')]), - (skullstrip, art, [('mask_file', 'mask_file')]), - (art, modelspec, [('outlier_files', 'outlier_files')]), - (modelspec, level1design, [('session_info', 'session_info')]), - (skullstrip, level1design, [('mask_file', 'mask_image')]), - (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), - (level1estimate, contrastestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), -]) -""" - -Setup storage results ---------------------- - -Use :class:`nipype.interfaces.io.DataSink` to store selected outputs -from the pipeline in a specific location. This allows the user to -selectively choose important output bits from the analysis and keep -them. - -The first step is to create a datasink node and then to connect -outputs from the modules above to storage locations. These take the -following form directory_name[.[@]subdir] where parts between [] are -optional. For example 'realign.@mean' below creates a directory called -realign in 'l1output/subject_id/' and stores the mean image output -from the Realign process in the realign directory. If the @ is left -out, then a sub-directory with the name 'mean' would be created and -the mean image would be copied to that directory. -""" - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.abspath('spm_tutorial/l1output') - - -def getstripdir(subject_id): - import os - return os.path.join( - os.path.abspath('spm_tutorial/workingdir'), - '_subject_id_%s' % subject_id) - - -# store relevant outputs from various stages of the 1st level analysis -l1pipeline.connect([ - (infosource, datasink, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (realign, datasink, [('mean_image', 'realign.@mean'), - ('realignment_parameters', 'realign.@param')]), - (art, datasink, [('outlier_files', 'art.@outliers'), ('statistic_files', - 'art.@stats')]), - (level1design, datasink, [('spm_mat_file', 'model.pre-estimate')]), - (level1estimate, datasink, - [('spm_mat_file', 'model.@spm'), ('beta_images', 'model.@beta'), - ('mask_image', 'model.@mask'), ('residual_image', 'model.@res'), - ('RPVimage', 'model.@rpv')]), - (contrastestimate, datasink, [('con_images', 'contrasts.@con'), - ('spmT_images', 'contrasts.@T')]), -]) -""" -Setup level 2 pipeline ----------------------- - -Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast -images across a group of first level subjects. 
Unlike the previous -pipeline that iterated over subjects, this pipeline will iterate over -contrasts. -""" - -# collect all the con images for each contrast. -contrast_ids = list(range(1, len(contrasts) + 1)) -l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source") -# we use .*i* to capture both .img (SPM8) and .nii (SPM12) -l2source.inputs.template = os.path.abspath( - 'spm_tutorial/l1output/*/con*/*/_fwhm_%d/con_%04d.*i*') -# iterate over all contrast images -l2source.iterables = [('fwhm', fwhmlist), ('con', contrast_ids)] -l2source.inputs.sort_filelist = True -"""Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a -simple statistical analysis of the contrasts from the group of -subjects (n=2 in this example). -""" - -# setup a 1-sample t-test node -onesamplettestdes = pe.Node( - interface=spm.OneSampleTTestDesign(), name="onesampttestdes") -l2estimate = pe.Node(interface=spm.EstimateModel(), name="level2estimate") -l2estimate.inputs.estimation_method = {'Classical': 1} -l2conestimate = pe.Node( - interface=spm.EstimateContrast(), name="level2conestimate") -cont1 = ('Group', 'T', ['mean'], [1]) -l2conestimate.inputs.contrasts = [cont1] -l2conestimate.inputs.group_contrast = True -"""As before, we setup a pipeline to connect these two nodes (l2source --> onesamplettest). -""" - -l2pipeline = pe.Workflow(name="level2") -l2pipeline.base_dir = os.path.abspath('spm_tutorial/l2output') -l2pipeline.connect([ - (l2source, onesamplettestdes, [('outfiles', 'in_files')]), - (onesamplettestdes, l2estimate, [('spm_mat_file', 'spm_mat_file')]), - (l2estimate, l2conestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), -]) -""" -Execute the pipeline --------------------- - -The code discussed above sets up all the necessary data structures -with appropriate parameters and the connectivity between the -processes, but does not generate any output. To actually run the -analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` -function needs to be called. 
-""" - -if __name__ == '__main__': - l1pipeline.run('MultiProc') - l2pipeline.run('MultiProc') diff --git a/examples/fmri_spm_auditory.py b/examples/fmri_spm_auditory.py deleted file mode 100755 index 60572f0e97..0000000000 --- a/examples/fmri_spm_auditory.py +++ /dev/null @@ -1,404 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -========================== -fMRI: SPM Auditory dataset -========================== - -Introduction -============ -The fmri_spm_auditory.py recreates the classical workflow described in the -`SPM8 manual `_ using auditory -dataset that can be downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/auditory/:: - - python fmri_spm_auditory.py - -Import necessary modules from nipype.""" - -from builtins import range - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.spm as spm # spm -import nipype.interfaces.fsl as fsl # fsl -import nipype.interfaces.matlab as mlab # how to run matlab -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.modelgen as model # model specification -import os # system functions -""" - -Preliminaries -------------- - -""" - -# Set the way Matlab should be called -mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") - -""" - -Setting up workflows --------------------- -In this tutorial we will be setting up a hierarchical workflow for SPM -analysis. This will demonstrate how predefined workflows can be setup -and shared across users, projects and labs. - -Setup preprocessing workflow ----------------------------- -This is a generic preprocessing workflow that can be used by different analyses - -""" - -preproc = pe.Workflow(name='preproc') -"""We strongly encourage to use 4D files instead of series of 3D for fMRI analyses -for many reasons (cleanness and saving and filesystem inodes are among them). However, -the the workflow presented in the SPM8 manual which this tutorial is based on -uses 3D files. Therefore we leave converting to 4D as an option. We are using ``merge_to_4d`` -variable, because switching between 3D and 4dD requires some additional steps (explained later on). -Use :ref:`nipype.interfaces.fsl.utils.Merge` to merge a series -of 3D files along the time dimension creating a 4D file. -""" - -merge_to_4d = True - -if merge_to_4d: - merge = pe.Node(interface=fsl.Merge(), name="merge") - merge.inputs.dimension = "t" -"""Use :ref:`nipype.interfaces.spm.preprocess.Realign` -for motion correction and register all images to the mean image. -""" - -realign = pe.Node(interface=spm.Realign(), name="realign") -"""Use :ref:`nipype.interfaces.spm.preprocess.Coregister` -to perform a rigid body registration of the functional data to the structural data. -""" - -coregister = pe.Node(interface=spm.Coregister(), name="coregister") -coregister.inputs.jobtype = 'estimate' - -segment = pe.Node(interface=spm.Segment(), name="segment") - -"""Uncomment the following line for faster execution -""" - -# segment.inputs.gaussians_per_class = [1, 1, 1, 4] - -"""Warp functional and structural data to SPM's T1 template using -:ref:`nipype.interfaces.spm.preprocess.Normalize`. -The tutorial data set includes the template image, T1.nii. 
-""" - -normalize_func = pe.Node(interface=spm.Normalize(), name="normalize_func") -normalize_func.inputs.jobtype = "write" - -normalize_struc = pe.Node(interface=spm.Normalize(), name="normalize_struc") -normalize_struc.inputs.jobtype = "write" -"""Smooth the functional data using -:ref:`nipype.interfaces.spm.preprocess.Smooth`. -""" - -smooth = pe.Node(interface=spm.Smooth(), name="smooth") - -"""``write_voxel_sizes`` is the input of the normalize interface that is recommended -to be set to the voxel sizes of the target volume. -There is no need to set it manually since we can infer it from data -using the following function: -""" - -def get_vox_dims(volume): - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - voxdims = hdr.get_zooms() - return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] - - -"""Here we are connecting all the nodes together. -Notice that we add the merge node only if you choose to use 4D. -Also, the ``get_vox_dims`` function is passed along the input volume of -:ref:`nipype.interfaces.spm.preprocess.Normalize` to set the optimal voxel sizes. -""" - -if merge_to_4d: - preproc.connect([(merge, realign, [('merged_file', 'in_files')])]) - -preproc.connect([ - (realign, coregister, [('mean_image', 'target')]), - (coregister, segment, [('coregistered_source', 'data')]), - (segment, normalize_func, [('transformation_mat', 'parameter_file')]), - (segment, normalize_struc, - [('transformation_mat', 'parameter_file'), ('modulated_input_image', - 'apply_to_files'), - (('modulated_input_image', get_vox_dims), 'write_voxel_sizes')]), - (realign, normalize_func, [('realigned_files', 'apply_to_files'), - (('realigned_files', get_vox_dims), - 'write_voxel_sizes')]), - (normalize_func, smooth, [('normalized_files', 'in_files')]), -]) - -""" -Set up analysis workflow ------------------------- -""" - -l1analysis = pe.Workflow(name='analysis') - -"""Generate SPM-specific design information using -:ref:`nipype.algorithms.modelgen.SpecifySPMModel`. -""" - -modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec") - -"""Generate a first level SPM.mat file for analysis -:ref:`nipype.interfaces.spm.model.Level1Design`. -""" - -level1design = pe.Node(interface=spm.Level1Design(), name="level1design") -level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} - -"""Use :ref:`nipype.interfaces.spm.model.EstimateModel` -to determine the parameters of the model. -""" - -level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} - -threshold = pe.Node(interface=spm.Threshold(), name="threshold") - -"""Use :ref:`nipype.interfaces.spm.model.EstimateContrast` -to estimate the first level contrasts specified in a few steps above. 
-""" - -contrastestimate = pe.Node( - interface=spm.EstimateContrast(), name="contrastestimate") - -l1analysis.connect([ - (modelspec, level1design, [('session_info', 'session_info')]), - (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), - (level1estimate, contrastestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), - (contrastestimate, threshold, [('spm_mat_file', 'spm_mat_file'), - ('spmT_images', 'stat_image')]), -]) -""" -Preprocessing and analysis pipeline ------------------------------------ -""" - -l1pipeline = pe.Workflow(name='firstlevel') -l1pipeline.connect([(preproc, l1analysis, - [('realign.realignment_parameters', - 'modelspec.realignment_parameters')])]) - -""" -Plugging in ``functional_runs`` is a bit more complicated, -because model spec expects a list of ``runs``. -Every run can be a 4D file or a list of 3D files. -Therefore for 3D analysis we need a list of lists and to make one we need a helper function. -""" - -if merge_to_4d: - l1pipeline.connect([(preproc, l1analysis, - [('smooth.smoothed_files', - 'modelspec.functional_runs')])]) -else: - - def makelist(item): - return [item] - - l1pipeline.connect([(preproc, l1analysis, - [(('smooth.smoothed_files', makelist), - 'modelspec.functional_runs')])]) -""" -Data specific components ------------------------- -In this tutorial there is only one subject ``M00223``. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. -""" - -# Specify the location of the data downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/auditory/ -data_dir = os.path.abspath('spm_auditory_data') -# Specify the subject directories -subject_list = ['M00223'] -# Map field names to individual subject runs. -info = dict( - func=[['f', 'subject_id', 'f', 'subject_id', - list(range(16, 100))]], - struct=[['s', 'subject_id', 's', 'subject_id', 2]]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") - -""" -Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) - -""" -Now we create a :ref:`nipype.interfaces.io.DataGrabber` -object and fill in the information from above about the layout of our data. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s%s/%s%s_%03d.img' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True - -""" -Experimental paradigm specific components ------------------------------------------ -Here we create a structure that provides information -about the experimental paradigm. 
This is used by the -:ref:`nipype.algorithms.modelgen.SpecifySPMModel` -to create the information necessary to generate an SPM design matrix. -""" - -from nipype.interfaces.base import Bunch -subjectinfo = [ - Bunch( - conditions=['Task'], onsets=[list(range(6, 84, 12))], durations=[[6]]) -] - -""" -Setup the contrast structure that needs to be evaluated. This is a -list of lists. The inner list specifies the contrasts and has the -following format - ``[Name,Stat,[list of condition names],[weights on -those conditions]``. The condition names must match the ``names`` listed -in the ``subjectinfo`` function described above. -""" - -cont1 = ('active > rest', 'T', ['Task'], [1]) -contrasts = [cont1] - -# set up node specific inputs -modelspecref = l1pipeline.inputs.analysis.modelspec -modelspecref.input_units = 'scans' -modelspecref.output_units = 'scans' -modelspecref.time_repetition = 7 -modelspecref.high_pass_filter_cutoff = 120 - -l1designref = l1pipeline.inputs.analysis.level1design -l1designref.timing_units = modelspecref.output_units -l1designref.interscan_interval = modelspecref.time_repetition - -l1pipeline.inputs.preproc.smooth.fwhm = [6, 6, 6] -l1pipeline.inputs.analysis.modelspec.subject_info = subjectinfo -l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts -l1pipeline.inputs.analysis.threshold.contrast_index = 1 - -""" -Setup the pipeline ------------------- -The nodes created above do not describe the flow of data. They merely -describe the parameters used for each function. In this section we -setup the connections between the nodes such that appropriate outputs -from nodes are piped into appropriate inputs of other nodes. - -Use the :class:`~nipype.pipeline.engine.workflows.Workflow` to create a -graph-based execution pipeline for first level analysis. -Set the :py:attr:`~nipype.pipeline.engine.workflows.base.EngineBase.base_dir` -option to instruct the pipeline engine to use ``spm_auditory_tutorial/workingdir`` -as the filesystem location to use when running the processes and keeping their -outputs. -Other options can be set via `the configuration file -`__. -For example, ``use_parameterized_dirs`` tells the engine to create -sub-directories under :py:attr:`~nipype.pipeline.engine.workflows.Workflow.base_dir`, -corresponding to the iterables in the pipeline. -Thus, for this pipeline there will be subject specific sub-directories. - -When building a workflow, interface objects are wrapped within -a :class:`~nipype.pipeline.engine.nodes.Node` so that they can be inserted -in the workflow. - -The :func:`~nipype.pipeline.engine.workflows.Workflow.connect` method creates the -links between :class:`~nipype.pipeline.engine.nodes.Node` instances, i.e., -how data should flow in and out of the processing nodes. -""" - -level1 = pe.Workflow(name="level1") -level1.base_dir = os.path.abspath('spm_auditory_tutorial/workingdir') - -level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, l1pipeline, [('struct', - 'preproc.coregister.source')])]) -if merge_to_4d: - level1.connect([(datasource, l1pipeline, [('func', - 'preproc.merge.in_files')])]) -else: - level1.connect([(datasource, l1pipeline, [('func', - 'preproc.realign.in_files')])]) - -""" -Setup storage results ---------------------- -Use :ref:`nipype.interfaces.io.DataSink` to store selected outputs -from the pipeline in a specific location. This allows the user to -selectively choose important output bits from the analysis and keep -them. 
- -The first step is to create a datasink node and then to connect -outputs from the modules above to storage locations. These take the -following form ``directory_name[.[@]subdir]`` where parts between ``[]`` are -optional. For example 'realign.@mean' below creates a directory called -realign in 'l1output/subject_id/' and stores the mean image output -from the Realign process in the realign directory. If the @ is left -out, then a sub-directory with the name 'mean' would be created and -the mean image would be copied to that directory. - -""" - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.abspath( - 'spm_auditory_tutorial/l1output') - - -def getstripdir(subject_id): - import os - return os.path.join( - os.path.abspath('spm_auditory_tutorial/workingdir'), - '_subject_id_%s' % subject_id) - - -# store relevant outputs from various stages of the 1st level analysis -level1.connect([ - (infosource, datasink, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (l1pipeline, datasink, - [('analysis.contrastestimate.con_images', 'contrasts.@con'), - ('analysis.contrastestimate.spmT_images', 'contrasts.@T')]), -]) - -""" -Execute the pipeline --------------------- -The code discussed above sets up all the necessary data structures -with appropriate parameters and the connectivity between the -processes, but does not generate any output. To actually run the -analysis on the data the :func:`~nipype.pipeline.engine.workflows.Workflow.run` -method needs to be called. -""" - -if __name__ == '__main__': - level1.run() - level1.write_graph() diff --git a/examples/fmri_spm_dartel.py b/examples/fmri_spm_dartel.py deleted file mode 100755 index 815ce5a62d..0000000000 --- a/examples/fmri_spm_dartel.py +++ /dev/null @@ -1,539 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -================= -fMRI: DARTEL, SPM -================= - -The fmri_spm_dartel.py integrates several interfaces to perform a first -and second level analysis on a two-subject data set. The tutorial can -be found in the examples folder. Run the tutorial from inside the -nipype tutorial directory:: - - python fmri_spm_dartel.py - -Import necessary modules from nipype.""" - -from __future__ import print_function -from builtins import str -from builtins import range - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.spm as spm # spm -import niflow.nipype1.workflows.fmri.spm as spm_wf # spm -import nipype.interfaces.fsl as fsl # fsl -from nipype.interfaces import utility as niu # Utilities -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.rapidart as ra # artifact detection -import nipype.algorithms.modelgen as model # model specification -import os # system functions - -""" -Preliminaries -------------- -Set any package specific configuration. The output file format -for FSL routines is being set to uncompressed NIFTI and a specific -version of matlab is being used. The uncompressed format is required -because SPM does not handle compressed NIFTI. 
-""" - -# Tell fsl to generate all output in uncompressed nifti format -fsl.FSLCommand.set_default_output_type('NIFTI') - -# Set the way matlab should be called -# mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") -# mlab.MatlabCommand.set_default_paths('/software/spm8') - -""" -Setting up workflows --------------------- -In this tutorial we will be setting up a hierarchical workflow for spm -analysis. This will demonstrate how pre-defined workflows can be setup -and shared across users, projects and labs. - - -Setup preprocessing workflow ----------------------------- -This is a generic preprocessing workflow that can be used by different analyses - -""" - -preproc = pe.Workflow(name='preproc') - -"""Use :class:`nipype.interfaces.spm.Realign` for motion correction -and register all images to the mean image. -""" - -realign = pe.Node(spm.Realign(), name="realign") -realign.inputs.register_to_mean = True - -"""Use :class:`nipype.algorithms.rapidart` to determine which of the -images in the functional series are outliers based on deviations in -intensity or movement. -""" - -art = pe.Node(ra.ArtifactDetect(), name="art") -art.inputs.use_differences = [True, False] -art.inputs.use_norm = True -art.inputs.norm_threshold = 1 -art.inputs.zintensity_threshold = 3 -art.inputs.mask_type = 'file' -art.inputs.parameter_source = 'SPM' - -"""Skull strip structural images using -:class:`nipype.interfaces.fsl.BET`. -""" - -skullstrip = pe.Node(fsl.BET(), name="skullstrip") -skullstrip.inputs.mask = True - -"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid -body registration of the functional data to the structural data. -""" - -coregister = pe.Node(spm.Coregister(), name="coregister") -coregister.inputs.jobtype = 'estimate' - -"""Normalize and smooth functional data using DARTEL template -""" - -normalize_and_smooth_func = pe.Node( - spm.DARTELNorm2MNI(modulate=True), name='normalize_and_smooth_func') -fwhmlist = [4] -normalize_and_smooth_func.iterables = ('fwhm', fwhmlist) - -"""Normalize structural data using DARTEL template -""" - -normalize_struct = pe.Node( - spm.DARTELNorm2MNI(modulate=True), name='normalize_struct') -normalize_struct.inputs.fwhm = 2 - -preproc.connect([ - (realign, coregister, [('mean_image', 'source'), ('realigned_files', - 'apply_to_files')]), - (coregister, normalize_and_smooth_func, [('coregistered_files', - 'apply_to_files')]), - (normalize_struct, skullstrip, [('normalized_files', 'in_file')]), - (realign, art, [('realignment_parameters', 'realignment_parameters')]), - (normalize_and_smooth_func, art, [('normalized_files', - 'realigned_files')]), - (skullstrip, art, [('mask_file', 'mask_file')]), -]) - -""" -Set up analysis workflow ------------------------- -""" - -l1analysis = pe.Workflow(name='analysis') - -"""Generate SPM-specific design information using -:class:`nipype.interfaces.spm.SpecifyModel`. -""" - -modelspec = pe.Node(model.SpecifySPMModel(), name="modelspec") -modelspec.inputs.concatenate_runs = True - -"""Generate a first level SPM.mat file for analysis -:class:`nipype.interfaces.spm.Level1Design`. -""" - -level1design = pe.Node(spm.Level1Design(), name="level1design") -level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} - -"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the -parameters of the model. 
-""" - -level1estimate = pe.Node(spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} - -"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the -first level contrasts specified in a few steps above. -""" - -contrastestimate = pe.Node(spm.EstimateContrast(), name="contrastestimate") - -"""Use :class: `nipype.interfaces.utility.Select` to select each contrast for -reporting. -""" - -selectcontrast = pe.Node(niu.Select(), name="selectcontrast") - -"""Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of -the contrast estimate and a background image into one volume. -""" - -overlaystats = pe.Node(fsl.Overlay(), name="overlaystats") -overlaystats.inputs.stat_thresh = (3, 10) -overlaystats.inputs.show_negative_stats = True -overlaystats.inputs.auto_thresh_bg = True - -"""Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid -statistical volumes for a report of the first-level results. -""" - -slicestats = pe.Node(fsl.Slicer(), name="slicestats") -slicestats.inputs.all_axial = True -slicestats.inputs.image_width = 750 - -l1analysis.connect([(modelspec, level1design, - [('session_info', - 'session_info')]), (level1design, level1estimate, - [('spm_mat_file', 'spm_mat_file')]), - (level1estimate, contrastestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', - 'beta_images'), - ('residual_image', - 'residual_image')]), (contrastestimate, selectcontrast, - [('spmT_images', 'inlist')]), - (selectcontrast, overlaystats, - [('out', 'stat_image')]), (overlaystats, slicestats, - [('out_file', 'in_file')])]) - -""" -Preproc + Analysis pipeline ---------------------------- -""" - -l1pipeline = pe.Workflow(name='firstlevel') -l1pipeline.connect([ - (preproc, l1analysis, - [('realign.realignment_parameters', 'modelspec.realignment_parameters'), - ('normalize_and_smooth_func.normalized_files', - 'modelspec.functional_runs'), ('art.outlier_files', - 'modelspec.outlier_files'), - ('skullstrip.mask_file', - 'level1design.mask_image'), ('normalize_struct.normalized_files', - 'overlaystats.background_image')]), -]) - -""" -Data specific components ------------------------- -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. - -""" - -# Specify the location of the data. -# data_dir = os.path.abspath('data') -# Specify the subject directories -subject_list = ['s1', 's3'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - niu.IdentityInterface(fields=['subject_id']), name="infosource") - -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. 
The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) - -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -inputnode = pe.Node( - niu.IdentityInterface(fields=['in_data']), name='inputnode') -datasource = pe.Node( - nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.template = 'nipype-tutorial/data/%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True - -"""We need to create a separate workflow to make the DARTEL template -""" - -datasource_dartel = pe.MapNode( - nio.DataGrabber(infields=['subject_id'], outfields=['struct']), - name='datasource_dartel', - iterfield=['subject_id']) -datasource_dartel.inputs.template = 'nipype-tutorial/data/%s/%s.nii' -datasource_dartel.inputs.template_args = dict( - struct=[['subject_id', 'struct']]) -datasource_dartel.inputs.sort_filelist = True -datasource_dartel.inputs.subject_id = subject_list - -"""Here we make sure that struct files have names corresponding to the subject ids. -This way we will be able to pick the right field flows later. -""" - -rename_dartel = pe.MapNode( - niu.Rename(format_string="subject_id_%(subject_id)s_struct"), - iterfield=['in_file', 'subject_id'], - name='rename_dartel') -rename_dartel.inputs.subject_id = subject_list -rename_dartel.inputs.keep_ext = True - -dartel_workflow = spm_wf.create_DARTEL_template(name='dartel_workflow') -dartel_workflow.inputs.inputspec.template_prefix = "template" - -"""This function will allow to pick the right field flow for each subject -""" - -def pickFieldFlow(dartel_flow_fields, subject_id): - from nipype.utils.filemanip import split_filename - for f in dartel_flow_fields: - _, name, _ = split_filename(f) - if name.find("subject_id_%s" % subject_id): - return f - - raise Exception - -pick_flow = pe.Node( - niu.Function( - input_names=['dartel_flow_fields', 'subject_id'], - output_names=['dartel_flow_field'], - function=pickFieldFlow), - name="pick_flow") - -""" -Experimental paradigm specific components ------------------------------------------ -Here we create a function that returns subject-specific information -about the experimental paradigm. This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information -necessary to generate an SPM design matrix. In this tutorial, the same -paradigm was used for every participant. -""" - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names], - amplitudes=None, - tmod=None, - pmod=None, - regressor_names=None, - regressors=None)) - return output - -"""Setup the contrast structure that needs to be evaluated. This is a -list of lists. 
The inner list specifies the contrasts and has the -following format - [Name,Stat,[list of condition names],[weights on -those conditions]. The condition names must match the `names` listed -in the `subjectinfo` function described above. -""" - -cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]) -cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]) -contrasts = [cont1, cont2] - -# set up node specific inputs -modelspecref = l1pipeline.inputs.analysis.modelspec -modelspecref.input_units = 'secs' -modelspecref.output_units = 'secs' -modelspecref.time_repetition = 3. -modelspecref.high_pass_filter_cutoff = 120 - -l1designref = l1pipeline.inputs.analysis.level1design -l1designref.timing_units = modelspecref.output_units -l1designref.interscan_interval = modelspecref.time_repetition - -l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts - -# Iterate over each contrast and create report images. -selectcontrast.iterables = ('index', [[i] for i in range(len(contrasts))]) - -""" -Setup the pipeline ------------------- -The nodes created above do not describe the flow of data. They merely -describe the parameters used for each function. In this section we -setup the connections between the nodes such that appropriate outputs -from nodes are piped into appropriate inputs of other nodes. - -Use the :class:`nipype.pipeline.engine.Pipeline` to create a -graph-based execution pipeline for first level analysis. The config -options tells the pipeline engine to use `workdir` as the disk -location to use when running the processes and keeping their -outputs. The `use_parameterized_dirs` tells the engine to create -sub-directories under `workdir` corresponding to the iterables in the -pipeline. Thus for this pipeline there will be subject specific -sub-directories. - -The ``nipype.pipeline.engine.Pipeline.connect`` function creates the -links between the processes, i.e., how data should flow in and out of -the processing nodes. -""" - -level1 = pe.Workflow(name="level1") -level1.base_dir = os.path.abspath('spm_dartel_tutorial/workingdir') - -level1.connect([ - (inputnode, datasource, [('in_data', 'base_directory')]), - (inputnode, datasource_dartel, [('in_data', 'base_directory')]), - (datasource_dartel, rename_dartel, [('struct', 'in_file')]), - (rename_dartel, dartel_workflow, [('out_file', - 'inputspec.structural_files')]), - (infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, l1pipeline, - [('func', 'preproc.realign.in_files'), ('struct', - 'preproc.coregister.target'), - ('struct', 'preproc.normalize_struct.apply_to_files')]), - (dartel_workflow, l1pipeline, - [('outputspec.template_file', 'preproc.normalize_struct.template_file'), - ('outputspec.template_file', - 'preproc.normalize_and_smooth_func.template_file')]), - (infosource, pick_flow, [('subject_id', 'subject_id')]), - (dartel_workflow, pick_flow, [('outputspec.flow_fields', - 'dartel_flow_fields')]), - (pick_flow, l1pipeline, - [('dartel_flow_field', 'preproc.normalize_struct.flowfield_files'), - ('dartel_flow_field', - 'preproc.normalize_and_smooth_func.flowfield_files')]), - (infosource, l1pipeline, [(('subject_id', subjectinfo), - 'analysis.modelspec.subject_info')]), -]) - -""" -Setup storage results ---------------------- -Use :class:`nipype.interfaces.io.DataSink` to store selected outputs -from the pipeline in a specific location. This allows the user to -selectively choose important output bits from the analysis and keep -them. 
- -The first step is to create a datasink node and then to connect -outputs from the modules above to storage locations. These take the -following form directory_name[.[@]subdir] where parts between [] are -optional. For example 'realign.@mean' below creates a directory called -realign in 'l1output/subject_id/' and stores the mean image output -from the Realign process in the realign directory. If the @ is left -out, then a sub-directory with the name 'mean' would be created and -the mean image would be copied to that directory. -""" - -datasink = pe.Node(nio.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.abspath( - 'spm_dartel_tutorial/l1output') -report = pe.Node(nio.DataSink(), name='report') -report.inputs.base_directory = os.path.abspath('spm_dartel_tutorial/report') -report.inputs.parameterization = False - - -def getstripdir(subject_id): - import os - return os.path.join( - os.path.abspath('spm_dartel_tutorial/workingdir'), - '_subject_id_%s' % subject_id) - - -# store relevant outputs from various stages of the 1st level analysis -level1.connect([ - (infosource, datasink, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (l1pipeline, datasink, - [('analysis.contrastestimate.con_images', 'contrasts.@con'), - ('analysis.contrastestimate.spmT_images', 'contrasts.@T')]), - (infosource, report, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (l1pipeline, report, [('analysis.slicestats.out_file', '@report')]), -]) - -""" -Execute the pipeline --------------------- -The code discussed above sets up all the necessary data structures -with appropriate parameters and the connectivity between the -processes, but does not generate any output. To actually run the -analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` -function needs to be called. -""" - -if __name__ == '__main__': - level1.run(plugin_args={'n_procs': 4}) - level1.write_graph() - -""" -Setup level 2 pipeline ----------------------- -Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast -images across a group of first level subjects. Unlike the previous -pipeline that iterated over subjects, this pipeline will iterate over -contrasts. -""" - -# collect all the con images for each contrast. -contrast_ids = list(range(1, len(contrasts) + 1)) -l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source") -# we use .*i* to capture both .img (SPM8) and .nii (SPM12) -l2source.inputs.template = os.path.abspath( - 'spm_dartel_tutorial/l1output/*/con*/*/_fwhm_%d/con_%04d.*i*') -# iterate over all contrast images -l2source.iterables = [('fwhm', fwhmlist), ('con', contrast_ids)] -l2source.inputs.sort_filelist = True - -"""Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a -simple statistical analysis of the contrasts from the group of -subjects (n=2 in this example). -""" - -# setup a 1-sample t-test node -onesamplettestdes = pe.Node(spm.OneSampleTTestDesign(), name="onesampttestdes") -l2estimate = pe.Node(spm.EstimateModel(), name="level2estimate") -l2estimate.inputs.estimation_method = {'Classical': 1} -l2conestimate = pe.Node(spm.EstimateContrast(), name="level2conestimate") -cont1 = ('Group', 'T', ['mean'], [1]) -l2conestimate.inputs.contrasts = [cont1] -l2conestimate.inputs.group_contrast = True - -"""As before, we setup a pipeline to connect these two nodes (l2source --> onesamplettest). 
-""" - -l2pipeline = pe.Workflow(name="level2") -l2pipeline.base_dir = os.path.abspath('spm_dartel_tutorial/l2output') -l2pipeline.connect([ - (l2source, onesamplettestdes, [('outfiles', 'in_files')]), - (onesamplettestdes, l2estimate, [('spm_mat_file', 'spm_mat_file')]), - (l2estimate, l2conestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), -]) - -""" -Execute the second level pipeline ---------------------------------- -""" - -if __name__ == '__main__': - l2pipeline.run() diff --git a/examples/fmri_spm_face.py b/examples/fmri_spm_face.py deleted file mode 100755 index 9bced9f0c9..0000000000 --- a/examples/fmri_spm_face.py +++ /dev/null @@ -1,504 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -===================================== -fMRI: Famous vs non-famous faces, SPM -===================================== - -Introduction -============ - -The fmri_spm_face.py recreates the classical workflow described in the -`SPM8 manual `_ using face -dataset that can be downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/face_rep/:: - - python fmri_spm.py - -Import necessary modules from nipype.""" - -from __future__ import division -from builtins import range - -import os # system functions -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.spm as spm # spm -import nipype.interfaces.matlab as mlab # how to run matlab -import nipype.interfaces.utility as util # utility -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.modelgen as model # model specification - -""" -Preliminaries -------------- -Set any package specific configuration. The output file format -for FSL routines is being set to uncompressed NIFTI and a specific -version of matlab is being used. The uncompressed format is required -because SPM does not handle compressed NIFTI. -""" - -# Set the way matlab should be called -mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") -# If SPM is not in your MATLAB path you should add it here -# mlab.MatlabCommand.set_default_paths('/path/to/your/spm8') - -""" -Setting up workflows --------------------- -In this tutorial we will be setting up a hierarchical workflow for spm -analysis. It one is slightly different then the one used in spm_tutorial2. - -Setup preprocessing workflow ----------------------------- -This is a generic preprocessing workflow that can be used by different analyses -""" - -preproc = pe.Workflow(name='preproc') - -"""Use :class:`nipype.interfaces.spm.Realign` for motion correction -and register all images to the mean image. -""" - -realign = pe.Node(interface=spm.Realign(), name="realign") - -slice_timing = pe.Node(interface=spm.SliceTiming(), name="slice_timing") - -"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid -body registration of the functional data to the structural data. -""" - -coregister = pe.Node(interface=spm.Coregister(), name="coregister") -coregister.inputs.jobtype = 'estimate' - -segment = pe.Node(interface=spm.Segment(), name="segment") -segment.inputs.save_bias_corrected = True - -"""Uncomment the following line for faster execution -""" - -# segment.inputs.gaussians_per_class = [1, 1, 1, 4] - -"""Warp functional and structural data to SPM's T1 template using -:class:`nipype.interfaces.spm.Normalize`. The tutorial data set -includes the template image, T1.nii. 
-""" - -normalize_func = pe.Node(interface=spm.Normalize(), name="normalize_func") -normalize_func.inputs.jobtype = "write" - -normalize_struc = pe.Node(interface=spm.Normalize(), name="normalize_struc") -normalize_struc.inputs.jobtype = "write" - -"""Smooth the functional data using -:class:`nipype.interfaces.spm.Smooth`. -""" - -smooth = pe.Node(interface=spm.Smooth(), name="smooth") - -"""`write_voxel_sizes` is the input of the normalize interface that is recommended to be set to -the voxel sizes of the target volume. There is no need to set it manually since we van infer it from data -using the following function: -""" - - -def get_vox_dims(volume): - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - voxdims = hdr.get_zooms() - return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] - - -"""Here we are connecting all the nodes together. Notice that we add the merge node only if you choose -to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal -voxel sizes. -""" - -preproc.connect([ - (realign, coregister, [('mean_image', 'target')]), - (coregister, segment, [('coregistered_source', 'data')]), - (segment, normalize_func, [('transformation_mat', 'parameter_file')]), - (segment, normalize_struc, - [('transformation_mat', 'parameter_file'), ('bias_corrected_image', - 'apply_to_files'), - (('bias_corrected_image', get_vox_dims), 'write_voxel_sizes')]), - (realign, slice_timing, [('realigned_files', 'in_files')]), - (slice_timing, normalize_func, [('timecorrected_files', 'apply_to_files'), - (('timecorrected_files', get_vox_dims), - 'write_voxel_sizes')]), - (normalize_func, smooth, [('normalized_files', 'in_files')]), -]) - -""" -Set up analysis workflow ------------------------- -""" - -l1analysis = pe.Workflow(name='analysis') - -"""Generate SPM-specific design information using -:class:`nipype.interfaces.spm.SpecifyModel`. -""" - -modelspec = pe.Node(interface=model.SpecifySPMModel(), name="modelspec") - -"""Generate a first level SPM.mat file for analysis -:class:`nipype.interfaces.spm.Level1Design`. -""" - -level1design = pe.Node(interface=spm.Level1Design(), name="level1design") - -"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the -parameters of the model. -""" - -level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} - -threshold = pe.Node(interface=spm.Threshold(), name="threshold") - -"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the -first level contrasts specified in a few steps above. 
-""" - -contrastestimate = pe.Node( - interface=spm.EstimateContrast(), name="contrastestimate") - - -def pickfirst(l): - return l[0] - - -l1analysis.connect([ - (modelspec, level1design, [('session_info', 'session_info')]), - (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), - (level1estimate, contrastestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), - (contrastestimate, threshold, [('spm_mat_file', 'spm_mat_file'), - (('spmT_images', pickfirst), - 'stat_image')]), -]) - -""" -Preproc + Analysis pipeline ---------------------------- -""" - -l1pipeline = pe.Workflow(name='firstlevel') -l1pipeline.connect([(preproc, l1analysis, - [('realign.realignment_parameters', - 'modelspec.realignment_parameters')])]) - -"""Pluging in `functional_runs` is a bit more complicated, because model spec expects a list of `runs`. -Every run can be a 4D file or a list of 3D files. Therefore for 3D analysis we need a list of lists and -to make one we need a helper function. -""" - - -def makelist(item): - return [item] - - -l1pipeline.connect([(preproc, l1analysis, [(('smooth.smoothed_files', - makelist), - 'modelspec.functional_runs')])]) - -""" -Data specific components ------------------------- -In this tutorial there is only one subject `M03953`. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. -""" - -# Specify the location of the data downloaded from http://www.fil.ion.ucl.ac.uk/spm/data/face_rep/face_rep_SPM5.html -data_dir = os.path.abspath('spm_face_data') -# Specify the subject directories -subject_list = ['M03953'] -# Map field names to individual subject runs. -info = dict( - func=[['RawEPI', 'subject_id', 5, ["_%04d" % i for i in range(6, 357)]]], - struct=[['Structural', 'subject_id', 7, '']]) - -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") - -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) - -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - interface=nio.DataGrabber( - infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/s%s_%04d%s.img' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True - -""" -Experimental paradigm specific components ------------------------------------------ -Here we create a structure that provides information -about the experimental paradigm. 
This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information -necessary to generate an SPM design matrix. -""" - -from nipype.interfaces.base import Bunch - -"""We're importing the onset times from a mat file (found on -http://www.fil.ion.ucl.ac.uk/spm/data/face_rep/) -""" - -from scipy.io.matlab import loadmat -mat = loadmat(os.path.join(data_dir, "sots.mat"), struct_as_record=False) -sot = mat['sot'][0] -itemlag = mat['itemlag'][0] - -subjectinfo = [ - Bunch( - conditions=['N1', 'N2', 'F1', 'F2'], - onsets=[sot[0], sot[1], sot[2], sot[3]], - durations=[[0], [0], [0], [0]], - amplitudes=None, - tmod=None, - pmod=None, - regressor_names=None, - regressors=None) -] - -"""Setup the contrast structure that needs to be evaluated. This is a -list of lists. The inner list specifies the contrasts and has the -following format - [Name,Stat,[list of condition names],[weights on -those conditions]. The condition names must match the `names` listed -in the `subjectinfo` function described above. -""" - -cond1 = ('positive effect of condition', 'T', - ['N1*bf(1)', 'N2*bf(1)', 'F1*bf(1)', 'F2*bf(1)'], [1, 1, 1, 1]) -cond2 = ('positive effect of condition_dtemo', 'T', - ['N1*bf(2)', 'N2*bf(2)', 'F1*bf(2)', 'F2*bf(2)'], [1, 1, 1, 1]) -cond3 = ('positive effect of condition_ddisp', 'T', - ['N1*bf(3)', 'N2*bf(3)', 'F1*bf(3)', 'F2*bf(3)'], [1, 1, 1, 1]) -# non-famous > famous -fam1 = ('positive effect of Fame', 'T', - ['N1*bf(1)', 'N2*bf(1)', 'F1*bf(1)', 'F2*bf(1)'], [1, 1, -1, -1]) -fam2 = ('positive effect of Fame_dtemp', 'T', - ['N1*bf(2)', 'N2*bf(2)', 'F1*bf(2)', 'F2*bf(2)'], [1, 1, -1, -1]) -fam3 = ('positive effect of Fame_ddisp', 'T', - ['N1*bf(3)', 'N2*bf(3)', 'F1*bf(3)', 'F2*bf(3)'], [1, 1, -1, -1]) -# rep1 > rep2 -rep1 = ('positive effect of Rep', 'T', - ['N1*bf(1)', 'N2*bf(1)', 'F1*bf(1)', 'F2*bf(1)'], [1, -1, 1, -1]) -rep2 = ('positive effect of Rep_dtemp', 'T', - ['N1*bf(2)', 'N2*bf(2)', 'F1*bf(2)', 'F2*bf(2)'], [1, -1, 1, -1]) -rep3 = ('positive effect of Rep_ddisp', 'T', - ['N1*bf(3)', 'N2*bf(3)', 'F1*bf(3)', 'F2*bf(3)'], [1, -1, 1, -1]) -int1 = ('positive interaction of Fame x Rep', 'T', - ['N1*bf(1)', 'N2*bf(1)', 'F1*bf(1)', 'F2*bf(1)'], [-1, -1, -1, 1]) -int2 = ('positive interaction of Fame x Rep_dtemp', 'T', - ['N1*bf(2)', 'N2*bf(2)', 'F1*bf(2)', 'F2*bf(2)'], [1, -1, -1, 1]) -int3 = ('positive interaction of Fame x Rep_ddisp', 'T', - ['N1*bf(3)', 'N2*bf(3)', 'F1*bf(3)', 'F2*bf(3)'], [1, -1, -1, 1]) - -contf1 = ['average effect condition', 'F', [cond1, cond2, cond3]] -contf2 = ['main effect Fam', 'F', [fam1, fam2, fam3]] -contf3 = ['main effect Rep', 'F', [rep1, rep2, rep3]] -contf4 = ['interaction: Fam x Rep', 'F', [int1, int2, int3]] -contrasts = [ - cond1, cond2, cond3, fam1, fam2, fam3, rep1, rep2, rep3, int1, int2, int3, - contf1, contf2, contf3, contf4 -] - -"""Setting up nodes inputs -""" - -num_slices = 24 -TR = 2. 
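-
-# A brief sketch of how the slice-timing inputs set just below follow from
-# these two numbers (assuming a descending acquisition with no gap between
-# slices):
-#   time_acquisition = TR - TR / num_slices  ->  2. - 2. / 24
-#   slice_order      = [24, 23, ..., 1]      ->  descending order
-#   ref_slice        = num_slices // 2       ->  slice 12 as temporal reference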
-
-slice_timingref = l1pipeline.inputs.preproc.slice_timing
-slice_timingref.num_slices = num_slices
-slice_timingref.time_repetition = TR
-slice_timingref.time_acquisition = TR - TR / float(num_slices)
-slice_timingref.slice_order = list(range(num_slices, 0, -1))
-slice_timingref.ref_slice = int(num_slices / 2)
-
-l1pipeline.inputs.preproc.smooth.fwhm = [8, 8, 8]
-
-# set up node specific inputs
-modelspecref = l1pipeline.inputs.analysis.modelspec
-modelspecref.input_units = 'scans'
-modelspecref.output_units = 'scans'
-modelspecref.time_repetition = TR
-modelspecref.high_pass_filter_cutoff = 120
-
-l1designref = l1pipeline.inputs.analysis.level1design
-l1designref.timing_units = modelspecref.output_units
-l1designref.interscan_interval = modelspecref.time_repetition
-l1designref.microtime_resolution = slice_timingref.num_slices
-l1designref.microtime_onset = slice_timingref.ref_slice
-l1designref.bases = {'hrf': {'derivs': [1, 1]}}
-
-"""
-The following lines, if uncommented, automatically inform SPM to create a
-default set of contrasts for a factorial design.
-"""
-
-# l1designref.factor_info = [dict(name = 'Fame', levels = 2),
-#                            dict(name = 'Rep', levels = 2)]
-
-l1pipeline.inputs.analysis.modelspec.subject_info = subjectinfo
-l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts
-l1pipeline.inputs.analysis.threshold.contrast_index = 1
-
-"""
-Use derivative estimates in the non-parametric model
-"""
-
-l1pipeline.inputs.analysis.contrastestimate.use_derivs = True
-
-"""
-Setting up the parametric variation of the model
-"""
-
-subjectinfo_param = [
-    Bunch(
-        conditions=['N1', 'N2', 'F1', 'F2'],
-        onsets=[sot[0], sot[1], sot[2], sot[3]],
-        durations=[[0], [0], [0], [0]],
-        amplitudes=None,
-        tmod=None,
-        pmod=[
-            None,
-            Bunch(name=['Lag'], param=itemlag[1].tolist(), poly=[2]), None,
-            Bunch(name=['Lag'], param=itemlag[3].tolist(), poly=[2])
-        ],
-        regressor_names=None,
-        regressors=None)
-]
-
-cont1 = ('Famous_lag1', 'T', ['F2xLag^1'], [1])
-cont2 = ('Famous_lag2', 'T', ['F2xLag^2'], [1])
-fcont1 = ('Famous Lag', 'F', [cont1, cont2])
-paramcontrasts = [cont1, cont2, fcont1]
-
-paramanalysis = l1analysis.clone(name='paramanalysis')
-
-paramanalysis.inputs.level1design.bases = {'hrf': {'derivs': [0, 0]}}
-paramanalysis.inputs.modelspec.subject_info = subjectinfo_param
-paramanalysis.inputs.contrastestimate.contrasts = paramcontrasts
-paramanalysis.inputs.contrastestimate.use_derivs = False
-
-l1pipeline.connect(
-    [(preproc, paramanalysis,
-      [('realign.realignment_parameters', 'modelspec.realignment_parameters'),
-       (('smooth.smoothed_files', makelist), 'modelspec.functional_runs')])])
-
-"""
-Setup the pipeline
-------------------
-The nodes created above do not describe the flow of data. They merely
-describe the parameters used for each function. In this section we
-set up the connections between the nodes such that appropriate outputs
-from nodes are piped into appropriate inputs of other nodes.
-
-Use the :class:`nipype.pipeline.engine.Pipeline` to create a
-graph-based execution pipeline for first level analysis. The config
-options tell the pipeline engine to use `workdir` as the disk
-location to use when running the processes and keeping their
-outputs. The `use_parameterized_dirs` option tells the engine to create
-sub-directories under `workdir` corresponding to the iterables in the
-pipeline. Thus for this pipeline there will be subject-specific
-sub-directories.
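-
-For example, with the working directory defined further below, the
-parameterized outputs for the single subject in this tutorial would live under
-a directory of roughly this shape (an illustration, not verbatim output)::
-
-    spm_face_tutorial/workingdir/level1/_subject_id_M03953/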
- -The ``nipype.pipeline.engine.Pipeline.connect`` function creates the -links between the processes, i.e., how data should flow in and out of -the processing nodes. -""" - -level1 = pe.Workflow(name="level1") -level1.base_dir = os.path.abspath('spm_face_tutorial/workingdir') - -level1.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, l1pipeline, - [('struct', 'preproc.coregister.source'), - ('func', 'preproc.realign.in_files')])]) - -""" -Setup storage results ---------------------- -Use :class:`nipype.interfaces.io.DataSink` to store selected outputs -from the pipeline in a specific location. This allows the user to -selectively choose important output bits from the analysis and keep -them. - -The first step is to create a datasink node and then to connect -outputs from the modules above to storage locations. These take the -following form directory_name[.[@]subdir] where parts between [] are -optional. For example 'realign.@mean' below creates a directory called -realign in 'l1output/subject_id/' and stores the mean image output -from the Realign process in the realign directory. If the @ is left -out, then a sub-directory with the name 'mean' would be created and -the mean image would be copied to that directory. -""" - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.abspath( - 'spm_auditory_tutorial/l1output') - - -def getstripdir(subject_id): - import os - return os.path.join( - os.path.abspath('spm_auditory_tutorial/workingdir'), - '_subject_id_%s' % subject_id) - - -# store relevant outputs from various stages of the 1st level analysis -level1.connect([ - (infosource, datasink, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (l1pipeline, datasink, - [('analysis.contrastestimate.con_images', 'contrasts.@con'), - ('analysis.contrastestimate.spmT_images', 'contrasts.@T'), - ('paramanalysis.contrastestimate.con_images', - 'paramcontrasts.@con'), ('paramanalysis.contrastestimate.spmT_images', - 'paramcontrasts.@T')]), -]) - -""" -Execute the pipeline --------------------- -The code discussed above sets up all the necessary data structures -with appropriate parameters and the connectivity between the -processes, but does not generate any output. To actually run the -analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` -function needs to be called. -""" - -if __name__ == '__main__': - level1.run() - level1.write_graph() diff --git a/examples/fmri_spm_nested.py b/examples/fmri_spm_nested.py deleted file mode 100755 index e63b3a2cde..0000000000 --- a/examples/fmri_spm_nested.py +++ /dev/null @@ -1,489 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -========================== -fMRI: SPM nested workflows -========================== - -The fmri_spm.py integrates several interfaces to perform a first -and second level analysis on a two-subject data set. The tutorial can -be found in the examples folder. 
Run the tutorial from inside the -nipype tutorial directory:: - - python fmri_spm_nested.py - -Import necessary modules from nipype.""" - -from __future__ import print_function -from builtins import str -from builtins import range -import os.path as op # system functions - -from nipype.interfaces import io as nio # Data i/o -from nipype.interfaces import spm as spm # spm -# from nipype.interfaces import matlab as mlab # how to run matlab -from nipype.interfaces import fsl as fsl # fsl -from nipype.interfaces import utility as niu # utility -from nipype.pipeline import engine as pe # pypeline engine -from nipype.algorithms import rapidart as ra # artifact detection -from nipype.algorithms import modelgen as model # model specification - -""" -Preliminaries -------------- -Set any package specific configuration. The output file format -for FSL routines is being set to uncompressed NIFTI and a specific -version of matlab is being used. The uncompressed format is required -because SPM does not handle compressed NIFTI. -""" - -# Tell fsl to generate all output in uncompressed nifti format -fsl.FSLCommand.set_default_output_type('NIFTI') - -# Set the way matlab should be called -# mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash") -# mlab.MatlabCommand.set_default_paths('/software/spm8') - -""" -Setting up workflows --------------------- -In this tutorial we will be setting up a hierarchical workflow for spm -analysis. This will demonstrate how pre-defined workflows can be setup -and shared across users, projects and labs. - -Example of how to inline functions in connect() ------------------------------------------------ -""" - - -def _template_path(in_data): - import os.path as op - return op.abspath(op.join(in_data, 'nipype-tutorial/data/T1.nii')) - - -""" -Set-up preprocessing workflow ------------------------------ -This is a generic preprocessing workflow that can be used by different analyses -""" - -preproc = pe.Workflow(name='preproc') - -""" -A node called :code:`inputnode` is set to designate the path in which input data -are located: -""" - -inputnode = pe.Node( - niu.IdentityInterface(fields=['in_data']), name='inputnode') - -"""Use :class:`nipype.interfaces.spm.Realign` for motion correction -and register all images to the mean image. -""" - -realign = pe.Node(spm.Realign(), name="realign") -realign.inputs.register_to_mean = True - -"""Use :class:`nipype.algorithms.rapidart` to determine which of the -images in the functional series are outliers based on deviations in -intensity or movement. -""" - -art = pe.Node(ra.ArtifactDetect(), name="art") -art.inputs.use_differences = [True, False] -art.inputs.use_norm = True -art.inputs.norm_threshold = 1 -art.inputs.zintensity_threshold = 3 -art.inputs.mask_type = 'file' -art.inputs.parameter_source = 'SPM' - -"""Skull strip structural images using -:class:`nipype.interfaces.fsl.BET`. -""" - -skullstrip = pe.Node(fsl.BET(), name="skullstrip") -skullstrip.inputs.mask = True - -"""Use :class:`nipype.interfaces.spm.Coregister` to perform a rigid -body registration of the functional data to the structural data. -""" - -coregister = pe.Node(spm.Coregister(), name="coregister") -coregister.inputs.jobtype = 'estimate' - -"""Warp functional and structural data to SPM's T1 template using -:class:`nipype.interfaces.spm.Normalize`. The tutorial data set -includes the template image, T1.nii. -""" - -normalize = pe.Node(spm.Normalize(), name="normalize") - -"""Smooth the functional data using -:class:`nipype.interfaces.spm.Smooth`. 
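-
-The kernel width is set through the ``fwhm`` input. Because ``fwhm`` is
-declared as an iterable just below, the engine expands the workflow into one
-smoothing branch per value; a sketch with a hypothetical second width would
-read::
-
-    fwhmlist = [4, 8]
-    smooth.iterables = ('fwhm', fwhmlist)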
-""" - -smooth = pe.Node(spm.Smooth(), name="smooth") -fwhmlist = [4] -smooth.iterables = ('fwhm', fwhmlist) - -preproc.connect([ - (inputnode, normalize, [(('in_data', _template_path), 'template')]), - (realign, coregister, [('mean_image', 'source'), ('realigned_files', - 'apply_to_files')]), - (coregister, normalize, [('coregistered_files', 'apply_to_files')]), - (normalize, smooth, [('normalized_files', 'in_files')]), - (normalize, skullstrip, [('normalized_source', 'in_file')]), - (realign, art, [('realignment_parameters', 'realignment_parameters')]), - (normalize, art, [('normalized_files', 'realigned_files')]), - (skullstrip, art, [('mask_file', 'mask_file')]), -]) - -""" -Set up analysis workflow ------------------------- -""" - -l1analysis = pe.Workflow(name='analysis') - -"""Generate SPM-specific design information using -:class:`nipype.interfaces.spm.SpecifyModel`. -""" - -modelspec = pe.Node(model.SpecifySPMModel(), name="modelspec") -modelspec.inputs.concatenate_runs = True - -"""Generate a first level SPM.mat file for analysis -:class:`nipype.interfaces.spm.Level1Design`. -""" - -level1design = pe.Node(spm.Level1Design(), name="level1design") -level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} - -"""Use :class:`nipype.interfaces.spm.EstimateModel` to determine the -parameters of the model. -""" - -level1estimate = pe.Node(spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} - -"""Use :class:`nipype.interfaces.spm.EstimateContrast` to estimate the -first level contrasts specified in a few steps above. -""" - -contrastestimate = pe.Node(spm.EstimateContrast(), name="contrastestimate") - -"""Use :class: `nipype.interfaces.utility.Select` to select each contrast for -reporting. -""" - -selectcontrast = pe.Node(niu.Select(), name="selectcontrast") - -"""Use :class:`nipype.interfaces.fsl.Overlay` to combine the statistical output of -the contrast estimate and a background image into one volume. -""" - -overlaystats = pe.Node(fsl.Overlay(), name="overlaystats") -overlaystats.inputs.stat_thresh = (3, 10) -overlaystats.inputs.show_negative_stats = True -overlaystats.inputs.auto_thresh_bg = True - -"""Use :class:`nipype.interfaces.fsl.Slicer` to create images of the overlaid -statistical volumes for a report of the first-level results. 
-""" - -slicestats = pe.Node(fsl.Slicer(), name="slicestats") -slicestats.inputs.all_axial = True -slicestats.inputs.image_width = 750 - -l1analysis.connect([(modelspec, level1design, - [('session_info', - 'session_info')]), (level1design, level1estimate, - [('spm_mat_file', 'spm_mat_file')]), - (level1estimate, contrastestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', - 'beta_images'), - ('residual_image', - 'residual_image')]), (contrastestimate, selectcontrast, - [('spmT_images', 'inlist')]), - (selectcontrast, overlaystats, - [('out', 'stat_image')]), (overlaystats, slicestats, - [('out_file', 'in_file')])]) - -""" -Preproc + Analysis pipeline ---------------------------- -""" - -l1pipeline = pe.Workflow(name='firstlevel') -l1pipeline.connect([ - (preproc, l1analysis, - [('realign.realignment_parameters', 'modelspec.realignment_parameters'), - ('smooth.smoothed_files', - 'modelspec.functional_runs'), ('art.outlier_files', - 'modelspec.outlier_files'), - ('skullstrip.mask_file', - 'level1design.mask_image'), ('normalize.normalized_source', - 'overlaystats.background_image')]), -]) - -""" -Data specific components ------------------------- -The nipype tutorial contains data for two subjects. Subject data -is in two subdirectories, ``s1`` and ``s2``. Each subject directory -contains four functional volumes: f3.nii, f5.nii, f7.nii, f10.nii. And -one anatomical volume named struct.nii. - -Below we set some variables to inform the ``datasource`` about the -layout of our data. We specify the location of the data, the subject -sub-directories and a dictionary that maps each run to a mnemonic (or -field) for the run type (``struct`` or ``func``). These fields become -the output fields of the ``datasource`` node in the pipeline. - -In the example below, run 'f3' is of type 'func' and gets mapped to a -nifti filename through a template '%s.nii'. So 'f3' would become -'f3.nii'. -""" - -# Specify the subject directories -subject_list = ['s1', 's3'] -# Map field names to individual subject runs. -info = dict( - func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]], - struct=[['subject_id', 'struct']]) - -infosource = pe.Node( - niu.IdentityInterface(fields=['subject_id']), name="infosource") - -"""Here we set up iteration over all the subjects. The following line -is a particular example of the flexibility of the system. The -``datasource`` attribute ``iterables`` tells the pipeline engine that -it should repeat the analysis on each of the items in the -``subject_list``. In the current example, the entire first level -preprocessing and estimation will be repeated for each subject -contained in subject_list. -""" - -infosource.iterables = ('subject_id', subject_list) - -""" -Now we create a :class:`nipype.interfaces.io.DataGrabber` object and -fill in the information from above about the layout of our data. The -:class:`nipype.pipeline.NodeWrapper` module wraps the interface object -and provides additional housekeeping and pipeline specific -functionality. -""" - -datasource = pe.Node( - nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), - name='datasource') -datasource.inputs.template = 'nipype-tutorial/data/%s/%s.nii' -datasource.inputs.template_args = info -datasource.inputs.sort_filelist = True - -""" -Experimental paradigm specific components ------------------------------------------ -Here we create a function that returns subject-specific information -about the experimental paradigm. 
This is used by the -:class:`nipype.interfaces.spm.SpecifyModel` to create the information -necessary to generate an SPM design matrix. In this tutorial, the same -paradigm was used for every participant. -""" - - -def subjectinfo(subject_id): - from nipype.interfaces.base import Bunch - from copy import deepcopy - print("Subject ID: %s\n" % str(subject_id)) - output = [] - names = ['Task-Odd', 'Task-Even'] - for r in range(4): - onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))] - output.insert(r, - Bunch( - conditions=names, - onsets=deepcopy(onsets), - durations=[[15] for s in names], - amplitudes=None, - tmod=None, - pmod=None, - regressor_names=None, - regressors=None)) - return output - - -"""Setup the contrast structure that needs to be evaluated. This is a -list of lists. The inner list specifies the contrasts and has the -following format - [Name,Stat,[list of condition names],[weights on -those conditions]. The condition names must match the `names` listed -in the `subjectinfo` function described above. -""" - -cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]) -cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]) -contrasts = [cont1, cont2] - -# set up node specific inputs -modelspecref = l1pipeline.inputs.analysis.modelspec -modelspecref.input_units = 'secs' -modelspecref.output_units = 'secs' -modelspecref.time_repetition = 3. -modelspecref.high_pass_filter_cutoff = 120 - -l1designref = l1pipeline.inputs.analysis.level1design -l1designref.timing_units = modelspecref.output_units -l1designref.interscan_interval = modelspecref.time_repetition - -l1pipeline.inputs.analysis.contrastestimate.contrasts = contrasts - -# Iterate over each contrast and create report images. -selectcontrast.iterables = ('index', [[i] for i in range(len(contrasts))]) - -""" -Setup the pipeline ------------------- -The nodes created above do not describe the flow of data. They merely -describe the parameters used for each function. In this section we -setup the connections between the nodes such that appropriate outputs -from nodes are piped into appropriate inputs of other nodes. - -Use the :class:`nipype.pipeline.engine.Pipeline` to create a -graph-based execution pipeline for first level analysis. The config -options tells the pipeline engine to use `workdir` as the disk -location to use when running the processes and keeping their -outputs. The `use_parameterized_dirs` tells the engine to create -sub-directories under `workdir` corresponding to the iterables in the -pipeline. Thus for this pipeline there will be subject specific -sub-directories. - -The ``nipype.pipeline.engine.Pipeline.connect`` function creates the -links between the processes, i.e., how data should flow in and out of -the processing nodes. -""" - -level1 = pe.Workflow(name="level1") -level1.base_dir = op.abspath('spm_tutorial2/workingdir') - -level1.connect([ - (inputnode, datasource, [('in_data', 'base_directory')]), - (infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, l1pipeline, [('func', 'preproc.realign.in_files'), - ('struct', 'preproc.coregister.target'), - ('struct', 'preproc.normalize.source')]), - (infosource, l1pipeline, [(('subject_id', subjectinfo), - 'analysis.modelspec.subject_info')]), -]) - -""" -Setup storage results ---------------------- -Use :class:`nipype.interfaces.io.DataSink` to store selected outputs -from the pipeline in a specific location. 
This allows the user to -selectively choose important output bits from the analysis and keep -them. - -The first step is to create a datasink node and then to connect -outputs from the modules above to storage locations. These take the -following form directory_name[.[@]subdir] where parts between [] are -optional. For example 'realign.@mean' below creates a directory called -realign in 'l1output/subject_id/' and stores the mean image output -from the Realign process in the realign directory. If the @ is left -out, then a sub-directory with the name 'mean' would be created and -the mean image would be copied to that directory. -""" - -datasink = pe.Node(nio.DataSink(), name="datasink") -datasink.inputs.base_directory = op.abspath('spm_tutorial2/l1output') -report = pe.Node(nio.DataSink(), name='report') -report.inputs.base_directory = op.abspath('spm_tutorial2/report') -report.inputs.parameterization = False - - -def getstripdir(subject_id): - import os.path as op - return op.join( - op.abspath('spm_tutorial2/workingdir'), '_subject_id_%s' % subject_id) - - -# store relevant outputs from various stages of the 1st level analysis -level1.connect([ - (infosource, datasink, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (l1pipeline, datasink, - [('analysis.contrastestimate.con_images', 'contrasts.@con'), - ('analysis.contrastestimate.spmT_images', 'contrasts.@T')]), - (infosource, report, [('subject_id', 'container'), - (('subject_id', getstripdir), 'strip_dir')]), - (l1pipeline, report, [('analysis.slicestats.out_file', '@report')]), -]) - -""" -Execute the pipeline --------------------- -The code discussed above sets up all the necessary data structures -with appropriate parameters and the connectivity between the -processes, but does not generate any output. To actually run the -analysis on the data the ``nipype.pipeline.engine.Pipeline.Run`` -function needs to be called. -""" - -if __name__ == '__main__': - level1.run('MultiProc') - level1.write_graph() - -""" -Setup level 2 pipeline ----------------------- -Use :class:`nipype.interfaces.io.DataGrabber` to extract the contrast -images across a group of first level subjects. Unlike the previous -pipeline that iterated over subjects, this pipeline will iterate over -contrasts. -""" - -# collect all the con images for each contrast. -contrast_ids = list(range(1, len(contrasts) + 1)) -l2source = pe.Node(nio.DataGrabber(infields=['fwhm', 'con']), name="l2source") -# we use .*i* to capture both .img (SPM8) and .nii (SPM12) -l2source.inputs.template = op.abspath( - 'spm_tutorial2/l1output/*/con*/*/_fwhm_%d/con_%04d.*i*') -# iterate over all contrast images -l2source.iterables = [('fwhm', fwhmlist), ('con', contrast_ids)] -l2source.inputs.sort_filelist = True - -"""Use :class:`nipype.interfaces.spm.OneSampleTTestDesign` to perform a -simple statistical analysis of the contrasts from the group of -subjects (n=2 in this example). -""" - -# setup a 1-sample t-test node -onesamplettestdes = pe.Node(spm.OneSampleTTestDesign(), name="onesampttestdes") -l2estimate = pe.Node(spm.EstimateModel(), name="level2estimate") -l2estimate.inputs.estimation_method = {'Classical': 1} -l2conestimate = pe.Node(spm.EstimateContrast(), name="level2conestimate") -cont1 = ('Group', 'T', ['mean'], [1]) -l2conestimate.inputs.contrasts = [cont1] -l2conestimate.inputs.group_contrast = True - -"""As before, we setup a pipeline to connect these two nodes (l2source --> onesamplettest). 
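-
-Each connection is written as ``(source, destination, [(output, input), ...])``;
-for instance, the first entry below simply feeds the grabbed contrast images
-into the one-sample t-test design::
-
-    (l2source, onesamplettestdes, [('outfiles', 'in_files')])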
-""" - -l2pipeline = pe.Workflow(name="level2") -l2pipeline.base_dir = op.abspath('spm_tutorial2/l2output') -l2pipeline.connect([ - (l2source, onesamplettestdes, [('outfiles', 'in_files')]), - (onesamplettestdes, l2estimate, [('spm_mat_file', 'spm_mat_file')]), - (l2estimate, l2conestimate, - [('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]), -]) - -""" -Execute the second level pipeline ---------------------------------- -""" - -if __name__ == '__main__': - l2pipeline.run('MultiProc') diff --git a/examples/frontiers_paper/smoothing_comparison.py b/examples/frontiers_paper/smoothing_comparison.py deleted file mode 100644 index 696e8a94b8..0000000000 --- a/examples/frontiers_paper/smoothing_comparison.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -=========================== -Paper: Smoothing comparison -=========================== -""" - -from builtins import range - -import nipype.interfaces.io as nio # Data i/o -import nipype.interfaces.spm as spm # spm -import nipype.interfaces.freesurfer as fs # freesurfer -import nipype.interfaces.nipy as nipy -import nipype.interfaces.utility as util -import nipype.pipeline.engine as pe # pypeline engine -import nipype.algorithms.modelgen as model # model specification -import niflow.nipype1.workflows.fmri.fsl as fsl_wf -from nipype.interfaces.base import Bunch -import os # system functions - -preprocessing = pe.Workflow(name="preprocessing") - -iter_fwhm = pe.Node( - interface=util.IdentityInterface(fields=["fwhm"]), name="iter_fwhm") -iter_fwhm.iterables = [('fwhm', [4, 8])] - -iter_smoothing_method = pe.Node( - interface=util.IdentityInterface(fields=["smoothing_method"]), - name="iter_smoothing_method") -iter_smoothing_method.iterables = [('smoothing_method', [ - 'isotropic_voxel', 'anisotropic_voxel', 'isotropic_surface' -])] - -realign = pe.Node(interface=spm.Realign(), name="realign") -realign.inputs.register_to_mean = True - -isotropic_voxel_smooth = pe.Node( - interface=spm.Smooth(), name="isotropic_voxel_smooth") -preprocessing.connect(realign, "realigned_files", isotropic_voxel_smooth, - "in_files") -preprocessing.connect(iter_fwhm, "fwhm", isotropic_voxel_smooth, "fwhm") - -compute_mask = pe.Node(interface=nipy.ComputeMask(), name="compute_mask") -preprocessing.connect(realign, "mean_image", compute_mask, "mean_volume") - -anisotropic_voxel_smooth = fsl_wf.create_susan_smooth( - name="anisotropic_voxel_smooth", separate_masks=False) -anisotropic_voxel_smooth.inputs.smooth.output_type = 'NIFTI' -preprocessing.connect(realign, "realigned_files", anisotropic_voxel_smooth, - "inputnode.in_files") -preprocessing.connect(iter_fwhm, "fwhm", anisotropic_voxel_smooth, - "inputnode.fwhm") -preprocessing.connect(compute_mask, "brain_mask", anisotropic_voxel_smooth, - 'inputnode.mask_file') - -recon_all = pe.Node(interface=fs.ReconAll(), name="recon_all") - -surfregister = pe.Node(interface=fs.BBRegister(), name='surfregister') -surfregister.inputs.init = 'fsl' -surfregister.inputs.contrast_type = 't2' -preprocessing.connect(realign, 'mean_image', surfregister, 'source_file') -preprocessing.connect(recon_all, 'subject_id', surfregister, 'subject_id') -preprocessing.connect(recon_all, 'subjects_dir', surfregister, 'subjects_dir') - -isotropic_surface_smooth = pe.MapNode( - interface=fs.Smooth(proj_frac_avg=(0, 1, 0.1)), - iterfield=['in_file'], - 
name="isotropic_surface_smooth") -preprocessing.connect(surfregister, 'out_reg_file', isotropic_surface_smooth, - 'reg_file') -preprocessing.connect(realign, "realigned_files", isotropic_surface_smooth, - "in_file") -preprocessing.connect(iter_fwhm, "fwhm", isotropic_surface_smooth, - "surface_fwhm") -preprocessing.connect(iter_fwhm, "fwhm", isotropic_surface_smooth, "vol_fwhm") -preprocessing.connect(recon_all, 'subjects_dir', isotropic_surface_smooth, - 'subjects_dir') - -merge_smoothed_files = pe.Node( - interface=util.Merge(3), name='merge_smoothed_files') -preprocessing.connect(isotropic_voxel_smooth, 'smoothed_files', - merge_smoothed_files, 'in1') -preprocessing.connect(anisotropic_voxel_smooth, 'outputnode.smoothed_files', - merge_smoothed_files, 'in2') -preprocessing.connect(isotropic_surface_smooth, 'smoothed_file', - merge_smoothed_files, 'in3') - -select_smoothed_files = pe.Node( - interface=util.Select(), name="select_smoothed_files") -preprocessing.connect(merge_smoothed_files, 'out', select_smoothed_files, - 'inlist') - - -def chooseindex(roi): - return { - 'isotropic_voxel': list(range(0, 4)), - 'anisotropic_voxel': list(range(4, 8)), - 'isotropic_surface': list(range(8, 12)) - }[roi] - - -preprocessing.connect(iter_smoothing_method, ("smoothing_method", chooseindex), - select_smoothed_files, 'index') - -rename = pe.MapNode( - util.Rename(format_string="%(orig)s"), - name="rename", - iterfield=['in_file']) -rename.inputs.parse_string = "(?P.*)" - -preprocessing.connect(select_smoothed_files, 'out', rename, 'in_file') - -specify_model = pe.Node(interface=model.SpecifyModel(), name="specify_model") -specify_model.inputs.input_units = 'secs' -specify_model.inputs.time_repetition = 3. -specify_model.inputs.high_pass_filter_cutoff = 120 -specify_model.inputs.subject_info = [ - Bunch( - conditions=['Task-Odd', 'Task-Even'], - onsets=[list(range(15, 240, 60)), - list(range(45, 240, 60))], - durations=[[15], [15]]) -] * 4 - -level1design = pe.Node(interface=spm.Level1Design(), name="level1design") -level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} -level1design.inputs.timing_units = 'secs' -level1design.inputs.interscan_interval = specify_model.inputs.time_repetition - -level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate") -level1estimate.inputs.estimation_method = {'Classical': 1} - -contrastestimate = pe.Node( - interface=spm.EstimateContrast(), name="contrastestimate") -contrastestimate.inputs.contrasts = [('Task>Baseline', 'T', - ['Task-Odd', 'Task-Even'], [0.5, 0.5])] - -modelling = pe.Workflow(name="modelling") -modelling.connect(specify_model, 'session_info', level1design, 'session_info') -modelling.connect(level1design, 'spm_mat_file', level1estimate, 'spm_mat_file') -modelling.connect(level1estimate, 'spm_mat_file', contrastestimate, - 'spm_mat_file') -modelling.connect(level1estimate, 'beta_images', contrastestimate, - 'beta_images') -modelling.connect(level1estimate, 'residual_image', contrastestimate, - 'residual_image') - -main_workflow = pe.Workflow(name="main_workflow") -main_workflow.base_dir = "smoothing_comparison_workflow" -main_workflow.connect(preprocessing, "realign.realignment_parameters", - modelling, "specify_model.realignment_parameters") -main_workflow.connect(preprocessing, "select_smoothed_files.out", modelling, - "specify_model.functional_runs") -main_workflow.connect(preprocessing, "compute_mask.brain_mask", modelling, - "level1design.mask_image") - -datasource = pe.Node( - interface=nio.DataGrabber( - 
        infields=['subject_id'], outfields=['func', 'struct']),
-    name='datasource')
-datasource.inputs.base_directory = os.path.abspath('data')
-datasource.inputs.template = '%s/%s.nii'
-datasource.inputs.template_args = info = dict(
-    func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
-    struct=[['subject_id', 'struct']])
-datasource.inputs.subject_id = 's1'
-datasource.inputs.sort_filelist = True
-
-main_workflow.connect(datasource, 'func', preprocessing, 'realign.in_files')
-main_workflow.connect(datasource, 'struct', preprocessing,
-                      'recon_all.T1_files')
-
-datasink = pe.Node(interface=nio.DataSink(), name="datasink")
-datasink.inputs.base_directory = os.path.abspath(
-    'smoothing_comparison_workflow/output')
-datasink.inputs.regexp_substitutions = [("_rename[0-9]", "")]
-
-main_workflow.connect(modelling, 'contrastestimate.spmT_images', datasink,
-                      'contrasts')
-main_workflow.connect(preprocessing, 'rename.out_file', datasink,
-                      'smoothed_epi')
-
-main_workflow.run()
-main_workflow.write_graph()
diff --git a/examples/frontiers_paper/workflow_from_scratch.py b/examples/frontiers_paper/workflow_from_scratch.py
deleted file mode 100644
index 9b1c939d29..0000000000
--- a/examples/frontiers_paper/workflow_from_scratch.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# -*- coding: utf-8 -*-
-# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
-# vi: set ft=python sts=4 ts=4 sw=4 et:
-"""
-=====================
-Workflow from scratch
-=====================
-
-"""
-
-from builtins import range
-
-import nipype.interfaces.io as nio  # Data i/o
-import nipype.interfaces.spm as spm  # spm
-import nipype.pipeline.engine as pe  # pypeline engine
-import nipype.algorithms.modelgen as model  # model specification
-from nipype.interfaces.base import Bunch
-import os  # system functions
-"""In the following section, to showcase NiPyPe, we will describe how to create
-and extend a typical fMRI processing pipeline. We will begin with a basic
-processing layout and then extend it by adding or exchanging different
-components.
-
-Most fMRI pipelines can be divided into two sections - preprocessing and
-modelling. The first deals with cleaning the data from confounds and noise,
-and the second fits a model based on the experimental design. The
-preprocessing stage in our first iteration of the pipeline will consist of
-only two steps: realignment and smoothing. In NiPyPe, every processing step
-consists of an Interface (which defines how to execute the corresponding
-software) encapsulated in a Node (which defines, for example, a unique name).
-For realignment (motion correction achieved by coregistering all volumes to
-the mean) and smoothing (convolution with a 3D Gaussian kernel) we will use
-the SPM implementation. Definitions of the appropriate nodes can be found in
-Listing 1 (TODO). Inputs of nodes (such as register_to_mean from Listing 1)
-are accessible through the inputs property. Upon setting any input, its type
-is verified to avoid errors during execution."""
-
-realign = pe.Node(interface=spm.Realign(), name="realign")
-realign.inputs.register_to_mean = True
-
-smooth = pe.Node(interface=spm.Smooth(), name="smooth")
-smooth.inputs.fwhm = 4
-"""To connect two nodes, a Workflow has to be created. The connect() method of
-a Workflow allows one to specify which outputs of which Nodes should be
-connected to which inputs of which Nodes (see Listing 2).
By connecting the realigned_files
-output of realign to the in_files input of Smooth we have created a simple
-preprocessing workflow (see Figure TODO)."""
-
-preprocessing = pe.Workflow(name="preprocessing")
-preprocessing.connect(realign, "realigned_files", smooth, "in_files")
-"""Creating a modelling workflow, which will define the design, estimate the
-model and contrasts, follows suit. We will again use SPM implementations.
-NiPyPe, however, adds an extra abstraction layer to model definition which
-allows using the same definition for many model estimation implementations
-(for example from FSL or nipy). Therefore we will need four nodes:
-SpecifyModel (a NiPyPe-specific abstraction layer), Level1Design (SPM design
-definition), ModelEstimate, and ContrastEstimate. The connected modelling
-Workflow can be seen in Figure TODO. Model specification supports block, event
-and sparse designs. Contrasts provided to ContrastEstimate are defined using
-the same names of regressors as defined in the SpecifyModel."""
-
-specify_model = pe.Node(interface=model.SpecifyModel(), name="specify_model")
-specify_model.inputs.input_units = 'secs'
-specify_model.inputs.time_repetition = 3.
-specify_model.inputs.high_pass_filter_cutoff = 120
-specify_model.inputs.subject_info = [
-    Bunch(
-        conditions=['Task-Odd', 'Task-Even'],
-        onsets=[list(range(15, 240, 60)),
-                list(range(45, 240, 60))],
-        durations=[[15], [15]])
-] * 4
-
-level1design = pe.Node(interface=spm.Level1Design(), name="level1design")
-level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}}
-level1design.inputs.timing_units = 'secs'
-level1design.inputs.interscan_interval = specify_model.inputs.time_repetition
-
-level1estimate = pe.Node(interface=spm.EstimateModel(), name="level1estimate")
-level1estimate.inputs.estimation_method = {'Classical': 1}
-
-contrastestimate = pe.Node(
-    interface=spm.EstimateContrast(), name="contrastestimate")
-cont1 = ('Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5])
-cont2 = ('Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1])
-contrastestimate.inputs.contrasts = [cont1, cont2]
-
-modelling = pe.Workflow(name="modelling")
-modelling.connect(specify_model, 'session_info', level1design, 'session_info')
-modelling.connect(level1design, 'spm_mat_file', level1estimate, 'spm_mat_file')
-modelling.connect(level1estimate, 'spm_mat_file', contrastestimate,
-                  'spm_mat_file')
-modelling.connect(level1estimate, 'beta_images', contrastestimate,
                  'beta_images')
-modelling.connect(level1estimate, 'residual_image', contrastestimate,
-                  'residual_image')
-"""Having the preprocessing and modelling workflows, we need to connect them
-together, add a data grabbing facility, and save the results. For this we will
-create a master Workflow which will host the preprocessing and modelling
-Workflows as well as DataGrabber and DataSink Nodes. NiPyPe allows connecting
-Nodes between Workflows. We will use this feature to connect
-realignment_parameters and smoothed_files to the modelling workflow."""
-
-main_workflow = pe.Workflow(name="main_workflow")
-main_workflow.base_dir = "workflow_from_scratch"
-main_workflow.connect(preprocessing, "realign.realignment_parameters",
-                      modelling, "specify_model.realignment_parameters")
-main_workflow.connect(preprocessing, "smooth.smoothed_files", modelling,
-                      "specify_model.functional_runs")
-"""DataGrabber allows one to define flexible search patterns which can be
-parameterized by user-defined inputs (such as subject ID, session etc.).
-This makes it possible to adapt to a wide range of file layouts (a short
-sketch of how a template expands is given below).
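-
-For instance, with the template ``'%s/%s.nii'`` and the template arguments
-used further below (``func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]]``),
-a subject_id of ``'s1'`` expands to paths such as::
-
-    data/s1/f3.nii
-    data/s1/f5.nii
-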
In our case we will
-parameterize it with the subject ID. In this way we will be able to run it
-for different subjects. We can automate this by iterating over a list of
-subject IDs, setting an iterables property on the subject_id input of the
-DataGrabber. Its output will be connected to the realignment node of the
-preprocessing workflow.
-"""
-
-datasource = pe.Node(
-    interface=nio.DataGrabber(infields=['subject_id'], outfields=['func']),
-    name='datasource')
-datasource.inputs.base_directory = os.path.abspath('data')
-datasource.inputs.template = '%s/%s.nii'
-datasource.inputs.template_args = dict(
-    func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]])
-datasource.inputs.subject_id = 's1'
-datasource.inputs.sort_filelist = True
-
-main_workflow.connect(datasource, 'func', preprocessing, 'realign.in_files')
-"""DataSink, on the other hand, provides a means of storing selected results
-in a specified location. It supports automatic creation of the folder
-structure and regular-expression-based substitutions. In this example we will
-store T maps.
-"""
-
-datasink = pe.Node(interface=nio.DataSink(), name="datasink")
-datasink.inputs.base_directory = os.path.abspath(
-    'workflow_from_scratch/output')
-
-main_workflow.connect(modelling, 'contrastestimate.spmT_images', datasink,
-                      'contrasts.@T')
-
-main_workflow.run()
-main_workflow.write_graph()
diff --git a/examples/howto_caching_example.py b/examples/howto_caching_example.py
deleted file mode 100644
index debd826402..0000000000
--- a/examples/howto_caching_example.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-===========================================
-HOWTO: Using caching without using Workflow
-===========================================
-
-Using nipype in an imperative way: caching without a workflow
-
-Note that in the following example, we are calling command-lines with
-disk I/O that persists across runs, but we never have to worry about the
-file names or the directories.
-
-The disk location of the persistence is encoded by hashes.
To find out -where an operation has been persisted, simply look in it's output -variable:: - - out.runtime.cwd -""" - -from nipype.interfaces import fsl -fsl.FSLCommand.set_default_output_type('NIFTI') - -from nipype.caching import Memory - -import glob - -# First retrieve the list of files that we want to work upon -in_files = glob.glob('data/*/f3.nii') - -# Create a memory context -mem = Memory('.') - -# Apply an arbitrary (and pointless, here) threshold to the files) -threshold = [ - mem.cache(fsl.Threshold)(in_file=f, thresh=i) - for i, f in enumerate(in_files) -] - -# Merge all these files along the time dimension -out_merge = mem.cache(fsl.Merge)( - dimension="t", - in_files=[t.outputs.out_file for t in threshold], -) -# And finally compute the mean -out_mean = mem.cache(fsl.MeanImage)(in_file=out_merge.outputs.merged_file) - -# To avoid having increasing disk size we can keep only what was touched -# in this run -# mem.clear_previous_runs() - -# or what wasn't used since the start of 2011 -# mem.clear_runs_since(year=2011) diff --git a/examples/nipype_tutorial.ipynb b/examples/nipype_tutorial.ipynb deleted file mode 100644 index 90a06a631e..0000000000 --- a/examples/nipype_tutorial.ipynb +++ /dev/null @@ -1,1750 +0,0 @@ -{ - "metadata": { - "_draft": { - "nbviewer_url": "gisting : nipype.ipynb\r\n" - }, - "name": "nipype_tutorial" - }, - "nbformat": 3, - "nbformat_minor": 0, - "worksheets": [ - { - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Dissecting Nipype Workflows\n", - "\n", - "
\n", - "Nipype team | contact: satra@mit.edu | nipy.org/nipype\n", - "
\n", - "(Hit Esc to get an overview)\n", - "
[Latest version][notebook] | [Latest slideshow][slideshow]\n", - "\n", - "[notebook]: http://nbviewer.ipython.org/urls/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb\n", - "[slideshow]: http://slideviewer.herokuapp.com/url/raw.github.com/nipy/nipype/master/examples/nipype_tutorial.ipynb" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Contributors\n", - "\n", - "http://nipy.org/nipype/about.html#code-contributors\n", - "\n", - "# Funding\n", - "\n", - "- 1R03EB008673-01 from NIBIB, Satrajit Ghosh, Susan Whitfield-Gabrieli\n", - "- 5R01MH081909-02 from NIMH, Mark D'Esposito\n", - "- INCF\n", - "\n", - "# Conflict of interest\n", - "\n", - "
\n", - "Satrajit Ghosh: TankThink Labs, LLC\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# What is Nipype?\n", - "\n", - "
\n", - "\n", - "
\n", - "Figure designed and created by: Arno Klein (www.mindboggle.info)\n", - "
\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Make life a little easier\n", - "\n", - "\n", - "\n", - "Poline _et al._ (2012)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Many workflow systems out there\n", - "\n", - "- [BioImage Suite](http://www.bioimagesuite.org/)\n", - "- [BIRN Tools](https://wiki.birncommunity.org/x/LgFrAQ)\n", - "- [BrainVisa](http://brainvisa.info)\n", - "- [CambaFX](http://www-bmu.psychiatry.cam.ac.uk/software/)\n", - "- [JIST for MIPAV](http://www.nitrc.org/projects/jist/)\n", - "- [LONI pipeline](http://pipeline.loni.ucla.edu)\n", - "- [MEVIS Lab](http://www.mevislab.de)\n", - "- [PSOM](http://code.google.com/p/psom/)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "source": [ - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Solution requirements\n", - "\n", - "Coming at it from a developer's perspective, we needed something\n", - "\n", - "- lightweight\n", - "- scriptable\n", - "- provided formal, common semantics\n", - "- allowed interactive exploration\n", - "- supported efficient batch processing\n", - "- enabled rapid algorithm prototyping\n", - "- was flexible and adaptive\n", - "- part of an ecosystem" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Python ecosystem\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Existing technologies\n", - "\n", - "**shell scripting**:\n", - "\n", - " Can be quick to do, and powerful, but only provides application specific \n", - " scalability, and not easy to port across different architectures.\n", - "\n", - "**make/CMake**:\n", - "\n", - " Similar in concept to workflow execution in Nipype, but again limited by the\n", - " need for command line tools and flexibility in terms of scaling across\n", - " hardware architectures (although see [makeflow](http://nd.edu/~ccl/software/makeflow).\n", - "\n", - "**Octave/MATLAB**:\n", - "\n", - " Integration with other tools is *ad hoc* (i.e., system call) and dataflow is\n", - " managed at a programmatic level. However, see [PSOM](http://code.google.com/p/psom/) which offers a nice\n", - " alternative to some aspects of Nipype for Octave/Matlab users.\n", - "\n", - "**Graphical options**: (e.g., [LONI Pipeline](http://pipeline.loni.ucla.edu), [VisTrails](http://www.vistrails.org/))\n", - "\n", - " Are easy to use but reduces flexibility relative to scripting options." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Nipype architecture\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "notes" - } - }, - "source": [ - "* **Interface**: Wraps a program or function\n", - "\n", - "- **Node/MapNode**: Wraps an `Interface` for use in a Workflow that provides\n", - " caching and other goodies (e.g., pseudo-sandbox)\n", - "- **Workflow**: A *graph* or *forest of graphs* whose nodes are of type `Node`,\n", - " `MapNode` or `Workflow` and whose edges represent data flow\n", - "\n", - "* **Plugin**: A component that describes how a `Workflow` should be executed" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Software interfaces\n", - "\n", - "Currently supported (5-2-2013). [Click here for latest](http://www.mit.edu/~satra/nipype-nightly/documentation.html)\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "Most used/contributed policy!\n", - "\n", - "Not all components of these packages are available." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "source": [ - "# Workflows\n", - "\n", - "- Properties:\n", - "\n", - " - processing pipeline is a directed acyclic graph (DAG)\n", - " - nodes are processes\n", - " - edges represent data flow\n", - " - compact represenation for any process\n", - " - code and data separation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Execution Plugins\n", - "\n", - "Allows seamless execution across many architectures\n", - "\n", - " - Local\n", - "\n", - " - Serial\n", - " - Multicore\n", - "\n", - " - Clusters\n", - "\n", - " - HTCondor\n", - " - PBS/Torque/SGE/LSF (native and via IPython)\n", - " - SSH (via IPython)\n", - " - Soma Workflow" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Learn Nipype concepts in 10 easy steps\n", - "\n", - "\n", - "1. Installing and testing the installation \n", - "2. Working with interfaces\n", - "3. Using Nipype caching\n", - "4. Creating Nodes, MapNodes and Workflows\n", - "5. Getting and saving data\n", - "6. Using Iterables\n", - "7. Function nodes\n", - "8. Distributed computation\n", - "9. Connecting to databases\n", - "10. Execution configuration options" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 1. Installing Nipype\n", - "\n", - "## Scientific Python:\n", - "\n", - "* Debian/Ubuntu/Scientific Fedora\n", - "* [Canopy from Enthought](https://www.enthought.com/products/canopy/)\n", - "* [Anaconda from Contnuum Analytics](https://store.continuum.io/cshop/anaconda/)\n", - "\n", - "## Installing Nipype:\n", - "\n", - "* Available from [@NeuroDebian](http://neuro.debian.net/pkgs/python-nipype.html),\n", - " [@PyPI](http://pypi.python.org/pypi/nipype/), and\n", - " [@GitHub](http://github.com/nipy/nipype)\n", - " \n", - " - pip install nipype\n", - " - easy_install nipype\n", - " - sudo apt-get install python-nipype\n", - "\n", - "* Dependencies: networkx, nibabel, numpy, scipy, traits\n", - "\n", - "## Running Nipype ([Quickstart](http://nipy.org/nipype/quickstart.html)):\n", - "\n", - "* Ensure underlying tools are installed and accessible\n", - "* Nipype **is a wrapper, not a substitute** for AFNI, ANTS, FreeSurfer, FSL, SPM,\n", - " NiPy, etc.,." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Step 1. 
Testing nipype" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```\n", - "$ ipython notebook\n", - "```" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import nipype\n", - "\n", - "# Comment the following section to increase verbosity of output\n", - "nipype.config.set('logging', 'workflow_level', 'CRITICAL')\n", - "nipype.config.set('logging', 'interface_level', 'CRITICAL')\n", - "nipype.logging.update_logging(nipype.config)\n", - "\n", - "nipype.test(verbose=0) # Increase verbosity parameter for more info" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "If all goes well you will see an OK:\n", - "\n", - " ----------------------------------------------------------------------\n", - " Ran 2497 tests in 68.486s\n", - "\n", - " OK (SKIP=13)\n", - "\n", - "The number of tests and time will vary depending on which interfaces you have installed on your system." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "nipype.get_info()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Environment and data setup\n", - "\n", - "Setting up your Ipython notebook environment and download some data to play with" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "%pylab inline" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "# Some preliminaries\n", - "import os\n", - "cwd = os.getcwd()\n", - "tutorial_dir = '/software/temp/nipype-tutorial/ohbm/'\n", - "if not os.path.exists(tutorial_dir):\n", - " os.mkdir(tutorial_dir)\n", - "os.chdir(tutorial_dir)" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import urllib\n", - "required_files = ['ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", - " 'ds107/sub001/BOLD/task001_run002/bold.nii.gz',\n", - " 'ds107/sub001/anatomy/highres001.nii.gz',\n", - " 'ds107/sub044/BOLD/task001_run001/bold.nii.gz',\n", - " 'ds107/sub044/BOLD/task001_run002/bold.nii.gz',\n", - " 'ds107/sub044/anatomy/highres001.nii.gz'\n", - " ]\n", - "base_url = 'http://openfmri.aws.amazon.com.s3.amazonaws.com/'\n", - "for filepath in required_files:\n", - " file_location = os.path.join(tutorial_dir, filepath)\n", - " if not os.path.exists(file_location):\n", - " print('Retrieving: ' + file_location)\n", - " os.makedirs(os.path.dirname(file_location))\n", - " urllib.urlretrieve(base_url + filepath, file_location)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 2. 
Working with interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import nipype.algorithms" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.fsl import DTIFit\n", - "from nipype.interfaces.spm import Realign" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Finding interface inputs and outputs and examples" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "DTIFit.help()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "Realign.help()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Creating a directory for running interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import os\n", - "from shutil import copyfile\n", - "library_dir = os.path.join(tutorial_dir, 'as_a_library')\n", - "if not os.path.exists(library_dir):\n", - " os.mkdir(library_dir)\n", - "os.chdir(library_dir)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "## Executing interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.freesurfer import MRIConvert\n", - "convert = MRIConvert(in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", - " out_file='ds107.nii')\n", - "print(convert.cmdline)\n", - "results = convert.run(terminal_output='none') # allatonce, stream (default), file" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "results.outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "## Other ways" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert = MRIConvert()\n", - "convert.inputs.in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz'\n", - "convert.inputs.out_file='ds107.nii'\n", - "convert.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert = MRIConvert()\n", - "convert.run(in_file='../ds107/sub001/BOLD/task001_run001/bold.nii.gz',\n", - " out_file='ds107.nii')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": true, - "input": [ - "convert.inputs" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#### Look at only the defined inputs" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "results.inputs" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Experiment with other 
interfaces\n", - "\n", - "For example, run realignment with SPM" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.spm import Realign\n", - "results1 = Realign(in_files='ds107.nii',\n", - " register_to_mean=False).run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "And now use FSL" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.fsl import MCFLIRT\n", - "results2 = MCFLIRT(in_file='ds107.nii', ref_vol=0,\n", - " save_plots=True).run()" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Now we can look at some results" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "print results1.runtime.duration, results2.runtime.duration\n", - "subplot(211);plot(genfromtxt('ds107_mcf.nii.gz.par')[:, 3:]);title('FSL')\n", - "subplot(212);plot(genfromtxt('rp_ds107.txt')[:,:3]);title('SPM')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### if i execute the MCFLIRT line again, well, it runs again!" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 3. Nipype caching" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.caching import Memory\n", - "mem = Memory('.')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create `cacheable` objects" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "spm_realign = mem.cache(Realign)\n", - "fsl_realign = mem.cache(MCFLIRT)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Execute interfaces" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)\n", - "fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "subplot(211);plot(genfromtxt(fsl_results.outputs.par_file)[:, 3:])\n", - "subplot(212);plot(genfromtxt(spm_results.outputs.realignment_parameters)[:,:3])" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "spm_results = spm_realign(in_files='ds107.nii', register_to_mean=False)\n", - "fsl_results = fsl_realign(in_file='ds107.nii', ref_vol=0, save_plots=True)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# More caching" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from os.path import abspath as opap\n", - "files = [opap('../ds107/sub001/BOLD/task001_run001/bold.nii.gz'),\n", - " opap('../ds107/sub001/BOLD/task001_run002/bold.nii.gz')]\n", - "converter = mem.cache(MRIConvert)\n", - "newfiles = []\n", - 
"for idx, fname in enumerate(files):\n", - " newfiles.append(converter(in_file=fname,\n", - " out_type='nii').outputs.out_file)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "os.chdir(tutorial_dir)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 4: Nodes, Mapnodes and workflows\n", - "\n", - "**Where:**" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.pipeline.engine import Node, MapNode, Workflow" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Node**:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realign_spm = Node(Realign(), name='motion_correct')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Mapnode**:\n", - "\n", - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert2nii = MapNode(MRIConvert(out_type='nii'),\n", - " iterfield=['in_file'],\n", - " name='convert2nii')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# \"Hello World\" of Nipype workflows" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Connect them up:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realignflow = Workflow(name='realign_with_spm')\n", - "realignflow.connect(convert2nii, 'out_file',\n", - " realign_spm, 'in_files')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert2nii.inputs.in_file = files\n", - "realign_spm.inputs.register_to_mean = False\n", - "\n", - "realignflow.base_dir = opap('.')\n", - "realignflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Visualize the workflow" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realignflow.write_graph()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from IPython.core.display import Image\n", - "Image('realign_with_spm/graph.png')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "realignflow.write_graph(graph2use='orig')\n", - "Image('realign_with_spm/graph_detailed.png')" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 5. 
Getting and saving data\n", - "\n", - "### Instead of assigning data ourselves, let's *glob* it" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "os.chdir(tutorial_dir)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.io import DataGrabber, DataFinder\n", - "ds = Node(DataGrabber(infields=['subject_id'], outfields=['func']),\n", - " name='datasource')\n", - "ds.inputs.base_directory = opap('ds107')\n", - "ds.inputs.template = '%s/BOLD/task001*/bold.nii.gz'\n", - "ds.inputs.sort_filelist = True\n", - "\n", - "ds.inputs.subject_id = 'sub001'\n", - "print ds.run().outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ds.inputs.subject_id = 'sub044'\n", - "print ds.run().outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Multiple files\n", - "\n", - "### A little more practical usage" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ds = Node(DataGrabber(infields=['subject_id', 'task_id'],\n", - " outfields=['func', 'anat']),\n", - " name='datasource')\n", - "ds.inputs.base_directory = opap('ds107')\n", - "ds.inputs.template = '*'\n", - "ds.inputs.template_args = {'func': [['subject_id', 'task_id']],\n", - " 'anat': [['subject_id']]}\n", - "ds.inputs.field_template = {'func': '%s/BOLD/task%03d*/bold.nii.gz',\n", - " 'anat': '%s/anatomy/highres001.nii.gz'}\n", - "ds.inputs.sort_filelist = True\n", - "ds.inputs.subject_id = 'sub001'\n", - "ds.inputs.task_id = 1\n", - "print ds.run().outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "# Connecting to computation" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "convert2nii = MapNode(MRIConvert(out_type='nii'),\n", - " iterfield=['in_file'],\n", - " name='convert2nii')\n", - "\n", - "realign_spm = Node(Realign(), name='motion_correct')\n", - "realign_spm.inputs.register_to_mean = False\n", - "\n", - "connectedworkflow = Workflow(name='connectedtogether')\n", - "connectedworkflow.base_dir = opap('working_dir')\n", - "connectedworkflow.connect(ds, 'func', convert2nii, 'in_file')\n", - "connectedworkflow.connect(convert2nii, 'out_file', realign_spm, 'in_files')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Data sinking\n", - "\n", - "###Take output computed in a workflow out of it." 
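Before wiring outputs to a sink, it may help to see what the grabber above is effectively doing. Roughly, ``DataGrabber`` fills its printf-style ``field_template`` with the ``template_args`` values and globs the result; here is a plain-Python illustration (not the interface's actual implementation), reusing the ``opap`` helper and the ``ds107`` layout from above::

    import glob
    import os

    base = opap('ds107')                       # same base_directory as above
    func_template = '%s/BOLD/task%03d*/bold.nii.gz'
    args = ('sub001', 1)                       # (subject_id, task_id)

    # fill the template, then glob it -- roughly what the grabber returns as 'func'
    pattern = os.path.join(base, func_template % args)
    print(sorted(glob.glob(pattern)))

The ``anat`` field resolves in the same way using its own template.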
- ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.io import DataSink\n", - "sinker = Node(DataSink(), name='sinker')\n", - "sinker.inputs.base_directory = opap('output')\n", - "connectedworkflow.connect(realign_spm, 'realigned_files',\n", - " sinker, 'realigned')\n", - "connectedworkflow.connect(realign_spm, 'realignment_parameters',\n", - " sinker, 'realigned.@parameters')\n", - "connectedworkflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### How to determine output location\n", - "\n", - " 'base_directory/container/parameterization/destloc/filename'\n", - " \n", - " destloc = [@]string[[.[@]]string[[.[@]]string]...] and\n", - " destloc = realigned.@parameters --> 'realigned'\n", - " destloc = realigned.parameters.@1 --> 'realigned/parameters'\n", - " destloc = realigned.parameters.@2 --> 'realigned/parameters'\n", - " filename comes from the input to the connect statement." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 6: *iterables* - parametric execution\n", - "\n", - "**Workflow + iterables**: runs subgraph several times, attribute not input" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "ds.iterables = ('subject_id', ['sub001', 'sub044'])\n", - "connectedworkflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "#Putting it all together\n", - "\n", - "### iterables + MapNode + Node + Workflow + DataGrabber + DataSink" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "connectedworkflow.write_graph()\n", - "Image('working_dir/connectedtogether/graph.png')" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Step 7: The Function interface\n", - "\n", - "### The do anything you want card" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.utility import Function\n", - "\n", - "def myfunc(input1, input2):\n", - " \"\"\"Add and subtract two inputs\n", - " \"\"\"\n", - " return input1 + input2, input1 - input2\n", - "\n", - "calcfunc = Node(Function(input_names=['input1', 'input2'],\n", - " output_names = ['sum', 'difference'],\n", - " function=myfunc),\n", - " name='mycalc')\n", - "calcfunc.inputs.input1 = 1\n", - "calcfunc.inputs.input2 = 2\n", - "res = calcfunc.run()\n", - "print res.outputs" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 8: Distributed computing\n", - "\n", - "### Normally calling run executes the workflow in series" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "connectedworkflow.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### but you can scale very easily\n", - "\n", - "For example, to use multiple cores on your local machine" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - 
"connectedworkflow.run('MultiProc', plugin_args={'n_procs': 4})" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### Or to other job managers\n", - "\n", - "- connectedworkflow.run('PBS', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('SGE', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('LSF', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('Condor')\n", - "- connectedworkflow.run('IPython')\n", - "\n", - "### or submit graphs as a whole\n", - "\n", - "\n", - "- connectedworkflow.run('PBSGraph', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('SGEGraph', plugin_args={'qsub_args': '-q many'})\n", - "- connectedworkflow.run('CondorDAGMan')\n", - "\n", - "### Current Requirement: **SHARED FILESYSTEM**" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "### You can also set node specific plugin arguments" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "- node.plugin_args = {'qsub_args': '-l nodes=1:ppn=3', 'overwrite': True}\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 9: Connecting to Databases" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from os.path import abspath as opap\n", - "\n", - "from nipype.interfaces.io import XNATSource\n", - "from nipype.pipeline.engine import Node, Workflow\n", - "from nipype.interfaces.fsl import BET\n", - "\n", - "subject_id = 'xnat_S00001'\n", - "\n", - "dg = Node(XNATSource(infields=['subject_id'],\n", - " outfields=['struct'],\n", - " config='/Users/satra/xnat_configs/nitrc_ir_config'),\n", - " name='xnatsource')\n", - "dg.inputs.query_template = ('/projects/fcon_1000/subjects/%s/experiments/xnat_E00001'\n", - " '/scans/%s/resources/NIfTI/files')\n", - "dg.inputs.query_template_args['struct'] = [['subject_id', 'anat_mprage_anonymized']]\n", - "dg.inputs.subject_id = subject_id\n", - "\n", - "bet = Node(BET(), name='skull_stripper')\n", - "\n", - "wf = Workflow(name='testxnat')\n", - "wf.base_dir = opap('xnattest')\n", - "wf.connect(dg, 'struct', bet, 'in_file')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.io import XNATSink\n", - "\n", - "ds = Node(XNATSink(config='/Users/satra/xnat_configs/central_config'),\n", - " name='xnatsink')\n", - "ds.inputs.project_id = 'NPTEST'\n", - "ds.inputs.subject_id = 'NPTEST_xnat_S00001'\n", - "ds.inputs.experiment_id = 'test_xnat'\n", - "ds.inputs.reconstruction_id = 'bet'\n", - "ds.inputs.share = True\n", - "wf.connect(bet, 'out_file', ds, 'brain')" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.run()" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "#Step 10: Configuration options\n", - "\n", - "[Configurable options](http://nipy.org/nipype/users/config_file.html) control workflow and node execution options\n", - "\n", - "At the global level:" - ] - }, - 
{ - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype import config, logging\n", - "\n", - "config.enable_debug_mode()\n", - "logging.update_logging(config)\n", - "\n", - "config.set('execution', 'stop_on_first_crash', 'true')" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "At the workflow level:" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.config['execution']['hash_method'] = 'content'" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "Configurations can also be set at the node level." - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "bet.config = {'execution': {'keep_unnecessary_outputs': 'true'}}" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Reusable workflows" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "config.set_default_config()\n", - "logging.update_logging(config)" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from niflow.nipype1.workflows.fmri.fsl.preprocess import create_susan_smooth\n", - "\n", - "smooth = create_susan_smooth()\n", - "smooth.inputs.inputnode.in_files = opap('output/realigned/_subject_id_sub044/rbold_out.nii')\n", - "smooth.inputs.inputnode.fwhm = 5\n", - "smooth.inputs.inputnode.mask_file = 'mask.nii'\n", - "\n", - "smooth.run() # Will error because mask.nii does not exist" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "from nipype.interfaces.fsl import BET, MeanImage, ImageMaths\n", - "from nipype.pipeline.engine import Node\n", - "\n", - "\n", - "remove_nan = Node(ImageMaths(op_string= '-nan'), name='nanremove')\n", - "remove_nan.inputs.in_file = opap('output/realigned/_subject_id_sub044/rbold_out.nii')\n", - "\n", - "mi = Node(MeanImage(), name='mean')\n", - "\n", - "mask = Node(BET(mask=True), name='mask')\n", - "\n", - "wf = Workflow('reuse')\n", - "wf.base_dir = opap('.')\n", - "wf.connect(remove_nan, 'out_file', mi, 'in_file')\n", - "wf.connect(mi, 'out_file', mask, 'in_file')\n", - "wf.connect(mask, 'out_file', smooth, 'inputnode.mask_file')\n", - "wf.connect(remove_nan, 'out_file', smooth, 'inputnode.in_files')\n", - "\n", - "wf.run()" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, - "source": [ - "## Setting internal parameters of workflows" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "print(smooth.list_node_names())\n", - "\n", - "median = smooth.get_node('median')\n", - "median.inputs.op_string = '-k %s -p 60'" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "-" - } - }, - "outputs": [] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "wf.run()" - ], - "language": "python", - "metadata": {}, - "outputs": [] - }, - { - "cell_type": 
"markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, - "source": [ - "# Summary\n", - "\n", - "\n", - "- This tutorial covers the concepts of Nipype\n", - "\n", - " 1. Installing and testing the installation \n", - " 2. Working with interfaces\n", - " 3. Using Nipype caching\n", - " 4. Creating Nodes, MapNodes and Workflows\n", - " 5. Getting and saving data\n", - " 6. Using Iterables\n", - " 7. Function nodes\n", - " 8. Distributed computation\n", - " 9. Connecting to databases\n", - " 10. Execution configuration options\n", - "\n", - "- It will allow you to reuse and debug the various workflows available in Nipype, BIPS and CPAC\n", - "- Please contribute new interfaces and workflows!" - ] - }, - { - "cell_type": "code", - "collapsed": false, - "input": [ - "import os\n", - "basedir = '/Users/satra/Dropbox/WORK/notebooks/'\n", - "if os.path.exists(basedir):\n", - " os.chdir(basedir)" - ], - "language": "python", - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, - "outputs": [] - } - ], - "metadata": {} - } - ] -} diff --git a/examples/rsfmri_vol_surface_preprocessing.py b/examples/rsfmri_vol_surface_preprocessing.py deleted file mode 100644 index 43e9d3d089..0000000000 --- a/examples/rsfmri_vol_surface_preprocessing.py +++ /dev/null @@ -1,1082 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -==================================== -rsfMRI: ANTS, FS, FSL, SPM, aCompCor -==================================== - - -A preprocessing workflow for Siemens resting state data. - -This workflow makes use of: - -- ANTS -- FreeSurfer -- FSL -- SPM -- CompCor - -For example:: - - python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii - -s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')" - - or - - python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii - -t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011 - --subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23 - 7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o . - -This workflow takes resting timeseries and a Siemens dicom file corresponding -to it and preprocesses it to produce timeseries coordinates or grayordinates. 
- -This workflow also requires 2mm subcortical atlas and templates that are -available from: - -http://mindboggle.info/data.html - -specifically the 2mm versions of: - -- `Joint Fusion Atlas `_ -- `MNI template `_ -""" - -from __future__ import division, unicode_literals -from builtins import open, range, str - -import os - -from nipype.interfaces.base import CommandLine -CommandLine.set_default_terminal_output('allatonce') - -from dicom import read_file - -from nipype.interfaces import (spm, fsl, Function, ants, freesurfer) -from nipype.interfaces.c3 import C3dAffineTool - -fsl.FSLCommand.set_default_output_type('NIFTI') - -from nipype import Workflow, Node, MapNode -from nipype.interfaces import matlab as mlab - -mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodisplay") -# If SPM is not in your MATLAB path you should add it here -# mlab.MatlabCommand.set_default_paths('/software/matlab/spm12') - -from nipype.algorithms.rapidart import ArtifactDetect -from nipype.algorithms.misc import TSNR, CalculateMedian -from nipype.interfaces.utility import Rename, Merge, IdentityInterface -from nipype.utils.filemanip import filename_to_list -from nipype.interfaces.io import DataSink, FreeSurferSource - -import numpy as np -import scipy as sp -import nibabel as nb - -imports = [ - 'import os', 'import nibabel as nb', 'import numpy as np', - 'import scipy as sp', - 'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename', - 'from scipy.special import legendre' -] - - -def get_info(dicom_files): - from dcmstack.extract import default_extractor - """Given a Siemens dicom file return metadata - - Returns - ------- - RepetitionTime - Slice Acquisition Times - Spacing between slices - """ - meta = default_extractor( - read_file( - filename_to_list(dicom_files)[0], - stop_before_pixels=True, - force=True)) - return (meta['RepetitionTime'] / 1000., meta['CsaImage.MosaicRefAcqTimes'], - meta['SpacingBetweenSlices']) - - -def median(in_files): - """Computes an average of the median of each realigned timeseries - - Parameters - ---------- - - in_files: one or more realigned Nifti 4D time series - - Returns - ------- - - out_file: a 3D Nifti file - """ - import numpy as np - import nibabel as nb - average = None - for idx, filename in enumerate(filename_to_list(in_files)): - img = nb.load(filename) - data = np.median(img.get_data(), axis=3) - if average is None: - average = data - else: - average = average + data - median_img = nb.Nifti1Image(average / float(idx + 1), img.affine, - img.header) - filename = os.path.join(os.getcwd(), 'median.nii.gz') - median_img.to_filename(filename) - return filename - - -def bandpass_filter(files, lowpass_freq, highpass_freq, fs): - """Bandpass filter the input files - - Parameters - ---------- - files: list of 4d nifti files - lowpass_freq: cutoff frequency for the low pass filter (in Hz) - highpass_freq: cutoff frequency for the high pass filter (in Hz) - fs: sampling rate (in Hz) - """ - from nipype.utils.filemanip import split_filename, list_to_filename - import numpy as np - import nibabel as nb - out_files = [] - for filename in filename_to_list(files): - path, name, ext = split_filename(filename) - out_file = os.path.join(os.getcwd(), name + '_bp' + ext) - img = nb.load(filename) - timepoints = img.shape[-1] - F = np.zeros((timepoints)) - lowidx = int(timepoints / 2) + 1 - if lowpass_freq > 0: - lowidx = np.round(lowpass_freq / fs * timepoints) - highidx = 0 - if highpass_freq > 0: - highidx = np.round(highpass_freq / fs * timepoints) 
- F[highidx:lowidx] = 1 - F = ((F + F[::-1]) > 0).astype(int) - data = img.get_data() - if np.all(F == 1): - filtered_data = data - else: - filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F)) - img_out = nb.Nifti1Image(filtered_data, img.affine, img.header) - img_out.to_filename(out_file) - out_files.append(out_file) - return list_to_filename(out_files) - - -def motion_regressors(motion_params, order=0, derivatives=1): - """Compute motion regressors upto given order and derivative - - motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) - """ - import numpy as np - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - out_params = params - for d in range(1, derivatives + 1): - cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), - params)) - out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) - out_params2 = out_params - for i in range(2, order + 1): - out_params2 = np.hstack((out_params2, np.power(out_params, i))) - filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) - np.savetxt(filename, out_params2, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None): - """Builds a regressor set comprisong motion parameters, composite norm and - outliers - - The outliers are added as a single time point column for each outlier - - - Parameters - ---------- - - motion_params: a text file containing motion parameters and its derivatives - comp_norm: a text file containing the composite norm - outliers: a text file containing 0-based outlier indices - detrend_poly: number of polynomials to add to detrend - - Returns - ------- - components_file: a text file containing all the regressors - """ - import numpy as np - import nibabel as nb - from scipy.special import legendre - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) - out_params = np.hstack((params, norm_val[:, None])) - try: - outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) - except IOError: - outlier_val = np.empty((0)) - for index in np.atleast_1d(outlier_val): - outlier_vector = np.zeros((out_params.shape[0], 1)) - outlier_vector[index] = 1 - out_params = np.hstack((out_params, outlier_vector)) - if detrend_poly: - timepoints = out_params.shape[0] - X = np.empty((timepoints, 0)) - for i in range(detrend_poly): - X = np.hstack((X, legendre(i + 1)(np.linspace( - -1, 1, timepoints))[:, None])) - out_params = np.hstack((out_params, X)) - filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) - np.savetxt(filename, out_params, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def extract_noise_components(realigned_file, - mask_file, - num_components=5, - extra_regressors=None): - """Derive components most reflective of physiological noise - - Parameters - ---------- - realigned_file: a 4D Nifti file containing realigned volumes - mask_file: a 3D Nifti file containing white matter + ventricular masks - num_components: number of components to use for noise decomposition - extra_regressors: additional regressors to add - - Returns - ------- - components_file: a text file containing the noise components - """ - from scipy.linalg.decomp_svd import svd - import numpy as np - import nibabel as nb - import os - imgseries = nb.load(realigned_file) - components = None - for 
filename in filename_to_list(mask_file): - mask = nb.load(filename).get_data() - if len(np.nonzero(mask > 0)[0]) == 0: - continue - voxel_timecourses = imgseries.get_data()[mask > 0] - voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 - # remove mean and normalize by variance - # voxel_timecourses.shape == [nvoxels, time] - X = voxel_timecourses.T - stdX = np.std(X, axis=0) - stdX[stdX == 0] = 1. - stdX[np.isnan(stdX)] = 1. - stdX[np.isinf(stdX)] = 1. - X = (X - np.mean(X, axis=0)) / stdX - u, _, _ = svd(X, full_matrices=False) - if components is None: - components = u[:, :num_components] - else: - components = np.hstack((components, u[:, :num_components])) - if extra_regressors: - regressors = np.genfromtxt(extra_regressors) - components = np.hstack((components, regressors)) - components_file = os.path.join(os.getcwd(), 'noise_components.txt') - np.savetxt(components_file, components, fmt=b"%.10f") - return components_file - - -def rename(in_files, suffix=None): - from nipype.utils.filemanip import (filename_to_list, split_filename, - list_to_filename) - out_files = [] - for idx, filename in enumerate(filename_to_list(in_files)): - _, name, ext = split_filename(filename) - if suffix is None: - out_files.append(name + ('_%03d' % idx) + ext) - else: - out_files.append(name + suffix + ext) - return list_to_filename(out_files) - - -def get_aparc_aseg(files): - """Return the aparc+aseg.mgz file""" - for name in files: - if 'aparc+aseg.mgz' in name: - return name - raise ValueError('aparc+aseg.mgz not found') - - -def extract_subrois(timeseries_file, label_file, indices): - """Extract voxel time courses for each subcortical roi index - - Parameters - ---------- - - timeseries_file: a 4D Nifti file - label_file: a 3D file containing rois in the same space/size of the 4D file - indices: a list of indices for ROIs to extract. 
- - Returns - ------- - out_file: a text file containing time courses for each voxel of each roi - The first four columns are: freesurfer index, i, j, k positions in the - label file - """ - from nipype.utils.filemanip import split_filename - import nibabel as nb - import os - img = nb.load(timeseries_file) - data = img.get_data() - roiimg = nb.load(label_file) - rois = roiimg.get_data() - prefix = split_filename(timeseries_file)[1] - out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix) - with open(out_ts_file, 'wt') as fp: - for fsindex in indices: - ijk = np.nonzero(rois == fsindex) - ts = data[ijk] - for i0, row in enumerate(ts): - fp.write('%d,%d,%d,%d,' % ( - fsindex, ijk[0][i0], ijk[1][i0], - ijk[2][i0]) + ','.join(['%.10f' % val - for val in row]) + '\n') - return out_ts_file - - -def combine_hemi(left, right): - """Combine left and right hemisphere time series into a single text file - """ - import os - import numpy as np - lh_data = nb.load(left).get_data() - rh_data = nb.load(right).get_data() - - indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], - 2000000 + np.arange(0, rh_data.shape[0])[:, None])) - all_data = np.hstack((indices, - np.vstack((lh_data.squeeze(), rh_data.squeeze())))) - filename = left.split('.')[1] + '_combined.txt' - np.savetxt( - filename, - all_data, - fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) - return os.path.abspath(filename) - - -def create_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - - name : name of workflow (default: 'registration') - - Inputs:: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.anatomical_image : anatomical image to coregister to - inputspec.target_image : registration target - - Outputs:: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - """ - - register = Workflow(name=name) - - inputnode = Node( - interface=IdentityInterface(fields=[ - 'source_files', 'mean_image', 'subject_id', 'subjects_dir', - 'target_image' - ]), - name='inputspec') - - outputnode = Node( - interface=IdentityInterface(fields=[ - 'func2anat_transform', 'out_reg_file', 'anat2target_transform', - 'transforms', 'transformed_mean', 'segmentation_files', - 'anat2target', 'aparc' - ]), - name='outputspec') - - # Get the subject's freesurfer source directory - fssource = Node(FreeSurferSource(), name='fssource') - fssource.run_without_submitting = True - register.connect(inputnode, 'subject_id', fssource, 'subject_id') - register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir') - - convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert") - register.connect(fssource, 'T1', convert, 'in_file') - - # Coregister the median to the surface - bbregister = Node(freesurfer.BBRegister(), name='bbregister') - bbregister.inputs.init = 'fsl' - bbregister.inputs.contrast_type = 't2' - bbregister.inputs.out_fsl_file = True - bbregister.inputs.epi_mask = True - register.connect(inputnode, 'subject_id', bbregister, 'subject_id') - register.connect(inputnode, 'mean_image', bbregister, 'source_file') - register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir') - """ - Estimate the tissue classes from the anatomical image. 
But use spm's segment - as FSL appears to be breaking. - """ - - stripper = Node(fsl.BET(), name='stripper') - register.connect(convert, 'out_file', stripper, 'in_file') - fast = Node(fsl.FAST(), name='fast') - register.connect(stripper, 'out_file', fast, 'in_files') - """ - Binarize the segmentation - """ - - binarize = MapNode( - fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'), - iterfield=['in_file'], - name='binarize') - register.connect(fast, 'partial_volume_files', binarize, 'in_file') - """ - Apply inverse transform to take segmentations to functional space - """ - - applyxfm = MapNode( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - iterfield=['target_file'], - name='inverse_transform') - register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file') - register.connect(binarize, 'out_file', applyxfm, 'target_file') - register.connect(inputnode, 'mean_image', applyxfm, 'source_file') - """ - Apply inverse transform to aparc file - """ - - aparcxfm = Node( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - name='aparc_inverse_transform') - register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file') - register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm, - 'target_file') - register.connect(inputnode, 'mean_image', aparcxfm, 'source_file') - """ - Convert the BBRegister transformation to ANTS ITK format - """ - - convert2itk = Node(C3dAffineTool(), name='convert2itk') - convert2itk.inputs.fsl2ras = True - convert2itk.inputs.itk_transform = True - register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file') - register.connect(inputnode, 'mean_image', convert2itk, 'source_file') - register.connect(stripper, 'out_file', convert2itk, 'reference_file') - """ - Compute registration between the subject's structural and MNI template - This is currently set to perform a very quick registration. 
However, the - registration can be made significantly more accurate for cortical - structures by increasing the number of iterations - All parameters are set using the example from: - #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh - """ - - reg = Node(ants.Registration(), name='antsRegister') - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[ - 100, 30, 20 - ]] - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = True - reg.inputs.initial_moving_transform_com = True - reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01] - reg.inputs.convergence_window_size = [20] * 2 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 3 - reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 3 - reg.inputs.use_histogram_matching = [False] * 2 + [True] - reg.inputs.winsorize_lower_quantile = 0.005 - reg.inputs.winsorize_upper_quantile = 0.995 - reg.inputs.float = True - reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - reg.inputs.num_threads = 4 - reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'} - register.connect(stripper, 'out_file', reg, 'moving_image') - register.connect(inputnode, 'target_image', reg, 'fixed_image') - """ - Concatenate the affine and ants transforms into a list - """ - - merge = Node(Merge(2), iterfield=['in2'], name='mergexfm') - register.connect(convert2itk, 'itk_transform', merge, 'in2') - register.connect(reg, 'composite_transform', merge, 'in1') - """ - Transform the mean image. 
First to anatomical and then to target - """ - - warpmean = Node(ants.ApplyTransforms(), name='warpmean') - warpmean.inputs.input_image_type = 3 - warpmean.inputs.interpolation = 'Linear' - warpmean.inputs.invert_transform_flags = [False, False] - warpmean.terminal_output = 'file' - warpmean.inputs.args = '--float' - warpmean.inputs.num_threads = 4 - - register.connect(inputnode, 'target_image', warpmean, 'reference_image') - register.connect(inputnode, 'mean_image', warpmean, 'input_image') - register.connect(merge, 'out', warpmean, 'transforms') - """ - Assign all the output files - """ - - register.connect(reg, 'warped_image', outputnode, 'anat2target') - register.connect(warpmean, 'output_image', outputnode, 'transformed_mean') - register.connect(applyxfm, 'transformed_file', outputnode, - 'segmentation_files') - register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc') - register.connect(bbregister, 'out_fsl_file', outputnode, - 'func2anat_transform') - register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file') - register.connect(reg, 'composite_transform', outputnode, - 'anat2target_transform') - register.connect(merge, 'out', outputnode, 'transforms') - - return register - - -""" -Creates the main preprocessing workflow -""" - - -def create_workflow(files, - target_file, - subject_id, - TR, - slice_times, - norm_threshold=1, - num_components=5, - vol_fwhm=None, - surf_fwhm=None, - lowpass_freq=-1, - highpass_freq=-1, - subjects_dir=None, - sink_directory=os.getcwd(), - target_subject=['fsaverage3', 'fsaverage4'], - name='resting'): - - wf = Workflow(name=name) - - # Rename files in case they are named identically - name_unique = MapNode( - Rename(format_string='rest_%(run)02d'), - iterfield=['in_file', 'run'], - name='rename') - name_unique.inputs.keep_ext = True - name_unique.inputs.run = list(range(1, len(files) + 1)) - name_unique.inputs.in_file = files - - realign = Node(interface=spm.Realign(), name="realign") - realign.inputs.jobtype = 'estwrite' - - num_slices = len(slice_times) - slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing") - slice_timing.inputs.num_slices = num_slices - slice_timing.inputs.time_repetition = TR - slice_timing.inputs.time_acquisition = TR - TR / float(num_slices) - slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist() - slice_timing.inputs.ref_slice = int(num_slices / 2) - - # Comute TSNR on realigned data regressing polynomials upto order 2 - tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') - wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file') - - # Compute the median image across runs - calc_median = Node(CalculateMedian(), name='median') - wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') - """Segment and Register - """ - - registration = create_reg_workflow(name='registration') - wf.connect(calc_median, 'median_file', registration, - 'inputspec.mean_image') - registration.inputs.inputspec.subject_id = subject_id - registration.inputs.inputspec.subjects_dir = subjects_dir - registration.inputs.inputspec.target_image = target_file - """Use :class:`nipype.algorithms.rapidart` to determine which of the - images in the functional series are outliers based on deviations in - intensity or movement. 
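For intuition, flagging motion outliers boils down to thresholding a composite, frame-to-frame motion trace (intensity outliers are handled analogously on the global signal); a rough numpy sketch, illustrative only and not ArtifactDetect's exact algorithm, using random stand-in realignment parameters::

    import numpy as np

    rp = np.random.randn(200, 6) * 0.1              # stand-in realignment parameters
    diffs = np.diff(rp, axis=0)                     # frame-to-frame change
    composite = np.sqrt((diffs ** 2).sum(axis=1))   # composite motion norm
    outliers = np.nonzero(composite > 1.0)[0] + 1   # volumes exceeding norm_threshold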
- """ - - art = Node(interface=ArtifactDetect(), name="art") - art.inputs.use_differences = [True, True] - art.inputs.use_norm = True - art.inputs.norm_threshold = norm_threshold - art.inputs.zintensity_threshold = 9 - art.inputs.mask_type = 'spm_global' - art.inputs.parameter_source = 'SPM' - """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose - to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal - voxel sizes. - """ - - wf.connect([ - (name_unique, realign, [('out_file', 'in_files')]), - (realign, slice_timing, [('realigned_files', 'in_files')]), - (slice_timing, art, [('timecorrected_files', 'realigned_files')]), - (realign, art, [('realignment_parameters', 'realignment_parameters')]), - ]) - - def selectindex(files, idx): - import numpy as np - from nipype.utils.filemanip import filename_to_list, list_to_filename - return list_to_filename( - np.array(filename_to_list(files))[idx].tolist()) - - mask = Node(fsl.BET(), name='getmask') - mask.inputs.mask = True - wf.connect(calc_median, 'median_file', mask, 'in_file') - - # get segmentation in normalized functional space - - def merge_files(in1, in2): - out_files = filename_to_list(in1) - out_files.extend(filename_to_list(in2)) - return out_files - - # filter some noise - - # Compute motion regressors - motreg = Node( - Function( - input_names=['motion_params', 'order', 'derivatives'], - output_names=['out_files'], - function=motion_regressors, - imports=imports), - name='getmotionregress') - wf.connect(realign, 'realignment_parameters', motreg, 'motion_params') - - # Create a filter to remove motion and art confounds - createfilter1 = Node( - Function( - input_names=[ - 'motion_params', 'comp_norm', 'outliers', 'detrend_poly' - ], - output_names=['out_files'], - function=build_filter1, - imports=imports), - name='makemotionbasedfilter') - createfilter1.inputs.detrend_poly = 2 - wf.connect(motreg, 'out_files', createfilter1, 'motion_params') - wf.connect(art, 'norm_files', createfilter1, 'comp_norm') - wf.connect(art, 'outlier_files', createfilter1, 'outliers') - - filter1 = MapNode( - fsl.GLM( - out_f_name='F_mcart.nii', out_pf_name='pF_mcart.nii', demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filtermotion') - - wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file') - wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'), - filter1, 'out_res_name') - wf.connect(createfilter1, 'out_files', filter1, 'design') - - createfilter2 = MapNode( - Function( - input_names=[ - 'realigned_file', 'mask_file', 'num_components', - 'extra_regressors' - ], - output_names=['out_files'], - function=extract_noise_components, - imports=imports), - iterfield=['realigned_file', 'extra_regressors'], - name='makecompcorrfilter') - createfilter2.inputs.num_components = num_components - - wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors') - wf.connect(filter1, 'out_res', createfilter2, 'realigned_file') - wf.connect(registration, - ('outputspec.segmentation_files', selectindex, [0, 2]), - createfilter2, 'mask_file') - - filter2 = MapNode( - fsl.GLM(out_f_name='F.nii', out_pf_name='pF.nii', demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filter_noise_nosmooth') - wf.connect(filter1, 'out_res', filter2, 'in_file') - wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2, - 'out_res_name') - wf.connect(createfilter2, 'out_files', filter2, 'design') - 
wf.connect(mask, 'mask_file', filter2, 'mask') - - bandpass = Node( - Function( - input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'], - output_names=['out_files'], - function=bandpass_filter, - imports=imports), - name='bandpass_unsmooth') - bandpass.inputs.fs = 1. / TR - bandpass.inputs.highpass_freq = highpass_freq - bandpass.inputs.lowpass_freq = lowpass_freq - wf.connect(filter2, 'out_res', bandpass, 'files') - """Smooth the functional data using - :class:`nipype.interfaces.spm.Smooth`. - """ - - smooth = Node(interface=spm.Smooth(), name="smooth") - smooth.inputs.fwhm = vol_fwhm - - wf.connect(bandpass, 'out_files', smooth, 'in_files') - - collector = Node(Merge(2), name='collect_streams') - wf.connect(smooth, 'smoothed_files', collector, 'in1') - wf.connect(bandpass, 'out_files', collector, 'in2') - """ - Transform the remaining images. First to anatomical and then to target - """ - - warpall = MapNode( - ants.ApplyTransforms(), iterfield=['input_image'], name='warpall') - warpall.inputs.input_image_type = 3 - warpall.inputs.interpolation = 'Linear' - warpall.inputs.invert_transform_flags = [False, False] - warpall.terminal_output = 'file' - warpall.inputs.reference_image = target_file - warpall.inputs.args = '--float' - warpall.inputs.num_threads = 1 - - # transform to target - wf.connect(collector, 'out', warpall, 'input_image') - wf.connect(registration, 'outputspec.transforms', warpall, 'transforms') - - mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask') - - wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file') - - maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker') - wf.connect(warpall, 'output_image', maskts, 'in_file') - wf.connect(mask_target, 'out_file', maskts, 'mask_file') - - # map to surface - # extract aparc+aseg ROIs - # extract subcortical ROIs - # extract target space ROIs - # combine subcortical and cortical rois into a single cifti file - - ####### - # Convert aparc to subject functional space - - # Sample the average time series in aparc ROIs - sampleaparc = MapNode( - freesurfer.SegStats(default_color_table=True), - iterfield=['in_file', 'summary_file', 'avgwf_txt_file'], - name='aparc_ts') - sampleaparc.inputs.segment_id = ( - [8] + list(range(10, 14)) + [17, 18, 26, 47] + list(range(49, 55)) + - [58] + list(range(1001, 1036)) + list(range(2001, 2036))) - - wf.connect(registration, 'outputspec.aparc', sampleaparc, - 'segmentation_file') - wf.connect(collector, 'out', sampleaparc, 'in_file') - - def get_names(files, suffix): - """Generate appropriate names for output files - """ - from nipype.utils.filemanip import (split_filename, filename_to_list, - list_to_filename) - out_names = [] - for filename in files: - _, name, _ = split_filename(filename) - out_names.append(name + suffix) - return list_to_filename(out_names) - - wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc, - 'avgwf_txt_file') - wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc, - 'summary_file') - - # Sample the time series onto the surface of the target surface. 
Performs - # sampling into left and right hemisphere - target = Node(IdentityInterface(fields=['target_subject']), name='target') - target.iterables = ('target_subject', filename_to_list(target_subject)) - - samplerlh = MapNode( - freesurfer.SampleToSurface(), - iterfield=['source_file'], - name='sampler_lh') - samplerlh.inputs.sampling_method = "average" - samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1) - samplerlh.inputs.sampling_units = "frac" - samplerlh.inputs.interp_method = "trilinear" - samplerlh.inputs.smooth_surf = surf_fwhm - # samplerlh.inputs.cortex_mask = True - samplerlh.inputs.out_type = 'niigz' - samplerlh.inputs.subjects_dir = subjects_dir - - samplerrh = samplerlh.clone('sampler_rh') - - samplerlh.inputs.hemi = 'lh' - wf.connect(collector, 'out', samplerlh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file') - wf.connect(target, 'target_subject', samplerlh, 'target_subject') - - samplerrh.set_input('hemi', 'rh') - wf.connect(collector, 'out', samplerrh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file') - wf.connect(target, 'target_subject', samplerrh, 'target_subject') - - # Combine left and right hemisphere to text file - combiner = MapNode( - Function( - input_names=['left', 'right'], - output_names=['out_file'], - function=combine_hemi, - imports=imports), - iterfield=['left', 'right'], - name="combiner") - wf.connect(samplerlh, 'out_file', combiner, 'left') - wf.connect(samplerrh, 'out_file', combiner, 'right') - - # Sample the time series file for each subcortical roi - ts2txt = MapNode( - Function( - input_names=['timeseries_file', 'label_file', 'indices'], - output_names=['out_file'], - function=extract_subrois, - imports=imports), - iterfield=['timeseries_file'], - name='getsubcortts') - ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\ - list(range(49, 55)) + [58] - ts2txt.inputs.label_file = \ - os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_' - '2mm_v2.nii.gz')) - wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file') - - ###### - - substitutions = [('_target_subject_', - ''), ('_filtermotart_cleaned_bp_trans_masked', ''), - ('_filtermotart_cleaned_bp', '')] - regex_subs = [ - ('_ts_masker.*/sar', '/smooth/'), - ('_ts_masker.*/ar', '/unsmooth/'), - ('_combiner.*/sar', '/smooth/'), - ('_combiner.*/ar', '/unsmooth/'), - ('_aparc_ts.*/sar', '/smooth/'), - ('_aparc_ts.*/ar', '/unsmooth/'), - ('_getsubcortts.*/sar', '/smooth/'), - ('_getsubcortts.*/ar', '/unsmooth/'), - ('series/sar', 'series/smooth/'), - ('series/ar', 'series/unsmooth/'), - ('_inverse_transform./', ''), - ] - # Save the relevant data into an output directory - datasink = Node(interface=DataSink(), name="datasink") - datasink.inputs.base_directory = sink_directory - datasink.inputs.container = subject_id - datasink.inputs.substitutions = substitutions - datasink.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(realign, 'realignment_parameters', datasink, - 'resting.qa.motion') - wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm') - wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity') - wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files') - wf.connect(registration, 'outputspec.segmentation_files', datasink, - 'resting.mask_files') - wf.connect(registration, 'outputspec.anat2target', datasink, - 'resting.qa.ants') - wf.connect(mask, 'mask_file', datasink, 
'resting.mask_files.@brainmask') - wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target') - wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F') - wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF') - wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps') - wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p') - wf.connect(bandpass, 'out_files', datasink, - 'resting.timeseries.@bandpassed') - wf.connect(smooth, 'smoothed_files', datasink, - 'resting.timeseries.@smoothed') - wf.connect(createfilter1, 'out_files', datasink, - 'resting.regress.@regressors') - wf.connect(createfilter2, 'out_files', datasink, - 'resting.regress.@compcorr') - wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target') - wf.connect(sampleaparc, 'summary_file', datasink, - 'resting.parcellations.aparc') - wf.connect(sampleaparc, 'avgwf_txt_file', datasink, - 'resting.parcellations.aparc.@avgwf') - wf.connect(ts2txt, 'out_file', datasink, - 'resting.parcellations.grayo.@subcortical') - - datasink2 = Node(interface=DataSink(), name="datasink2") - datasink2.inputs.base_directory = sink_directory - datasink2.inputs.container = subject_id - datasink2.inputs.substitutions = substitutions - datasink2.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(combiner, 'out_file', datasink2, - 'resting.parcellations.grayo.@surface') - return wf - - -""" -Creates the full workflow including getting information from dicom files -""" - - -def create_resting_workflow(args, name=None): - TR = args.TR - slice_times = args.slice_times - if args.dicom_file: - TR, slice_times, slice_thickness = get_info(args.dicom_file) - slice_times = (np.array(slice_times) / 1000.).tolist() - if name is None: - name = 'resting_' + args.subject_id - kwargs = dict( - files=[os.path.abspath(filename) for filename in args.files], - target_file=os.path.abspath(args.target_file), - subject_id=args.subject_id, - TR=TR, - slice_times=slice_times, - vol_fwhm=args.vol_fwhm, - surf_fwhm=args.surf_fwhm, - norm_threshold=2., - subjects_dir=os.path.abspath(args.fsdir), - target_subject=args.target_surfs, - lowpass_freq=args.lowpass_freq, - highpass_freq=args.highpass_freq, - sink_directory=os.path.abspath(args.sink), - name=name) - wf = create_workflow(**kwargs) - return wf - - -if __name__ == "__main__": - from argparse import ArgumentParser, RawTextHelpFormatter - defstr = ' (default %(default)s)' - parser = ArgumentParser( - description=__doc__, formatter_class=RawTextHelpFormatter) - parser.add_argument( - "-d", - "--dicom_file", - dest="dicom_file", - help="an example dicom file from the resting series") - parser.add_argument( - "-f", - "--files", - dest="files", - nargs="+", - help="4d nifti files for resting state", - required=True) - parser.add_argument( - "-t", - "--target", - dest="target_file", - help=("Target in MNI space. 
Best to use the MindBoggle " - "template - " - "OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"), - required=True) - parser.add_argument( - "-s", - "--subject_id", - dest="subject_id", - help="FreeSurfer subject id", - required=True) - parser.add_argument( - "--subjects_dir", - dest="fsdir", - help="FreeSurfer subject directory", - required=True) - parser.add_argument( - "--target_surfaces", - dest="target_surfs", - nargs="+", - default=['fsaverage5'], - help="FreeSurfer target surfaces" + defstr) - parser.add_argument( - "--TR", - dest="TR", - default=None, - type=float, - help="TR if dicom not provided in seconds") - parser.add_argument( - "--slice_times", - dest="slice_times", - nargs="+", - type=float, - help="Slice onset times in seconds") - parser.add_argument( - '--vol_fwhm', - default=6., - dest='vol_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - '--surf_fwhm', - default=15., - dest='surf_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - "-l", - "--lowpass_freq", - dest="lowpass_freq", - default=0.1, - type=float, - help="Low pass frequency (Hz)" + defstr) - parser.add_argument( - "-u", - "--highpass_freq", - dest="highpass_freq", - default=0.01, - type=float, - help="High pass frequency (Hz)" + defstr) - parser.add_argument( - "-o", - "--output_dir", - dest="sink", - help="Output directory base", - required=True) - parser.add_argument( - "-w", "--work_dir", dest="work_dir", help="Output directory base") - parser.add_argument( - "-p", - "--plugin", - dest="plugin", - default='Linear', - help="Plugin to use") - parser.add_argument( - "--plugin_args", dest="plugin_args", help="Plugin arguments") - args = parser.parse_args() - - wf = create_resting_workflow(args) - - if args.work_dir: - work_dir = os.path.abspath(args.work_dir) - else: - work_dir = os.getcwd() - - wf.base_dir = work_dir - if args.plugin_args: - wf.run(args.plugin, plugin_args=eval(args.plugin_args)) - else: - wf.run(args.plugin) diff --git a/examples/rsfmri_vol_surface_preprocessing_nipy.py b/examples/rsfmri_vol_surface_preprocessing_nipy.py deleted file mode 100644 index 2397a136e0..0000000000 --- a/examples/rsfmri_vol_surface_preprocessing_nipy.py +++ /dev/null @@ -1,1083 +0,0 @@ -#!/usr/bin/env python -""" -===================================== -rsfMRI: ANTS, FS, FSL, NiPy, aCompCor -===================================== - - -A preprocessing workflow for Siemens resting state data. - -This workflow makes use of: - -- ANTS -- FreeSurfer -- FSL -- NiPy -- CompCor - -For example:: - - python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii - -s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')" - -or:: - - python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii - -t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011 - --subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23 - 7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o . - -This workflow takes resting timeseries and a Siemens dicom file corresponding -to it and preprocesses it to produce timeseries coordinates or grayordinates. - -For non-Siemens dicoms, provide slice times instead, since the dicom extractor is not guaranteed to work. - -This workflow also requires 2mm subcortical atlas and templates that are -available from: - -http://mindboggle.info/data.html - -specifically the 2mm versions of: - - * `Joint Fusion Atlas `_ - * `MNI template `_ - -Import necessary modules from nipype. 
-""" - -from __future__ import division, unicode_literals -from builtins import open, range, str - -import os - -from nipype.interfaces.base import CommandLine -CommandLine.set_default_terminal_output('allatonce') - -# https://github.com/moloney/dcmstack -from dcmstack.extract import default_extractor -# pip install pydicom -from dicom import read_file - -from nipype.interfaces import (fsl, Function, ants, freesurfer, nipy) -from nipype.interfaces.c3 import C3dAffineTool - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') - -from nipype import Workflow, Node, MapNode - -from nipype.algorithms.rapidart import ArtifactDetect -from nipype.algorithms.misc import TSNR, CalculateMedian -from nipype.algorithms.confounds import ACompCor -from nipype.interfaces.utility import Rename, Merge, IdentityInterface -from nipype.utils.filemanip import filename_to_list -from nipype.interfaces.io import DataSink, FreeSurferSource -import nipype.interfaces.freesurfer as fs - -import numpy as np -import scipy as sp -import nibabel as nb - -""" -A list of modules and functions to import inside of nodes -""" - -imports = [ - 'import os', - 'import nibabel as nb', - 'import numpy as np', - 'import scipy as sp', - 'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename', - 'from scipy.special import legendre' - ] - -""" -Define utility functions for use in workflow nodes -""" - - -def get_info(dicom_files): - """Given a Siemens dicom file return metadata - - Returns - ------- - RepetitionTime - Slice Acquisition Times - Spacing between slices - """ - meta = default_extractor( - read_file( - filename_to_list(dicom_files)[0], - stop_before_pixels=True, - force=True)) - return (meta['RepetitionTime'] / 1000., meta['CsaImage.MosaicRefAcqTimes'], - meta['SpacingBetweenSlices']) - - -def median(in_files): - """Computes an average of the median of each realigned timeseries - - Parameters - ---------- - - in_files: one or more realigned Nifti 4D time series - - Returns - ------- - - out_file: a 3D Nifti file - """ - average = None - for idx, filename in enumerate(filename_to_list(in_files)): - img = nb.load(filename) - data = np.median(img.get_data(), axis=3) - if average is None: - average = data - else: - average = average + data - median_img = nb.Nifti1Image(average / float(idx + 1), img.affine, - img.header) - filename = os.path.join(os.getcwd(), 'median.nii.gz') - median_img.to_filename(filename) - return filename - - -def bandpass_filter(files, lowpass_freq, highpass_freq, fs): - """Bandpass filter the input files - - Parameters - ---------- - files: list of 4d nifti files - lowpass_freq: cutoff frequency for the low pass filter (in Hz) - highpass_freq: cutoff frequency for the high pass filter (in Hz) - fs: sampling rate (in Hz) - """ - out_files = [] - for filename in filename_to_list(files): - path, name, ext = split_filename(filename) - out_file = os.path.join(os.getcwd(), name + '_bp' + ext) - img = nb.load(filename) - timepoints = img.shape[-1] - F = np.zeros((timepoints)) - lowidx = int(timepoints / 2) + 1 - if lowpass_freq > 0: - lowidx = np.round(float(lowpass_freq) / fs * timepoints) - highidx = 0 - if highpass_freq > 0: - highidx = np.round(float(highpass_freq) / fs * timepoints) - F[highidx:lowidx] = 1 - F = ((F + F[::-1]) > 0).astype(int) - data = img.get_data() - if np.all(F == 1): - filtered_data = data - else: - filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F)) - img_out = nb.Nifti1Image(filtered_data, img.affine, img.header) - img_out.to_filename(out_file) 
- out_files.append(out_file) - return list_to_filename(out_files) - - -def motion_regressors(motion_params, order=0, derivatives=1): - """Compute motion regressors upto given order and derivative - - motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic) - """ - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - out_params = params - for d in range(1, derivatives + 1): - cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0), - params)) - out_params = np.hstack((out_params, np.diff(cparams, d, axis=0))) - out_params2 = out_params - for i in range(2, order + 1): - out_params2 = np.hstack((out_params2, np.power(out_params, i))) - filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx) - np.savetxt(filename, out_params2, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None): - """Builds a regressor set comprisong motion parameters, composite norm and - outliers - - The outliers are added as a single time point column for each outlier - - - Parameters - ---------- - - motion_params: a text file containing motion parameters and its derivatives - comp_norm: a text file containing the composite norm - outliers: a text file containing 0-based outlier indices - detrend_poly: number of polynomials to add to detrend - - Returns - ------- - components_file: a text file containing all the regressors - """ - out_files = [] - for idx, filename in enumerate(filename_to_list(motion_params)): - params = np.genfromtxt(filename) - norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx]) - out_params = np.hstack((params, norm_val[:, None])) - try: - outlier_val = np.genfromtxt(filename_to_list(outliers)[idx]) - except IOError: - outlier_val = np.empty((0)) - for index in np.atleast_1d(outlier_val): - outlier_vector = np.zeros((out_params.shape[0], 1)) - outlier_vector[index] = 1 - out_params = np.hstack((out_params, outlier_vector)) - if detrend_poly: - timepoints = out_params.shape[0] - X = np.empty((timepoints, 0)) - for i in range(detrend_poly): - X = np.hstack((X, legendre(i + 1)(np.linspace( - -1, 1, timepoints))[:, None])) - out_params = np.hstack((out_params, X)) - filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx) - np.savetxt(filename, out_params, fmt=b"%.10f") - out_files.append(filename) - return out_files - - -def rename(in_files, suffix=None): - from nipype.utils.filemanip import (filename_to_list, split_filename, - list_to_filename) - out_files = [] - for idx, filename in enumerate(filename_to_list(in_files)): - _, name, ext = split_filename(filename) - if suffix is None: - out_files.append(name + ('_%03d' % idx) + ext) - else: - out_files.append(name + suffix + ext) - return list_to_filename(out_files) - - -def get_aparc_aseg(files): - """Return the aparc+aseg.mgz file""" - for name in files: - if 'aparc+aseg.mgz' in name: - return name - raise ValueError('aparc+aseg.mgz not found') - - -def extract_subrois(timeseries_file, label_file, indices): - """Extract voxel time courses for each subcortical roi index - - Parameters - ---------- - - timeseries_file: a 4D Nifti file - label_file: a 3D file containing rois in the same space/size of the 4D file - indices: a list of indices for ROIs to extract. 
- - Returns - ------- - out_file: a text file containing time courses for each voxel of each roi - The first four columns are: freesurfer index, i, j, k positions in the - label file - """ - img = nb.load(timeseries_file) - data = img.get_data() - roiimg = nb.load(label_file) - rois = roiimg.get_data() - prefix = split_filename(timeseries_file)[1] - out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix) - with open(out_ts_file, 'wt') as fp: - for fsindex in indices: - ijk = np.nonzero(rois == fsindex) - ts = data[ijk] - for i0, row in enumerate(ts): - fp.write('%d,%d,%d,%d,' % ( - fsindex, ijk[0][i0], ijk[1][i0], - ijk[2][i0]) + ','.join(['%.10f' % val - for val in row]) + '\n') - return out_ts_file - - -def combine_hemi(left, right): - """Combine left and right hemisphere time series into a single text file - """ - lh_data = nb.load(left).get_data() - rh_data = nb.load(right).get_data() - - indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None], - 2000000 + np.arange(0, rh_data.shape[0])[:, None])) - all_data = np.hstack((indices, - np.vstack((lh_data.squeeze(), rh_data.squeeze())))) - filename = left.split('.')[1] + '_combined.txt' - np.savetxt( - filename, - all_data, - fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1))) - return os.path.abspath(filename) - -""" -Create a Registration Workflow -""" - - -def create_reg_workflow(name='registration'): - """Create a FEAT preprocessing workflow together with freesurfer - - Parameters - ---------- - name : name of workflow (default: 'registration') - - Inputs: - - inputspec.source_files : files (filename or list of filenames to register) - inputspec.mean_image : reference image to use - inputspec.anatomical_image : anatomical image to coregister to - inputspec.target_image : registration target - - Outputs: - - outputspec.func2anat_transform : FLIRT transform - outputspec.anat2target_transform : FLIRT+FNIRT transform - outputspec.transformed_files : transformed files in target space - outputspec.transformed_mean : mean image in target space - - Example - ------- - See code below - """ - - register = Workflow(name=name) - - inputnode = Node( - interface=IdentityInterface(fields=[ - 'source_files', 'mean_image', 'subject_id', 'subjects_dir', - 'target_image' - ]), - name='inputspec') - - outputnode = Node( - interface=IdentityInterface(fields=[ - 'func2anat_transform', 'out_reg_file', 'anat2target_transform', - 'transforms', 'transformed_mean', 'segmentation_files', - 'anat2target', 'aparc', 'min_cost_file' - ]), - name='outputspec') - - # Get the subject's freesurfer source directory - fssource = Node(FreeSurferSource(), name='fssource') - fssource.run_without_submitting = True - register.connect(inputnode, 'subject_id', fssource, 'subject_id') - register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir') - - convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert") - register.connect(fssource, 'T1', convert, 'in_file') - - # Coregister the median to the surface - bbregister = Node(freesurfer.BBRegister(), name='bbregister') - bbregister.inputs.init = 'fsl' - bbregister.inputs.contrast_type = 't2' - bbregister.inputs.out_fsl_file = True - bbregister.inputs.epi_mask = True - register.connect(inputnode, 'subject_id', bbregister, 'subject_id') - register.connect(inputnode, 'mean_image', bbregister, 'source_file') - register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir') - """ - Estimate the tissue classes from the anatomical image. 
But use aparc+aseg's brain mask - """ - - binarize = Node( - fs.Binarize(min=0.5, out_type="nii.gz", dilate=1), - name="binarize_aparc") - register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize, - "in_file") - stripper = Node(fsl.ApplyMask(), name='stripper') - register.connect(binarize, "binary_file", stripper, "mask_file") - register.connect(convert, 'out_file', stripper, 'in_file') - - fast = Node(fsl.FAST(), name='fast') - register.connect(stripper, 'out_file', fast, 'in_files') - """ - Binarize the segmentation - """ - - binarize = MapNode( - fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'), - iterfield=['in_file'], - name='binarize') - register.connect(fast, 'partial_volume_files', binarize, 'in_file') - """ - Apply inverse transform to take segmentations to functional space - """ - - applyxfm = MapNode( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - iterfield=['target_file'], - name='inverse_transform') - register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file') - register.connect(binarize, 'out_file', applyxfm, 'target_file') - register.connect(inputnode, 'mean_image', applyxfm, 'source_file') - """ - Apply inverse transform to aparc file - """ - - aparcxfm = Node( - freesurfer.ApplyVolTransform(inverse=True, interp='nearest'), - name='aparc_inverse_transform') - register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir') - register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file') - register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm, - 'target_file') - register.connect(inputnode, 'mean_image', aparcxfm, 'source_file') - """ - Convert the BBRegister transformation to ANTS ITK format - """ - - convert2itk = Node(C3dAffineTool(), name='convert2itk') - convert2itk.inputs.fsl2ras = True - convert2itk.inputs.itk_transform = True - register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file') - register.connect(inputnode, 'mean_image', convert2itk, 'source_file') - register.connect(stripper, 'out_file', convert2itk, 'reference_file') - """ - Compute registration between the subject's structural and MNI template - - * All parameters are set using the example from: - #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh - * This is currently set to perform a very quick registration. However, - the registration can be made significantly more accurate for cortical - structures by increasing the number of iterations. 
- """ - - reg = Node(ants.Registration(), name='antsRegister') - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[ - 100, 30, 20 - ]] - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = True - reg.inputs.initial_moving_transform_com = True - reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01] - reg.inputs.convergence_window_size = [20] * 2 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 3 - reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 3 - reg.inputs.use_histogram_matching = [False] * 2 + [True] - reg.inputs.winsorize_lower_quantile = 0.005 - reg.inputs.winsorize_upper_quantile = 0.995 - reg.inputs.float = True - reg.inputs.output_warped_image = 'output_warped_image.nii.gz' - reg.inputs.num_threads = 4 - reg.plugin_args = {'sbatch_args': '-c%d' % 4} - register.connect(stripper, 'out_file', reg, 'moving_image') - register.connect(inputnode, 'target_image', reg, 'fixed_image') - - """ - Concatenate the affine and ants transforms into a list - """ - - merge = Node(Merge(2), iterfield=['in2'], name='mergexfm') - register.connect(convert2itk, 'itk_transform', merge, 'in2') - register.connect(reg, ('composite_transform', pickfirst), merge, 'in1') - - """ - Transform the mean image. 
First to anatomical and then to target - """ - - warpmean = Node(ants.ApplyTransforms(), name='warpmean') - warpmean.inputs.input_image_type = 3 - warpmean.inputs.interpolation = 'Linear' - warpmean.inputs.invert_transform_flags = [False, False] - warpmean.terminal_output = 'file' - warpmean.inputs.args = '--float' - warpmean.inputs.num_threads = 4 - warpmean.plugin_args = {'sbatch_args': '-c%d' % 4} - - register.connect(inputnode, 'target_image', warpmean, 'reference_image') - register.connect(inputnode, 'mean_image', warpmean, 'input_image') - register.connect(merge, 'out', warpmean, 'transforms') - - """ - Assign all the output files - """ - - register.connect(reg, 'warped_image', outputnode, 'anat2target') - register.connect(warpmean, 'output_image', outputnode, 'transformed_mean') - register.connect(applyxfm, 'transformed_file', outputnode, - 'segmentation_files') - register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc') - register.connect(bbregister, 'out_fsl_file', outputnode, - 'func2anat_transform') - register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file') - register.connect(reg, 'composite_transform', outputnode, - 'anat2target_transform') - register.connect(merge, 'out', outputnode, 'transforms') - register.connect(bbregister, 'min_cost_file', outputnode, 'min_cost_file') - - return register - -""" -Creates the main preprocessing workflow -""" - - -def create_workflow(files, - target_file, - subject_id, - TR, - slice_times, - norm_threshold=1, - num_components=5, - vol_fwhm=None, - surf_fwhm=None, - lowpass_freq=-1, - highpass_freq=-1, - subjects_dir=None, - sink_directory=os.getcwd(), - target_subject=['fsaverage3', 'fsaverage4'], - name='resting'): - - wf = Workflow(name=name) - - # Rename files in case they are named identically - name_unique = MapNode( - Rename(format_string='rest_%(run)02d'), - iterfield=['in_file', 'run'], - name='rename') - name_unique.inputs.keep_ext = True - name_unique.inputs.run = list(range(1, len(files) + 1)) - name_unique.inputs.in_file = files - - realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign") - realign.inputs.slice_times = slice_times - realign.inputs.tr = TR - realign.inputs.slice_info = 2 - realign.plugin_args = {'sbatch_args': '-c%d' % 4} - - # Compute TSNR on realigned data regressing polynomials up to order 2 - tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr') - wf.connect(realign, "out_file", tsnr, "in_file") - - # Compute the median image across runs - calc_median = Node(CalculateMedian(), name='median') - wf.connect(tsnr, 'detrended_file', calc_median, 'in_files') - - """ - Segment and Register - """ - - registration = create_reg_workflow(name='registration') - wf.connect(calc_median, 'median_file', registration, - 'inputspec.mean_image') - registration.inputs.inputspec.subject_id = subject_id - registration.inputs.inputspec.subjects_dir = subjects_dir - registration.inputs.inputspec.target_image = target_file - - """Quantify TSNR in each freesurfer ROI - """ - - get_roi_tsnr = MapNode( - fs.SegStats(default_color_table=True), - iterfield=['in_file'], - name='get_aparc_tsnr') - get_roi_tsnr.inputs.avgwf_txt_file = True - wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file') - wf.connect(registration, 'outputspec.aparc', get_roi_tsnr, - 'segmentation_file') - - """Use :class:`nipype.algorithms.rapidart` to determine which of the - images in the functional series are outliers based on deviations in - intensity or movement. 
- """ - - art = Node(interface=ArtifactDetect(), name="art") - art.inputs.use_differences = [True, True] - art.inputs.use_norm = True - art.inputs.norm_threshold = norm_threshold - art.inputs.zintensity_threshold = 9 - art.inputs.mask_type = 'spm_global' - art.inputs.parameter_source = 'NiPy' - - """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose - to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal - voxel sizes. - """ - - wf.connect([ - (name_unique, realign, [('out_file', 'in_file')]), - (realign, art, [('out_file', 'realigned_files')]), - (realign, art, [('par_file', 'realignment_parameters')]), - ]) - - def selectindex(files, idx): - import numpy as np - from nipype.utils.filemanip import filename_to_list, list_to_filename - return list_to_filename( - np.array(filename_to_list(files))[idx].tolist()) - - mask = Node(fsl.BET(), name='getmask') - mask.inputs.mask = True - wf.connect(calc_median, 'median_file', mask, 'in_file') - - # get segmentation in normalized functional space - - def merge_files(in1, in2): - out_files = filename_to_list(in1) - out_files.extend(filename_to_list(in2)) - return out_files - - # filter some noise - - # Compute motion regressors - motreg = Node( - Function( - input_names=['motion_params', 'order', 'derivatives'], - output_names=['out_files'], - function=motion_regressors, - imports=imports), - name='getmotionregress') - wf.connect(realign, 'par_file', motreg, 'motion_params') - - # Create a filter to remove motion and art confounds - createfilter1 = Node( - Function( - input_names=[ - 'motion_params', 'comp_norm', 'outliers', 'detrend_poly' - ], - output_names=['out_files'], - function=build_filter1, - imports=imports), - name='makemotionbasedfilter') - createfilter1.inputs.detrend_poly = 2 - wf.connect(motreg, 'out_files', createfilter1, 'motion_params') - wf.connect(art, 'norm_files', createfilter1, 'comp_norm') - wf.connect(art, 'outlier_files', createfilter1, 'outliers') - - filter1 = MapNode( - fsl.GLM( - out_f_name='F_mcart.nii.gz', - out_pf_name='pF_mcart.nii.gz', - demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filtermotion') - - wf.connect(realign, 'out_file', filter1, 'in_file') - wf.connect(realign, ('out_file', rename, '_filtermotart'), filter1, - 'out_res_name') - wf.connect(createfilter1, 'out_files', filter1, 'design') - - createfilter2 = MapNode( - ACompCor(), - iterfield=['realigned_file', 'extra_regressors'], - name='makecompcorrfilter') - createfilter2.inputs.components_file = 'noise_components.txt' - createfilter2.inputs.num_components = num_components - - wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors') - wf.connect(filter1, 'out_res', createfilter2, 'realigned_file') - wf.connect(registration, - ('outputspec.segmentation_files', selectindex, [0, 2]), - createfilter2, 'mask_file') - - filter2 = MapNode( - fsl.GLM(out_f_name='F.nii.gz', out_pf_name='pF.nii.gz', demean=True), - iterfield=['in_file', 'design', 'out_res_name'], - name='filter_noise_nosmooth') - wf.connect(filter1, 'out_res', filter2, 'in_file') - wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2, - 'out_res_name') - wf.connect(createfilter2, 'components_file', filter2, 'design') - wf.connect(mask, 'mask_file', filter2, 'mask') - - bandpass = Node( - Function( - input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'], - output_names=['out_files'], - function=bandpass_filter, - imports=imports), - 
name='bandpass_unsmooth') - bandpass.inputs.fs = 1. / TR - bandpass.inputs.highpass_freq = highpass_freq - bandpass.inputs.lowpass_freq = lowpass_freq - wf.connect(filter2, 'out_res', bandpass, 'files') - """Smooth the functional data using - :class:`nipype.interfaces.fsl.IsotropicSmooth`. - """ - - smooth = MapNode( - interface=fsl.IsotropicSmooth(), name="smooth", iterfield=["in_file"]) - smooth.inputs.fwhm = vol_fwhm - - wf.connect(bandpass, 'out_files', smooth, 'in_file') - - collector = Node(Merge(2), name='collect_streams') - wf.connect(smooth, 'out_file', collector, 'in1') - wf.connect(bandpass, 'out_files', collector, 'in2') - """ - Transform the remaining images. First to anatomical and then to target - """ - - warpall = MapNode( - ants.ApplyTransforms(), iterfield=['input_image'], name='warpall') - warpall.inputs.input_image_type = 3 - warpall.inputs.interpolation = 'Linear' - warpall.inputs.invert_transform_flags = [False, False] - warpall.terminal_output = 'file' - warpall.inputs.reference_image = target_file - warpall.inputs.args = '--float' - warpall.inputs.num_threads = 2 - warpall.plugin_args = {'sbatch_args': '-c%d' % 2} - - # transform to target - wf.connect(collector, 'out', warpall, 'input_image') - wf.connect(registration, 'outputspec.transforms', warpall, 'transforms') - - mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask') - - wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file') - - maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker') - wf.connect(warpall, 'output_image', maskts, 'in_file') - wf.connect(mask_target, 'out_file', maskts, 'mask_file') - - # map to surface - # extract aparc+aseg ROIs - # extract subcortical ROIs - # extract target space ROIs - # combine subcortical and cortical rois into a single cifti file - - ####### - # Convert aparc to subject functional space - - # Sample the average time series in aparc ROIs - sampleaparc = MapNode( - freesurfer.SegStats(default_color_table=True), - iterfield=['in_file', 'summary_file', 'avgwf_txt_file'], - name='aparc_ts') - sampleaparc.inputs.segment_id = ( - [8] + list(range(10, 14)) + [17, 18, 26, 47] + list(range(49, 55)) + - [58] + list(range(1001, 1036)) + list(range(2001, 2036))) - - wf.connect(registration, 'outputspec.aparc', sampleaparc, - 'segmentation_file') - wf.connect(collector, 'out', sampleaparc, 'in_file') - - def get_names(files, suffix): - """Generate appropriate names for output files - """ - from nipype.utils.filemanip import (split_filename, filename_to_list, - list_to_filename) - import os - out_names = [] - for filename in files: - path, name, _ = split_filename(filename) - out_names.append(os.path.join(path, name + suffix)) - return list_to_filename(out_names) - - wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc, - 'avgwf_txt_file') - wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc, - 'summary_file') - - # Sample the time series onto the surface of the target surface. 
Performs - # sampling into left and right hemisphere - target = Node(IdentityInterface(fields=['target_subject']), name='target') - target.iterables = ('target_subject', filename_to_list(target_subject)) - - samplerlh = MapNode( - freesurfer.SampleToSurface(), - iterfield=['source_file'], - name='sampler_lh') - samplerlh.inputs.sampling_method = "average" - samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1) - samplerlh.inputs.sampling_units = "frac" - samplerlh.inputs.interp_method = "trilinear" - samplerlh.inputs.smooth_surf = surf_fwhm - # samplerlh.inputs.cortex_mask = True - samplerlh.inputs.out_type = 'niigz' - samplerlh.inputs.subjects_dir = subjects_dir - - samplerrh = samplerlh.clone('sampler_rh') - - samplerlh.inputs.hemi = 'lh' - wf.connect(collector, 'out', samplerlh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file') - wf.connect(target, 'target_subject', samplerlh, 'target_subject') - - samplerrh.set_input('hemi', 'rh') - wf.connect(collector, 'out', samplerrh, 'source_file') - wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file') - wf.connect(target, 'target_subject', samplerrh, 'target_subject') - - # Combine left and right hemisphere to text file - combiner = MapNode( - Function( - input_names=['left', 'right'], - output_names=['out_file'], - function=combine_hemi, - imports=imports), - iterfield=['left', 'right'], - name="combiner") - wf.connect(samplerlh, 'out_file', combiner, 'left') - wf.connect(samplerrh, 'out_file', combiner, 'right') - - # Sample the time series file for each subcortical roi - ts2txt = MapNode( - Function( - input_names=['timeseries_file', 'label_file', 'indices'], - output_names=['out_file'], - function=extract_subrois, - imports=imports), - iterfield=['timeseries_file'], - name='getsubcortts') - ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\ - list(range(49, 55)) + [58] - ts2txt.inputs.label_file = \ - os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_' - '2mm_v2.nii.gz')) - wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file') - - ###### - - substitutions = [ - ('_target_subject_', ''), - ('_filtermotart_cleaned_bp_trans_masked', ''), - ('_filtermotart_cleaned_bp', ''), - ] - substitutions += [("_smooth%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_ts_masker%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_getsubcortts%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_combiner%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_filtermotion%d" % i, "") for i in range(11)[::-1]] - substitutions += [("_filter_noise_nosmooth%d" % i, "") - for i in range(11)[::-1]] - substitutions += [("_makecompcorfilter%d" % i, "") - for i in range(11)[::-1]] - substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1)) - for i in range(11)[::-1]] - - substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"), - ("T1_out_brain_pve_1_maths_warped", - "compcor_gm"), ("T1_out_brain_pve_2_maths_warped", - "compcor_wm"), - ("output_warped_image_maths", - "target_brain_mask"), ("median_brain_mask", - "native_brain_mask"), ("corr_", - "")] - - regex_subs = [ - ('_combiner.*/sar', '/smooth/'), - ('_combiner.*/ar', '/unsmooth/'), - ('_aparc_ts.*/sar', '/smooth/'), - ('_aparc_ts.*/ar', '/unsmooth/'), - ('_getsubcortts.*/sar', '/smooth/'), - ('_getsubcortts.*/ar', '/unsmooth/'), - ('series/sar', 'series/smooth/'), - ('series/ar', 'series/unsmooth/'), - ('_inverse_transform./', ''), - ] - # Save the relevant 
data into an output directory - datasink = Node(interface=DataSink(), name="datasink") - datasink.inputs.base_directory = sink_directory - datasink.inputs.container = subject_id - datasink.inputs.substitutions = substitutions - datasink.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(realign, 'par_file', datasink, 'resting.qa.motion') - wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm') - wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity') - wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files') - wf.connect(registration, 'outputspec.segmentation_files', datasink, - 'resting.mask_files') - wf.connect(registration, 'outputspec.anat2target', datasink, - 'resting.qa.ants') - wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask') - wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target') - wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F') - wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF') - wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps') - wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p') - wf.connect(registration, 'outputspec.min_cost_file', datasink, - 'resting.qa.mincost') - wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map') - wf.connect([(get_roi_tsnr, datasink, - [('avgwf_txt_file', 'resting.qa.tsnr'), - ('summary_file', 'resting.qa.tsnr.@summary')])]) - - wf.connect(bandpass, 'out_files', datasink, - 'resting.timeseries.@bandpassed') - wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed') - wf.connect(createfilter1, 'out_files', datasink, - 'resting.regress.@regressors') - wf.connect(createfilter2, 'components_file', datasink, - 'resting.regress.@compcorr') - wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target') - wf.connect(sampleaparc, 'summary_file', datasink, - 'resting.parcellations.aparc') - wf.connect(sampleaparc, 'avgwf_txt_file', datasink, - 'resting.parcellations.aparc.@avgwf') - wf.connect(ts2txt, 'out_file', datasink, - 'resting.parcellations.grayo.@subcortical') - - datasink2 = Node(interface=DataSink(), name="datasink2") - datasink2.inputs.base_directory = sink_directory - datasink2.inputs.container = subject_id - datasink2.inputs.substitutions = substitutions - datasink2.inputs.regexp_substitutions = regex_subs # (r'(/_.*(\d+/))', r'/run\2') - wf.connect(combiner, 'out_file', datasink2, - 'resting.parcellations.grayo.@surface') - return wf - - -""" -Creates the full workflow including getting information from dicom files -""" - - -def create_resting_workflow(args, name=None): - TR = args.TR - slice_times = args.slice_times - if args.dicom_file: - TR, slice_times, slice_thickness = get_info(args.dicom_file) - slice_times = (np.array(slice_times) / 1000.).tolist() - - if name is None: - name = 'resting_' + args.subject_id - kwargs = dict( - files=[os.path.abspath(filename) for filename in args.files], - target_file=os.path.abspath(args.target_file), - subject_id=args.subject_id, - TR=TR, - slice_times=slice_times, - vol_fwhm=args.vol_fwhm, - surf_fwhm=args.surf_fwhm, - norm_threshold=2., - subjects_dir=os.path.abspath(args.fsdir), - target_subject=args.target_surfs, - lowpass_freq=args.lowpass_freq, - highpass_freq=args.highpass_freq, - sink_directory=os.path.abspath(args.sink), - name=name) - wf = create_workflow(**kwargs) - return wf - - -if __name__ == "__main__": - from argparse import ArgumentParser, RawTextHelpFormatter - 
defstr = ' (default %(default)s)' - parser = ArgumentParser( - description=__doc__, formatter_class=RawTextHelpFormatter) - parser.add_argument( - "-d", - "--dicom_file", - dest="dicom_file", - help="a SIEMENS example dicom file from the resting series") - parser.add_argument( - "-f", - "--files", - dest="files", - nargs="+", - help="4d nifti files for resting state", - required=True) - parser.add_argument( - "-t", - "--target", - dest="target_file", - help=("Target in MNI space. Best to use the MindBoggle " - "template - " - "OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"), - required=True) - parser.add_argument( - "-s", - "--subject_id", - dest="subject_id", - help="FreeSurfer subject id", - required=True) - parser.add_argument( - "--subjects_dir", - dest="fsdir", - help="FreeSurfer subject directory", - required=True) - parser.add_argument( - "--target_surfaces", - dest="target_surfs", - nargs="+", - default=['fsaverage5'], - help="FreeSurfer target surfaces" + defstr) - parser.add_argument( - "--TR", - dest="TR", - default=None, - type=float, - help="TR if dicom not provided in seconds") - parser.add_argument( - "--slice_times", - dest="slice_times", - nargs="+", - type=float, - help="Slice onset times in seconds") - parser.add_argument( - '--vol_fwhm', - default=6., - dest='vol_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - '--surf_fwhm', - default=15., - dest='surf_fwhm', - type=float, - help="Spatial FWHM" + defstr) - parser.add_argument( - "-l", - "--lowpass_freq", - dest="lowpass_freq", - default=0.1, - type=float, - help="Low pass frequency (Hz)" + defstr) - parser.add_argument( - "-u", - "--highpass_freq", - dest="highpass_freq", - default=0.01, - type=float, - help="High pass frequency (Hz)" + defstr) - parser.add_argument( - "-o", - "--output_dir", - dest="sink", - help="Output directory base", - required=True) - parser.add_argument( - "-w", "--work_dir", dest="work_dir", help="Output directory base") - parser.add_argument( - "-p", - "--plugin", - dest="plugin", - default='Linear', - help="Plugin to use") - parser.add_argument( - "--plugin_args", dest="plugin_args", help="Plugin arguments") - args = parser.parse_args() - - wf = create_resting_workflow(args) - - if args.work_dir: - work_dir = os.path.abspath(args.work_dir) - else: - work_dir = os.getcwd() - - wf.base_dir = work_dir - if args.plugin_args: - wf.run(args.plugin, plugin_args=eval(args.plugin_args)) - else: - wf.run(args.plugin) diff --git a/examples/smri_ants_build_template.py b/examples/smri_ants_build_template.py deleted file mode 100644 index 53f3981428..0000000000 --- a/examples/smri_ants_build_template.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -=============================================== -sMRI: Using new ANTS for creating a T1 template -=============================================== - -In this tutorial we will use ANTS (old version aka "ANTS") based workflow to -create a template out of multiple T1 volumes. - -1. Tell python where to find the appropriate functions. 
-""" - -from __future__ import print_function, unicode_literals -from builtins import open -from future import standard_library -standard_library.install_aliases() - -import os -import nipype.interfaces.utility as util -import nipype.interfaces.ants as ants -import nipype.interfaces.io as io -import nipype.pipeline.engine as pe # pypeline engine - -from niflow.nipype1.workflows.smri.ants import ANTSTemplateBuildSingleIterationWF -""" -2. Download T1 volumes into home directory -""" - -import urllib.request -import urllib.error -import urllib.parse -homeDir = os.getenv("HOME") -requestedPath = os.path.join(homeDir, 'nipypeTestPath') -mydatadir = os.path.realpath(requestedPath) -if not os.path.exists(mydatadir): - os.makedirs(mydatadir) -print(mydatadir) - -MyFileURLs = [ - ('http://slicer.kitware.com/midas3/download?bitstream=13121', - '01_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13122', - '02_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13124', - '03_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13128', - '01_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13123', - '02_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13125', - '03_T1_inv_half.nii.gz'), -] -for tt in MyFileURLs: - myURL = tt[0] - localFilename = os.path.join(mydatadir, tt[1]) - if not os.path.exists(localFilename): - remotefile = urllib.request.urlopen(myURL) - - localFile = open(localFilename, 'wb') - localFile.write(remotefile.read()) - localFile.close() - print("Downloaded file: {0}".format(localFilename)) - else: - print("File previously downloaded {0}".format(localFilename)) - -input_images = [ - os.path.join(mydatadir, '01_T1_half.nii.gz'), - os.path.join(mydatadir, '02_T1_half.nii.gz'), - os.path.join(mydatadir, '03_T1_half.nii.gz') -] -input_passive_images = [{ - 'INV_T1': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz') -}] -""" -3. Define the workflow and its working directory -""" - -tbuilder = pe.Workflow(name="ANTSTemplateBuilder") -tbuilder.base_dir = requestedPath -""" -4. Define data sources. In real life these would be replace by DataGrabbers -""" - -datasource = pe.Node( - interface=util.IdentityInterface( - fields=['imageList', 'passiveImagesDictionariesList']), - run_without_submitting=True, - name='InputImages') -datasource.inputs.imageList = input_images -datasource.inputs.passiveImagesDictionariesList = input_passive_images -datasource.inputs.sort_filelist = True -""" -5. Template is initialized by a simple average -""" - -initAvg = pe.Node(interface=ants.AverageImages(), name='initAvg') -initAvg.inputs.dimension = 3 -initAvg.inputs.normalize = True - -tbuilder.connect(datasource, "imageList", initAvg, "images") -""" -6. Define the first iteration of template building -""" - -buildTemplateIteration1 = ANTSTemplateBuildSingleIterationWF('iteration01') -tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, - 'inputspec.fixed_image') -tbuilder.connect(datasource, 'imageList', buildTemplateIteration1, - 'inputspec.images') -tbuilder.connect(datasource, 'passiveImagesDictionariesList', - buildTemplateIteration1, - 'inputspec.ListOfPassiveImagesDictionaries') -""" -7. 
Define the second iteration of template building -""" - -buildTemplateIteration2 = ANTSTemplateBuildSingleIterationWF('iteration02') -tbuilder.connect(buildTemplateIteration1, 'outputspec.template', - buildTemplateIteration2, 'inputspec.fixed_image') -tbuilder.connect(datasource, 'imageList', buildTemplateIteration2, - 'inputspec.images') -tbuilder.connect(datasource, 'passiveImagesDictionariesList', - buildTemplateIteration2, - 'inputspec.ListOfPassiveImagesDictionaries') -""" -8. Move selected files to a designated results folder -""" - -datasink = pe.Node(io.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.join(requestedPath, "results") - -tbuilder.connect(buildTemplateIteration2, 'outputspec.template', datasink, - 'PrimaryTemplate') -tbuilder.connect(buildTemplateIteration2, - 'outputspec.passive_deformed_templates', datasink, - 'PassiveTemplate') -tbuilder.connect(initAvg, 'output_average_image', datasink, - 'PreRegisterAverage') -""" -8. Run the workflow -""" - -tbuilder.run() diff --git a/examples/smri_ants_registration.py b/examples/smri_ants_registration.py deleted file mode 100644 index e7050b05b7..0000000000 --- a/examples/smri_ants_registration.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -================================== -sMRI: Using ANTS for registration -================================== - -In this simple tutorial we will use the Registration interface from ANTS to -coregister two T1 volumes. - -1. Tell python where to find the appropriate functions. - -""" - -from __future__ import print_function, unicode_literals -from builtins import open - -from future import standard_library -standard_library.install_aliases() - -import os -import urllib.request -import urllib.error -import urllib.parse -from nipype.interfaces.ants import Registration -from nipype.testing import example_data -""" -2. Download T1 volumes into home directory - -""" - -homeDir = os.getenv("HOME") -requestedPath = os.path.join(homeDir, 'nipypeTestPath') -mydatadir = os.path.realpath(requestedPath) -if not os.path.exists(mydatadir): - os.makedirs(mydatadir) -print(mydatadir) - -MyFileURLs = [ - ('http://slicer.kitware.com/midas3/download?bitstream=13121', - '01_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13122', - '02_T1_half.nii.gz'), -] -for tt in MyFileURLs: - myURL = tt[0] - localFilename = os.path.join(mydatadir, tt[1]) - if not os.path.exists(localFilename): - remotefile = urllib.request.urlopen(myURL) - - localFile = open(localFilename, 'wb') - localFile.write(remotefile.read()) - localFile.close() - print("Downloaded file: {0}".format(localFilename)) - else: - print("File previously downloaded {0}".format(localFilename)) - -input_images = [ - os.path.join(mydatadir, '01_T1_half.nii.gz'), - os.path.join(mydatadir, '02_T1_half.nii.gz'), -] -""" -3. Define the parameters of the registration. Settings are -found in the file ``smri_ants_registration_settings.json`` -distributed with the ``example_data`` of `nipype`. 
- -""" - -reg = Registration( - from_file=example_data('smri_ants_registration_settings.json')) -reg.inputs.fixed_image = input_images[0] -reg.inputs.moving_image = input_images[1] -""" -Alternatively to the use of the ``from_file`` feature to load ANTs settings, -the user can manually set all those inputs instead:: - - reg.inputs.output_transform_prefix = 'thisTransform' - reg.inputs.output_warped_image = 'INTERNAL_WARPED.nii.gz' - reg.inputs.output_transform_prefix = "output_" - reg.inputs.transforms = ['Translation', 'Rigid', 'Affine', 'SyN'] - reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1,), (0.2, 3.0, 0.0)] - reg.inputs.number_of_iterations = ([[10000, 111110, 11110]] * 3 + - [[100, 50, 30]]) - reg.inputs.dimension = 3 - reg.inputs.write_composite_transform = True - reg.inputs.collapse_output_transforms = False - reg.inputs.metric = ['Mattes'] * 3 + [['Mattes', 'CC']] - reg.inputs.metric_weight = [1] * 3 + [[0.5, 0.5]] - reg.inputs.radius_or_number_of_bins = [32] * 3 + [[32, 4]] - reg.inputs.sampling_strategy = ['Regular'] * 3 + [[None, None]] - reg.inputs.sampling_percentage = [0.3] * 3 + [[None, None]] - reg.inputs.convergence_threshold = [1.e-8] * 3 + [-0.01] - reg.inputs.convergence_window_size = [20] * 3 + [5] - reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 3 + [[1, 0.5, 0]] - reg.inputs.sigma_units = ['vox'] * 4 - reg.inputs.shrink_factors = [[6, 4, 2]] + [[3, 2, 1]] * 2 + [[4, 2, 1]] - reg.inputs.use_estimate_learning_rate_once = [True] * 4 - reg.inputs.use_histogram_matching = [False] * 3 + [True] - reg.inputs.initial_moving_transform_com = True - -""" - -print(reg.cmdline) -""" -3. Run the registration -""" - -reg.run() diff --git a/examples/smri_antsregistration_build_template.py b/examples/smri_antsregistration_build_template.py deleted file mode 100644 index e84fc5b509..0000000000 --- a/examples/smri_antsregistration_build_template.py +++ /dev/null @@ -1,222 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -====================================================== -sMRI: Using new ANTS for creating a T1 template (ITK4) -====================================================== - -In this tutorial we will use ANTS (new ITK4 version aka "antsRegistration") based workflow to -create a template out of multiple T1 volumes. We will also showcase how to fine tune SGE jobs requirements. - -1. Tell python where to find the appropriate functions. -""" - -from __future__ import print_function -from future import standard_library -standard_library.install_aliases() - -import os -import nipype.interfaces.utility as util -import nipype.interfaces.ants as ants -import nipype.interfaces.io as io -import nipype.pipeline.engine as pe # pypeline engine - -from niflow.nipype1.workflows.smri.ants import antsRegistrationTemplateBuildSingleIterationWF -""" -2. 
Download T1 volumes into home directory -""" - -import urllib.request -import urllib.error -import urllib.parse -homeDir = os.getenv("HOME") -requestedPath = os.path.join(homeDir, 'nipypeTestPath') -mydatadir = os.path.realpath(requestedPath) -if not os.path.exists(mydatadir): - os.makedirs(mydatadir) -print(mydatadir) - -MyFileURLs = [ - ('http://slicer.kitware.com/midas3/download?bitstream=13121', - '01_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13122', - '02_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13124', - '03_T1_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13128', - '01_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13123', - '02_T1_inv_half.nii.gz'), - ('http://slicer.kitware.com/midas3/download?bitstream=13125', - '03_T1_inv_half.nii.gz'), -] -for tt in MyFileURLs: - myURL = tt[0] - localFilename = os.path.join(mydatadir, tt[1]) - if not os.path.exists(localFilename): - remotefile = urllib.request.urlopen(myURL) - - localFile = open(localFilename, 'wb') - localFile.write(remotefile.read()) - localFile.close() - print("Downloaded file: {0}".format(localFilename)) - else: - print("File previously downloaded {0}".format(localFilename)) -""" -ListOfImagesDictionaries - a list of dictionaries where each dictionary is -for one scan session, and the mappings in the dictionary are for all the -co-aligned images for that one scan session -""" - -ListOfImagesDictionaries = [{ - 'T1': - os.path.join(mydatadir, '01_T1_half.nii.gz'), - 'INV_T1': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz'), - 'LABEL_MAP': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz') -}, { - 'T1': - os.path.join(mydatadir, '02_T1_half.nii.gz'), - 'INV_T1': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz'), - 'LABEL_MAP': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz') -}, { - 'T1': - os.path.join(mydatadir, '03_T1_half.nii.gz'), - 'INV_T1': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz'), - 'LABEL_MAP': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz') -}] -input_passive_images = [{ - 'INV_T1': - os.path.join(mydatadir, '01_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '02_T1_inv_half.nii.gz') -}, { - 'INV_T1': - os.path.join(mydatadir, '03_T1_inv_half.nii.gz') -}] -""" -registrationImageTypes - A list of the image types to be used actively during -the estimation process of registration, any image type not in this list -will be passively resampled with the estimated transforms. -['T1','T2'] -""" - -registrationImageTypes = ['T1'] -""" -interpolationMap - A map of image types to interpolation modes. If an -image type is not listed, it will be linearly interpolated. -{ 'labelmap':'NearestNeighbor', 'FLAIR':'WindowedSinc' } -""" - -interpolationMapping = { - 'INV_T1': 'LanczosWindowedSinc', - 'LABEL_MAP': 'NearestNeighbor', - 'T1': 'Linear' -} -""" -3. Define the workflow and its working directory -""" - -tbuilder = pe.Workflow(name="antsRegistrationTemplateBuilder") -tbuilder.base_dir = requestedPath -""" -4. Define data sources. 
In real life these would be replace by DataGrabbers -""" - -InitialTemplateInputs = [mdict['T1'] for mdict in ListOfImagesDictionaries] - -datasource = pe.Node( - interface=util.IdentityInterface(fields=[ - 'InitialTemplateInputs', 'ListOfImagesDictionaries', - 'registrationImageTypes', 'interpolationMapping' - ]), - run_without_submitting=True, - name='InputImages') -datasource.inputs.InitialTemplateInputs = InitialTemplateInputs -datasource.inputs.ListOfImagesDictionaries = ListOfImagesDictionaries -datasource.inputs.registrationImageTypes = registrationImageTypes -datasource.inputs.interpolationMapping = interpolationMapping -datasource.inputs.sort_filelist = True -""" -5. Template is initialized by a simple average in this simple example, - any reference image could be used (i.e. a previously created template) -""" - -initAvg = pe.Node(interface=ants.AverageImages(), name='initAvg') -initAvg.inputs.dimension = 3 -initAvg.inputs.normalize = True - -tbuilder.connect(datasource, "InitialTemplateInputs", initAvg, "images") -""" -6. Define the first iteration of template building -""" - -buildTemplateIteration1 = antsRegistrationTemplateBuildSingleIterationWF( - 'iteration01') -""" -Here we are fine tuning parameters of the SGE job (memory limit, numebr of cores etc.) -""" - -BeginANTS = buildTemplateIteration1.get_node("BeginANTS") -BeginANTS.plugin_args = { - 'qsub_args': - '-S /bin/bash -pe smp1 8-12 -l mem_free=6000M -o /dev/null -e /dev/null queue_name', - 'overwrite': - True -} - -tbuilder.connect(initAvg, 'output_average_image', buildTemplateIteration1, - 'inputspec.fixed_image') -tbuilder.connect(datasource, 'ListOfImagesDictionaries', - buildTemplateIteration1, 'inputspec.ListOfImagesDictionaries') -tbuilder.connect(datasource, 'registrationImageTypes', buildTemplateIteration1, - 'inputspec.registrationImageTypes') -tbuilder.connect(datasource, 'interpolationMapping', buildTemplateIteration1, - 'inputspec.interpolationMapping') -""" -7. Define the second iteration of template building -""" - -buildTemplateIteration2 = antsRegistrationTemplateBuildSingleIterationWF( - 'iteration02') -BeginANTS = buildTemplateIteration2.get_node("BeginANTS") -BeginANTS.plugin_args = { - 'qsub_args': - '-S /bin/bash -pe smp1 8-12 -l mem_free=6000M -o /dev/null -e /dev/null queue_name', - 'overwrite': - True -} -tbuilder.connect(buildTemplateIteration1, 'outputspec.template', - buildTemplateIteration2, 'inputspec.fixed_image') -tbuilder.connect(datasource, 'ListOfImagesDictionaries', - buildTemplateIteration2, 'inputspec.ListOfImagesDictionaries') -tbuilder.connect(datasource, 'registrationImageTypes', buildTemplateIteration2, - 'inputspec.registrationImageTypes') -tbuilder.connect(datasource, 'interpolationMapping', buildTemplateIteration2, - 'inputspec.interpolationMapping') -""" -8. Move selected files to a designated results folder -""" - -datasink = pe.Node(io.DataSink(), name="datasink") -datasink.inputs.base_directory = os.path.join(requestedPath, "results") - -tbuilder.connect(buildTemplateIteration2, 'outputspec.template', datasink, - 'PrimaryTemplate') -tbuilder.connect(buildTemplateIteration2, - 'outputspec.passive_deformed_templates', datasink, - 'PassiveTemplate') -tbuilder.connect(initAvg, 'output_average_image', datasink, - 'PreRegisterAverage') -""" -9. 
Run the workflow -""" - -tbuilder.run(plugin="SGE") diff --git a/examples/smri_cbs_skullstripping.py b/examples/smri_cbs_skullstripping.py deleted file mode 100644 index 1471496576..0000000000 --- a/examples/smri_cbs_skullstripping.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -""" -======================================== -sMRI: USing CBS Tools for skullstripping -======================================== - -This simple workflow uses SPECTRE2010 algorithm to skullstrip an MP2RAGE -anatomical scan. -""" - -import nipype.pipeline.engine as pe -from nipype.interfaces.mipav.developer import (JistIntensityMp2rageMasking, - MedicAlgorithmSPECTRE2010) - -wf = pe.Workflow("skullstripping") - -mask = pe.Node(JistIntensityMp2rageMasking(), name="masking") -folder_path = '/Users/filo/7t_trt/niftis/sub001/session_1/' -mask.inputs.inSecond = folder_path + "MP2RAGE_INV2.nii.gz" -mask.inputs.inQuantitative = folder_path + "MP2RAGE_UNI.nii.gz" -mask.inputs.inT1weighted = folder_path + "MP2RAGE_T1.nii.gz" -mask.inputs.outMasked = True -mask.inputs.outMasked2 = True -mask.inputs.outSignal = True -mask.inputs.outSignal2 = True - -skullstrip = pe.Node(MedicAlgorithmSPECTRE2010(), name="skullstrip") -skullstrip.inputs.outStripped = True -skullstrip.inputs.xDefaultMem = 6000 - -wf.connect(mask, 'outMasked', skullstrip, 'inInput') -wf.run() diff --git a/examples/smri_freesurfer.py b/examples/smri_freesurfer.py deleted file mode 100644 index d365b44dd5..0000000000 --- a/examples/smri_freesurfer.py +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python -""" -================ -sMRI: FreeSurfer -================ - -This script, smri_freesurfer.py, demonstrates the ability to call reconall on -a set of subjects and then make an average subject:: - - python smri_freesurfer.py - -Import necessary modules from nipype. 
-""" - -import os - -import nipype.pipeline.engine as pe -import nipype.interfaces.io as nio -from nipype.interfaces.freesurfer.preprocess import ReconAll -from nipype.interfaces.freesurfer.utils import MakeAverageSubject - -subject_list = ['s1', 's3'] -data_dir = os.path.abspath('data') -subjects_dir = os.path.abspath('amri_freesurfer_tutorial/subjects_dir') - -wf = pe.Workflow(name="l1workflow") -wf.base_dir = os.path.abspath('amri_freesurfer_tutorial/workdir') -""" -Grab data -""" - -datasource = pe.MapNode( - interface=nio.DataGrabber(infields=['subject_id'], outfields=['struct']), - name='datasource', - iterfield=['subject_id']) -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']]) -datasource.inputs.subject_id = subject_list -datasource.inputs.sort_filelist = True -""" -Run recon-all -""" - -recon_all = pe.MapNode( - interface=ReconAll(), - name='recon_all', - iterfield=['subject_id', 'T1_files']) -recon_all.inputs.subject_id = subject_list -if not os.path.exists(subjects_dir): - os.mkdir(subjects_dir) -recon_all.inputs.subjects_dir = subjects_dir - -wf.connect(datasource, 'struct', recon_all, 'T1_files') -""" -Make average subject -""" - -average = pe.Node(interface=MakeAverageSubject(), name="average") -average.inputs.subjects_dir = subjects_dir - -wf.connect(recon_all, 'subject_id', average, 'subjects_ids') - -wf.run("MultiProc", plugin_args={'n_procs': 4}) diff --git a/examples/smri_fsreconall.py b/examples/smri_fsreconall.py deleted file mode 100644 index 16d0b4c9f3..0000000000 --- a/examples/smri_fsreconall.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python -""" -================ -sMRI: FSReconAll -================ - -This script, smri_fsreconall.py, demonstrates the ability to use the -create_reconall_workflow function to create a workflow and then run it on a -set of subjects and then make an average subject:: - - python smri_fsreconall.py - -For an example on how to call FreeSurfer's reconall script in Nipype -see smri_freesurfer.py. - -Import necessary modules from nipype. 
-""" - -import os - -import nipype.pipeline.engine as pe -import nipype.interfaces.io as nio -from niflow.nipype1.workflows.smri.freesurfer import create_reconall_workflow -from nipype.interfaces.freesurfer.utils import MakeAverageSubject -from nipype.interfaces.utility import IdentityInterface -""" -Assign the tutorial directory -""" - -tutorial_dir = os.path.abspath('smri_fsreconall_tutorial') -if not os.path.isdir(tutorial_dir): - os.mkdir(tutorial_dir) -""" -Define the workflow directories -""" - -subject_list = ['s1', 's3'] -data_dir = os.path.abspath('data') -subjects_dir = os.path.join(tutorial_dir, 'subjects_dir') -if not os.path.exists(subjects_dir): - os.mkdir(subjects_dir) - -wf = pe.Workflow(name="l1workflow") -wf.base_dir = os.path.join(tutorial_dir, 'workdir') -""" -Create inputspec -""" - -inputspec = pe.Node( - interface=IdentityInterface(['subject_id']), name="inputspec") -inputspec.iterables = ("subject_id", subject_list) -""" -Grab data -""" - -datasource = pe.Node( - interface=nio.DataGrabber(infields=['subject_id'], outfields=['struct']), - name='datasource') -datasource.inputs.base_directory = data_dir -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']]) -datasource.inputs.subject_id = subject_list -datasource.inputs.sort_filelist = True - -wf.connect(inputspec, 'subject_id', datasource, 'subject_id') -""" -Run recon-all -""" - -recon_all = create_reconall_workflow() -recon_all.inputs.inputspec.subjects_dir = subjects_dir - -wf.connect(datasource, 'struct', recon_all, 'inputspec.T1_files') -wf.connect(inputspec, 'subject_id', recon_all, 'inputspec.subject_id') -""" -Make average subject -""" - -average = pe.JoinNode( - interface=MakeAverageSubject(), - joinsource="inputspec", - joinfield="subjects_ids", - name="average") -average.inputs.subjects_dir = subjects_dir - -wf.connect(recon_all, 'postdatasink_outputspec.subject_id', average, - 'subjects_ids') - -wf.run("MultiProc", plugin_args={'n_procs': 4}) diff --git a/examples/tessellation_tutorial.py b/examples/tessellation_tutorial.py deleted file mode 100644 index 58bae095cc..0000000000 --- a/examples/tessellation_tutorial.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python -""" -================================================= -sMRI: Regional Tessellation and Surface Smoothing -================================================= - -Introduction -============ - -This script, tessellation_tutorial.py, demonstrates the use of create_tessellation_flow from niflow.nipype1.workflows.smri.freesurfer, and it can be run with:: - - python tessellation_tutorial.py - -This example requires that the user has Freesurfer installed, and that the Freesurfer directory for 'fsaverage' is present. - -.. seealso:: - - ConnectomeViewer - The Connectome Viewer connects Multi-Modal Multi-Scale Neuroimaging and Network Datasets For Analysis and Visualization in Python. - - http://www.geuz.org/gmsh/ - Gmsh: a three-dimensional finite element mesh generator with built-in pre- and post-processing facilities - - http://www.blender.org/ - Blender is the free open source 3D content creation suite, available for all major operating systems under the GNU General Public License. - -.. warning:: - - This workflow will take several hours to finish entirely, since smoothing the larger cortical surfaces is very time consuming. - -Packages and Data Setup -======================= - -Import the necessary modules and workflow from nipype. 
-""" - -import nipype.pipeline.engine as pe # pypeline engine -import nipype.interfaces.cmtk as cmtk -import nipype.interfaces.io as nio # Data i/o -import os -import os.path as op -from niflow.nipype1.workflows.smri.freesurfer import create_tessellation_flow -""" -Directories -=========== - -Set the default directory and lookup table (LUT) paths -""" - -fs_dir = os.environ['FREESURFER_HOME'] -lookup_file = op.join(fs_dir, 'FreeSurferColorLUT.txt') -subjects_dir = op.join(fs_dir, 'subjects/') -output_dir = './tessellate_tutorial' -""" -Inputs -====== - -Create the tessellation workflow and set inputs -Here we will choose Gifti (gii) as the output format, because -we want to able to view the surface in ConnectomeViewer. - -In you intend to view the meshes in gmsh or Blender, you should change -the workflow creation to use stereolithographic (stl) format. -""" - -tessflow = create_tessellation_flow(name='tessflow', out_format='gii') -tessflow.inputs.inputspec.subject_id = 'fsaverage' -tessflow.inputs.inputspec.subjects_dir = subjects_dir -tessflow.inputs.inputspec.lookup_file = lookup_file -""" -We also create a conditional node to package the surfaces for ConnectomeViewer. -Simply set cff to "False" to ignore this step. -""" - -cff = True -if cff: - cff = pe.Node(interface=cmtk.CFFConverter(), name='cff') - cff.inputs.out_file = 'Meshes.cff' -""" -Outputs -======= - -Create a datasink to organize the smoothed meshes -Using regular-expression substitutions we can remove the extraneous folders generated by the mapnode. -""" - -datasink = pe.Node(interface=nio.DataSink(), name="datasink") -datasink.inputs.base_directory = 'meshes' -datasink.inputs.regexp_substitutions = [('_smoother[\d]*/', '')] -""" -Execution -========= - -Finally, create and run another pipeline that connects the workflow and datasink -""" - -tesspipe = pe.Workflow(name='tessellate_tutorial') -tesspipe.base_dir = output_dir -tesspipe.connect([(tessflow, datasink, [('outputspec.meshes', - '@meshes.all')])]) -""" -If the surfaces are to be packaged, this will connect the CFFConverter -node to the tessellation and smoothing workflow, as well as to the datasink. -""" - -if cff: - tesspipe.connect([(tessflow, cff, [('outputspec.meshes', - 'gifti_surfaces')])]) - tesspipe.connect([(cff, datasink, [('connectome_file', '@cff')])]) - -tesspipe.run() diff --git a/examples/test_spm.py b/examples/test_spm.py deleted file mode 100644 index 4c31f144ed..0000000000 --- a/examples/test_spm.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import division -from builtins import range -import nipype.pipeline.engine as pe -from nipype.interfaces import spm -from nipype.interfaces import fsl -from nipype.interfaces import utility as niu -from nipype.interfaces import io as nio -from nipype.algorithms.misc import Gunzip - - -def _get_first(inlist): - if isinstance(inlist, (list, tuple)): - return inlist[0] - return inlist - - -def test_spm(name='test_spm_3d'): - """ - A simple workflow to test SPM's installation. By default will split the 4D volume in - time-steps. - """ - workflow = pe.Workflow(name=name) - - inputnode = pe.Node( - niu.IdentityInterface(fields=['in_data']), name='inputnode') - dgr = pe.Node( - nio.DataGrabber( - template="feeds/data/fmri.nii.gz", - outfields=['out_file'], - sort_filelist=False), - name='datasource') - - stc = pe.Node( - spm.SliceTiming( - num_slices=21, - time_repetition=1.0, - time_acquisition=2. - 2. 
/ 32, - slice_order=list(range(21, 0, -1)), - ref_slice=10), - name='stc') - realign_estimate = pe.Node( - spm.Realign(jobtype='estimate'), name='realign_estimate') - realign_write = pe.Node(spm.Realign(jobtype='write'), name='realign_write') - realign_estwrite = pe.Node( - spm.Realign(jobtype='estwrite'), name='realign_estwrite') - smooth = pe.Node(spm.Smooth(fwhm=[6, 6, 6]), name='smooth') - - if name == 'test_spm_3d': - split = pe.Node( - fsl.Split(dimension="t", output_type="NIFTI"), name="split") - workflow.connect([(dgr, split, [(('out_file', _get_first), - 'in_file')]), - (split, stc, [("out_files", "in_files")])]) - elif name == 'test_spm_4d': - gunzip = pe.Node(Gunzip(), name="gunzip") - workflow.connect([(dgr, gunzip, [(('out_file', _get_first), - 'in_file')]), - (gunzip, stc, [("out_file", "in_files")])]) - else: - raise NotImplementedError( - 'No implementation of the test workflow \'{}\' was found'.format( - name)) - - workflow.connect([(inputnode, dgr, [('in_data', 'base_directory')]), - (stc, realign_estimate, - [('timecorrected_files', - 'in_files')]), (realign_estimate, realign_write, - [('modified_in_files', 'in_files')]), - (stc, realign_estwrite, - [('timecorrected_files', - 'in_files')]), (realign_write, smooth, - [('realigned_files', 'in_files')])]) - return workflow - - -workflow3d = test_spm() -workflow4d = test_spm(name='test_spm_4d') diff --git a/examples/workshop_dartmouth_2010.py b/examples/workshop_dartmouth_2010.py deleted file mode 100644 index 931a633c52..0000000000 --- a/examples/workshop_dartmouth_2010.py +++ /dev/null @@ -1,288 +0,0 @@ -""" -================================ -Workshop: Dartmouth College 2010 -================================ - -First lets go to the directory with the data we'll be working on and start the interactive python interpreter -(with some nipype specific configuration). Note that nipype does not need to be run through ipython - it is -just much nicer to do interactive work in it. - -.. sourcecode:: bash - - cd $TDPATH - ipython -p nipype - -For every neuroimaging procedure supported by nipype there exists a wrapper - a small piece of code managing -the underlying software (FSL, SPM, AFNI etc.). We call those interfaces. They are standarised so we can hook them up -together. Lets have a look at some of them. - -.. 
sourcecode:: ipython - - In [1]: import nipype.interfaces.fsl as fsl - - In [2]: fsl.BET.help() - Inputs - ------ - - Mandatory: - in_file: input file to skull strip - - Optional: - args: Additional parameters to the command - center: center of gravity in voxels - environ: Environment variables (default={}) - frac: fractional intensity threshold - functional: apply to 4D fMRI data - mutually exclusive: functional, reduce_bias - mask: create binary mask image - mesh: generate a vtk mesh brain surface - no_output: Don't generate segmented output - out_file: name of output skull stripped image - outline: create surface outline image - output_type: FSL output type - radius: head radius - reduce_bias: bias field and neck cleanup - mutually exclusive: functional, reduce_bias - skull: create skull image - threshold: apply thresholding to segmented brain image and mask - vertical_gradient: vertical gradient in fractional intensity threshold (-1, 1) - - Outputs - ------- - mask_file: path/name of binary brain mask (if generated) - meshfile: path/name of vtk mesh file (if generated) - out_file: path/name of skullstripped file - outline_file: path/name of outline file (if generated) - - In [3]: import nipype.interfaces.freesurfer as fs - - In [4]: fs.Smooth.help() - Inputs - ------ - - Mandatory: - in_file: source volume - num_iters: number of iterations instead of fwhm - mutually exclusive: surface_fwhm - reg_file: registers volume to surface anatomical - surface_fwhm: surface FWHM in mm - mutually exclusive: num_iters - requires: reg_file - - Optional: - args: Additional parameters to the command - environ: Environment variables (default={}) - proj_frac: project frac of thickness a long surface normal - mutually exclusive: proj_frac_avg - proj_frac_avg: average a long normal min max delta - mutually exclusive: proj_frac - smoothed_file: output volume - subjects_dir: subjects directory - vol_fwhm: volumesmoothing outside of surface - - Outputs - ------- - args: Additional parameters to the command - environ: Environment variables - smoothed_file: smoothed input volume - subjects_dir: subjects directory - -You can read about all of the interfaces implemented in nipype at our online documentation at http://nipy.sourceforge.net/nipype/documentation.html#documentation . -Check it out now. - -Using interfaces ----------------- - -Having interfaces allows us to use third party software (like FSL BET) as function. Look how simple it is. -""" - -from __future__ import print_function -from builtins import str - -import nipype.interfaces.fsl as fsl -result = fsl.BET(in_file='data/s1/struct.nii').run() -print(result) -""" -Running a single program is not much of a breakthrough. Lets run motion correction followed by smoothing -(isotropic - in other words not using SUSAN). Notice that in the first line we are setting the output data type -for all FSL interfaces. -""" - -fsl.FSLCommand.set_default_output_type('NIFTI_GZ') -result1 = fsl.MCFLIRT(in_file='data/s1/f3.nii').run() -result2 = fsl.Smooth(in_file='f3_mcf.nii.gz', fwhm=6).run() -""" -Simple workflow ---------------- - -In the previous example we knew that fsl.MCFLIRT will produce a file called f3_mcf.nii.gz and we have hard coded -this as an input to fsl.Smooth. This is quite limited, but luckily nipype supports joining interfaces in pipelines. -This way output of one interface will be used as an input of another without having to hard code anything. Before -connecting Interfaces we need to put them into (separate) Nodes and give them unique names. 
This way every interface will -process data in a separate folder. -""" - -import nipype.pipeline.engine as pe -import os - -motion_correct = pe.Node( - interface=fsl.MCFLIRT(in_file=os.path.abspath('data/s1/f3.nii')), - name="motion_correct") -smooth = pe.Node(interface=fsl.Smooth(fwhm=6), name="smooth") - -motion_correct_and_smooth = pe.Workflow(name="motion_correct_and_smooth") -motion_correct_and_smooth.base_dir = os.path.abspath( - '.') # define where will be the root folder for the workflow -motion_correct_and_smooth.connect([(motion_correct, smooth, [('out_file', - 'in_file')])]) -# we are connecting 'out_file' output of motion_correct to 'in_file' input of smooth -motion_correct_and_smooth.run() -""" -Another workflow ----------------- - -Another example of a simple workflow (calculate the mean of fMRI signal and subtract it). -This time we'll be assigning inputs after defining the workflow. -""" - -calc_mean = pe.Node(interface=fsl.ImageMaths(), name="calc_mean") -calc_mean.inputs.op_string = "-Tmean" -subtract = pe.Node(interface=fsl.ImageMaths(), name="subtract") -subtract.inputs.op_string = "-sub" - -demean = pe.Workflow(name="demean") -demean.base_dir = os.path.abspath('.') -demean.connect([(calc_mean, subtract, [('out_file', 'in_file2')])]) - -demean.inputs.calc_mean.in_file = os.path.abspath('data/s1/f3.nii') -demean.inputs.subtract.in_file = os.path.abspath('data/s1/f3.nii') -demean.run() -""" -Reusing workflows ------------------ - -The beauty of the workflows is that they are reusable. We can just import a workflow made by someone -else and feed it with our data. -""" - -from fmri_fsl import preproc -preproc.base_dir = os.path.abspath('.') -preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') -preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') -preproc.run() -""" -... and we can run it again and it won't actually rerun anything because none of -the parameters have changed. -""" - -preproc.run() -""" -... and we can change a parameter and run it again. Only the dependent nodes -are rerun and that too only if the input state has changed. -""" - -preproc.inputs.meanfuncmask.frac = 0.5 -preproc.run() -""" -Visualizing workflows 1 ------------------------ - -So what did we run in this precanned workflow -""" - -preproc.write_graph() -""" -Datasink --------- - -Datasink is a special interface for copying and arranging results. -""" - -import nipype.interfaces.io as nio - -preproc.inputs.inputspec.func = os.path.abspath('data/s1/f3.nii') -preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii') -datasink = pe.Node(interface=nio.DataSink(), name='sinker') -preprocess = pe.Workflow(name='preprocout') -preprocess.base_dir = os.path.abspath('.') -preprocess.connect([(preproc, datasink, [('meanfunc2.out_file', 'meanfunc'), - ('maskfunc3.out_file', 'funcruns')])]) -preprocess.run() -""" -Datagrabber ------------ - -Datagrabber is (surprise, surprise) an interface for collecting files from hard drive. It is very flexible and -supports almost any file organisation of your data you can imagine. 
-""" - -datasource1 = nio.DataGrabber() -datasource1.inputs.template = 'data/s1/f3.nii' -datasource1.inputs.sort_filelist = True -results = datasource1.run() -print(results.outputs) - -datasource2 = nio.DataGrabber() -datasource2.inputs.template = 'data/s*/f*.nii' -datasource2.inputs.sort_filelist = True -results = datasource2.run() -print(results.outputs) - -datasource3 = nio.DataGrabber(infields=['run']) -datasource3.inputs.template = 'data/s1/f%d.nii' -datasource3.inputs.sort_filelist = True -datasource3.inputs.run = [3, 7] -results = datasource3.run() -print(results.outputs) - -datasource4 = nio.DataGrabber(infields=['subject_id', 'run']) -datasource4.inputs.template = 'data/%s/f%d.nii' -datasource4.inputs.sort_filelist = True -datasource4.inputs.run = [3, 7] -datasource4.inputs.subject_id = ['s1', 's3'] -results = datasource4.run() -print(results.outputs) -""" -Iterables ---------- - -Iterables is a special field of the Node class that enables to iterate all workfloes/nodes connected to it over -some parameters. Here we'll use it to iterate over two subjects. -""" - -import nipype.interfaces.utility as util -infosource = pe.Node( - interface=util.IdentityInterface(fields=['subject_id']), name="infosource") -infosource.iterables = ('subject_id', ['s1', 's3']) - -datasource = pe.Node( - nio.DataGrabber(infields=['subject_id'], outfields=['func', 'struct']), - name="datasource") -datasource.inputs.template = '%s/%s.nii' -datasource.inputs.base_directory = os.path.abspath('data') -datasource.inputs.template_args = dict( - func=[['subject_id', 'f3']], struct=[['subject_id', 'struct']]) -datasource.inputs.sort_filelist = True - -my_workflow = pe.Workflow(name="my_workflow") -my_workflow.base_dir = os.path.abspath('.') - -my_workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]), - (datasource, preproc, [('func', 'inputspec.func'), - ('struct', 'inputspec.struct')])]) -my_workflow.run() -""" -and we can change a node attribute and run it again - -""" - -smoothnode = my_workflow.get_node('preproc.smooth') -assert (str(smoothnode) == 'preproc.smooth') -smoothnode.iterables = ('fwhm', [5., 10.]) -my_workflow.run() -""" -Visualizing workflows 2 ------------------------ - -In the case of nested workflows, we might want to look at expanded forms of the workflow. -""" From f560ac7886d9f05a93e2ee66c51a304de8201d57 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 08:58:13 -0500 Subject: [PATCH 2/7] DOC: Initial README pointing to examples niflow --- examples/README | 4 ---- examples/README.md | 4 ++++ 2 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 examples/README create mode 100644 examples/README.md diff --git a/examples/README b/examples/README deleted file mode 100644 index 3e482bb975..0000000000 --- a/examples/README +++ /dev/null @@ -1,4 +0,0 @@ -A dataset for use with these scripts can be downloaded from the nipype -website. At the time of writing, it's at: - -http://nipype.readthedocs.io/en/0.12.0/users/pipeline_tutorial.html diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000..c22dfdcc67 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,4 @@ +The examples directory previously held a set of literate programming documents that demonstrated +solutions to various problems using Nipype. + +These examples have been moved to https://github.com/niflows/nipype1-examples. 
From 193918b65e512efbbe7f49976ec065dceb6cbe86 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 09:00:53 -0500 Subject: [PATCH 3/7] CI: Remove example tests from CircleCI --- .circleci/config.yml | 121 ------------------ .circleci/test_fmri_fsl_feeds_linear_l1.sh | 3 - .circleci/test_fmri_fsl_reuse_linear_l1.sh | 3 - .../test_fmri_spm_dartel_multiproc_l1.sh | 3 - .../test_fmri_spm_dartel_multiproc_l2.sh | 3 - .circleci/test_fmri_spm_linear_3d.sh | 3 - .circleci/test_fmri_spm_linear_4d.sh | 3 - .../test_fmri_spm_nested_multiproc_l1.sh | 3 - .../test_fmri_spm_nested_multiproc_l2.sh | 3 - 9 files changed, 145 deletions(-) delete mode 100644 .circleci/test_fmri_fsl_feeds_linear_l1.sh delete mode 100644 .circleci/test_fmri_fsl_reuse_linear_l1.sh delete mode 100644 .circleci/test_fmri_spm_dartel_multiproc_l1.sh delete mode 100644 .circleci/test_fmri_spm_dartel_multiproc_l2.sh delete mode 100644 .circleci/test_fmri_spm_linear_3d.sh delete mode 100644 .circleci/test_fmri_spm_linear_4d.sh delete mode 100644 .circleci/test_fmri_spm_nested_multiproc_l1.sh delete mode 100644 .circleci/test_fmri_spm_nested_multiproc_l2.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index c8058c48b7..902649cd03 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -172,103 +172,6 @@ jobs: paths: - docker - test_fmri_fsl_spm: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - attach_workspace: - at: /tmp - - run: *set_pr_number - - run: *generate_dockerfiles - - run: *modify_nipype_version - - run: *get_base_image - - run: *build_main_image_py36 - - run: *_get_codecov - - run: *_download_test_data - - run: *prepare_working_directory - - run: - name: Run FSL reuse pipeline - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_fsl_reuse_linear_l1.sh - - run: - name: Run SPM test workflow - 3D inputs - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_linear_3d.sh - - run: - name: Run SPM test workflow - 4D inputs - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_linear_4d.sh - - run: *_run_codecov_smoke - - store_artifacts: *store_artifacts_kwds - - test_fmri_spm_dartel_multiproc: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - attach_workspace: - at: /tmp - - run: *set_pr_number - - run: *generate_dockerfiles - - run: *modify_nipype_version - - run: *get_base_image - - run: *build_main_image_py36 - - run: *_get_codecov - - run: *_download_test_data - - run: *prepare_working_directory - - run: - name: Run SPM DARTEL Level 1 pipeline - no_output_timeout: 1h - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_dartel_multiproc_l1.sh - - run: - name: Run SPM DARTEL Level 2 pipeline - no_output_timeout: 30m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_dartel_multiproc_l2.sh - - run: *_run_codecov_smoke - - store_artifacts: *store_artifacts_kwds - - test_fmri_spm_nested_fsl_feeds: - machine: *machine_kwds - working_directory: /home/circleci/nipype - steps: - - checkout: - path: /home/circleci/nipype - - attach_workspace: - at: /tmp - - run: *set_pr_number - - run: *generate_dockerfiles - - run: *modify_nipype_version - - run: 
*get_base_image - - run: *build_main_image_py36 - - run: *_get_codecov - - run: *_download_test_data - - run: *prepare_working_directory - - run: - name: Run SPM Nested Level 1 pipeline - no_output_timeout: 1h - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_nested_multiproc_l1.sh - - run: - name: Run SPM Nested Level 2 pipeline - no_output_timeout: 30m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_spm_nested_multiproc_l2.sh - - run: - name: Run FSL FEEDS pipeline - no_output_timeout: 40m - environment: *test_environment - command: bash -ux /home/circleci/nipype/.circleci/test_fmri_fsl_feeds_linear_l1.sh - - run: *_run_codecov_smoke - - store_artifacts: *store_artifacts_kwds - deploy_dockerhub: docker: - image: docker:17.10.0-ce-git @@ -446,36 +349,12 @@ workflows: only: /.*/ requires: - compare_base_dockerfiles - - test_fmri_fsl_spm: - filters: - branches: - ignore: - - /docs?\/.*/ - requires: - - compare_base_dockerfiles - - test_fmri_spm_dartel_multiproc: - filters: - branches: - ignore: - - /docs?\/.*/ - requires: - - compare_base_dockerfiles - - test_fmri_spm_nested_fsl_feeds: - filters: - branches: - ignore: - - /docs?\/.*/ - requires: - - compare_base_dockerfiles - deploy_dockerhub: filters: branches: only: master requires: - test_pytest - - test_fmri_spm_nested_fsl_feeds - - test_fmri_fsl_spm - - test_fmri_spm_dartel_multiproc - deploy_pypi: filters: branches: diff --git a/.circleci/test_fmri_fsl_feeds_linear_l1.sh b/.circleci/test_fmri_fsl_feeds_linear_l1.sh deleted file mode 100644 index 9666829b74..0000000000 --- a/.circleci/test_fmri_fsl_feeds_linear_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_fsl_feeds Linear /data/examples/ l1pipeline diff --git a/.circleci/test_fmri_fsl_reuse_linear_l1.sh b/.circleci/test_fmri_fsl_reuse_linear_l1.sh deleted file mode 100644 index 48be49d80d..0000000000 --- a/.circleci/test_fmri_fsl_reuse_linear_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_fsl_reuse Linear /data/examples/ level1_workflow diff --git a/.circleci/test_fmri_spm_dartel_multiproc_l1.sh b/.circleci/test_fmri_spm_dartel_multiproc_l1.sh deleted file mode 100644 index 4208eed506..0000000000 --- a/.circleci/test_fmri_spm_dartel_multiproc_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_dartel MultiProc /data/examples/ level1 diff --git a/.circleci/test_fmri_spm_dartel_multiproc_l2.sh b/.circleci/test_fmri_spm_dartel_multiproc_l2.sh deleted file mode 100644 index 86119e7654..0000000000 --- a/.circleci/test_fmri_spm_dartel_multiproc_l2.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_dartel MultiProc /data/examples/ l2pipeline diff --git a/.circleci/test_fmri_spm_linear_3d.sh b/.circleci/test_fmri_spm_linear_3d.sh deleted file mode 100644 index 27c2c92a1a..0000000000 --- a/.circleci/test_fmri_spm_linear_3d.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v 
$HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow3d diff --git a/.circleci/test_fmri_spm_linear_4d.sh b/.circleci/test_fmri_spm_linear_4d.sh deleted file mode 100644 index cd255d60ca..0000000000 --- a/.circleci/test_fmri_spm_linear_4d.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh test_spm Linear /data/examples/ workflow4d diff --git a/.circleci/test_fmri_spm_nested_multiproc_l1.sh b/.circleci/test_fmri_spm_nested_multiproc_l1.sh deleted file mode 100644 index a6d2133a42..0000000000 --- a/.circleci/test_fmri_spm_nested_multiproc_l1.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ level1 diff --git a/.circleci/test_fmri_spm_nested_multiproc_l2.sh b/.circleci/test_fmri_spm_nested_multiproc_l2.sh deleted file mode 100644 index c0926be148..0000000000 --- a/.circleci/test_fmri_spm_nested_multiproc_l2.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -docker run --rm=false -t -v $WORKDIR:/work -v $HOME/examples:/data/examples:ro -w /work -e NIPYPE_NUMBER_OF_CPUS=4 -e NIPYPE_RESOURCE_MONITOR=1 "${DOCKER_IMAGE}:py36" /usr/bin/run_examples.sh fmri_spm_nested MultiProc /data/examples/ l2pipeline From 7e1a423806e5b5b566fbff89b27b74a2e8fbaa81 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 11:53:15 -0500 Subject: [PATCH 4/7] ENH: Import examples from niflow --- doc/.gitignore | 1 + doc/Makefile | 2 +- doc/conf.py | 37 ++++++++++++++++++++++++++++--------- tools/ex2rst | 2 +- 4 files changed, 31 insertions(+), 11 deletions(-) diff --git a/doc/.gitignore b/doc/.gitignore index 1f812bd420..d396f26e2d 100644 --- a/doc/.gitignore +++ b/doc/.gitignore @@ -1 +1,2 @@ /documentation.zip +_static/python diff --git a/doc/Makefile b/doc/Makefile index bcb7ac2e8f..9b3e3783ef 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -24,7 +24,7 @@ help: @echo " doctest run all doctests embedded in the documentation" clean: - -rm -rf _build/* *~ api/generated interfaces/generated users/examples documentation.zip + -rm -rf _build/* *~ api/generated interfaces/generated users/examples documentation.zip _static/python htmlonly: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html diff --git a/doc/conf.py b/doc/conf.py index 56d6935270..a75887431d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -13,18 +13,37 @@ # serve to show the default. import os +from pathlib import Path +from tempfile import TemporaryDirectory +import shutil from packaging.version import Version import nipype +import subprocess as sp -doc_path = os.path.abspath(os.path.dirname(__file__)) -os.makedirs('users/examples', exist_ok=True) -os.chdir(os.path.join(doc_path, 'users', 'examples')) -os.system("""python ../../../tools/ex2rst -x ../../../examples/test_spm.py \ ---project Nipype --outdir . ../../../examples""") -os.system("""python ../../../tools/ex2rst --project Nipype --outdir . 
\ -../../../examples/frontiers_paper""") -os.chdir(doc_path) +conf_py = Path(__file__) + +example_dir = conf_py.parent / 'users' / 'examples' +shutil.rmtree(example_dir, ignore_errors=True) +example_dir.mkdir(parents=True) +python_dir = conf_py.parent / "_static" / "python" +shutil.rmtree(python_dir, ignore_errors=True) + +ex2rst = str(conf_py.parent.parent / "tools" / "ex2rst") + +with TemporaryDirectory() as tmpdir: + sp.run(["git", "clone", "--depth", "1", "https://github.com/niflows/nipype1-examples.git", + tmpdir], check=True) + source_dir = Path(tmpdir) / "package" / "niflow" / "nipype1" / "examples" + shutil.copytree(source_dir, python_dir) + +sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), + "-x", str(python_dir / "test_spm.py"), + "-x", str(python_dir / "__init__.py"), + "-x", str(python_dir / "cli.py"), + str(python_dir)], check=True) +sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), + str(python_dir / "frontiers_paper")], check=True) # If extensions (or modules to document with autodoc) are in another directory, @@ -98,7 +117,7 @@ # General information about the project. project = u'nipype' -copyright = u'2009-19, Neuroimaging in Python team' +copyright = u'2009-20, Neuroimaging in Python team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/tools/ex2rst b/tools/ex2rst index dc3c6d5f37..346a725466 100755 --- a/tools/ex2rst +++ b/tools/ex2rst @@ -190,7 +190,7 @@ def exfile2rstfile(filename, opts): This same script is also included in the %s source distribution under the :file:`examples` directory. -""" % (filename, opts.project) +""" % (os.path.relpath(filename, opts.outdir), opts.project) dfile.write(msg) From 219f377e10aea533d4e797eb3ce73b22fa07831e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Fri, 21 Feb 2020 12:38:23 -0500 Subject: [PATCH 5/7] DOC: Refer to niflow instead of source distribution --- doc/conf.py | 10 +++++----- tools/ex2rst | 9 ++++++--- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index a75887431d..c5898d1cfd 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -37,13 +37,13 @@ source_dir = Path(tmpdir) / "package" / "niflow" / "nipype1" / "examples" shutil.copytree(source_dir, python_dir) -sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), +sp.run(["python", ex2rst, "--outdir", str(example_dir), str(python_dir), "-x", str(python_dir / "test_spm.py"), "-x", str(python_dir / "__init__.py"), - "-x", str(python_dir / "cli.py"), - str(python_dir)], check=True) -sp.run(["python", ex2rst, "--project", "Nipype", "--outdir", str(example_dir), - str(python_dir / "frontiers_paper")], check=True) + "-x", str(python_dir / "cli.py")], + check=True) +sp.run(["python", ex2rst, "--outdir", str(example_dir), str(python_dir / "frontiers_paper")], + check=True) # If extensions (or modules to document with autodoc) are in another directory, diff --git a/tools/ex2rst b/tools/ex2rst index 346a725466..82653f80e5 100755 --- a/tools/ex2rst +++ b/tools/ex2rst @@ -187,10 +187,13 @@ def exfile2rstfile(filename, opts): .. admonition:: Example source code You can download :download:`the full source code of this example <%s>`. - This same script is also included in the %s source distribution under the - :file:`examples` directory. + This same script is also included in `%s <%s>`__ under the :file:`%s` + directory. 
-""" % (os.path.relpath(filename, opts.outdir), opts.project) +""" % (os.path.relpath(filename, opts.outdir), + "Nipype1 Examples Niflow", + "https://github.com/niflows/nipype1-examples", + "package/niflow/nipype1/examples") dfile.write(msg) From 2e135f6866210c1d73210b83e4a81fee66510806 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 22 Feb 2020 08:16:27 -0500 Subject: [PATCH 6/7] ENH: Make run_examples.py give helpful hints --- tools/run_examples.py | 71 +++++-------------------------------------- 1 file changed, 7 insertions(+), 64 deletions(-) diff --git a/tools/run_examples.py b/tools/run_examples.py index 09a97cc2d9..20382ef74d 100644 --- a/tools/run_examples.py +++ b/tools/run_examples.py @@ -1,69 +1,12 @@ # -*- coding: utf-8 -*- -import os import sys -from shutil import rmtree -from multiprocessing import cpu_count - - -def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True): - from nipype import config - from nipype.interfaces.base import CommandLine - - if plugin is None: - plugin = "MultiProc" - - print("running example: %s with plugin: %s" % (example, plugin)) - config.enable_debug_mode() - config.enable_provenance() - CommandLine.set_default_terminal_output("stream") - - plugin_args = {} - if plugin == "MultiProc": - plugin_args["n_procs"] = int(os.getenv("NIPYPE_NUMBER_OF_CPUS", cpu_count())) - - __import__(example) - for pipeline in pipelines: - wf = getattr(sys.modules[example], pipeline) - wf.base_dir = os.path.join(os.getcwd(), "output", example, plugin) - - results_dir = os.path.join(wf.base_dir, wf.name) - if rm_base_dir and os.path.exists(results_dir): - rmtree(results_dir) - - # Handle a logging directory - log_dir = os.path.join(os.getcwd(), "logs", example) - if not os.path.exists(log_dir): - os.makedirs(log_dir) - wf.config = { - "execution": { - "hash_method": "timestamp", - "stop_on_first_rerun": "true", - "write_provenance": "true", - "poll_sleep_duration": 2, - }, - "logging": {"log_directory": log_dir, "log_to_file": True}, - } - try: - wf.inputs.inputnode.in_data = os.path.abspath(data_path) - except AttributeError: - pass # the workflow does not have inputnode.in_data - - wf.run(plugin=plugin, plugin_args=plugin_args) - # run twice to check if nothing is rerunning - wf.run(plugin=plugin) +from textwrap import dedent if __name__ == "__main__": - path, file = os.path.split(__file__) - sys.path.insert(0, os.path.realpath(os.path.join(path, "..", "examples"))) - examples = { - "fmri_fsl_reuse": ["level1_workflow"], - "fmri_spm_nested": ["level1", "l2pipeline"], - # 'fmri_spm_dartel':['level1','l2pipeline'], - # 'fmri_fsl_feeds':['l1pipeline'] - } - example = sys.argv[1] - plugin = sys.argv[2] - data_path = sys.argv[3] - pipelines = sys.argv[4:] - run_examples(example, pipelines, data_path, plugin) + print(dedent("""Nipype examples have been moved to niflow-nipype1-examples. 
+ +Install with: pip install niflow-nipype1-examples""")) + if sys.argv[1:]: + print("Run this command with: niflow-nipype1-examples " + " ".join(sys.argv[1:])) + sys.exit(1) From 22cbbb8320c29c83a1dbfba11b5e6dd3b75c55f6 Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Sat, 22 Feb 2020 08:18:07 -0500 Subject: [PATCH 7/7] DOC: Add links to examples/README --- examples/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/README.md b/examples/README.md index c22dfdcc67..c2e506e87a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,4 +1,7 @@ -The examples directory previously held a set of literate programming documents that demonstrated -solutions to various problems using Nipype. +The examples directory previously held a set of +[literate programming](https://en.wikipedia.org/wiki/Literate_programming) documents that +demonstrated solutions to various problems using Nipype. -These examples have been moved to https://github.com/niflows/nipype1-examples. +These examples have been moved to the +[Nipype1 Examples Niflow](https://github.com/niflows/nipype1-examples). Please refer to +that repository for more information, and report any issues with the examples there.
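
For reference, the example workflows that the removed CircleCI scripts exercised can still be run from the relocated package. A minimal sketch, assuming the `niflow-nipype1-examples` console script hinted at by the updated `tools/run_examples.py` accepts the same positional arguments (example name, plugin, data path, workflow names) that the old `run_examples.sh` wrapper received in the deleted CI jobs:

    # Assumed replacement for the removed .circleci/test_fmri_spm_nested_multiproc_l1.sh.
    # The console-script name and argument order follow the hint printed by the new
    # tools/run_examples.py; they are not verified against the niflow repository.
    pip install niflow-nipype1-examples
    niflow-nipype1-examples fmri_spm_nested MultiProc /data/examples/ level1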