diff --git a/.github/workflows/cache-branch-binder-build.yml b/.github/workflows/cache-branch-binder-build.yml
new file mode 100644
index 0000000..5b4dab6
--- /dev/null
+++ b/.github/workflows/cache-branch-binder-build.yml
@@ -0,0 +1,17 @@
+# Copyright (C) 2024 C-PAC Developers
+# This file is part of C-PAC_tutorials.
+# C-PAC_tutorials is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+# C-PAC_tutorials is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+# You should have received a copy of the GNU Lesser General Public License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+name: Cache branch Binder build
+on: [push]
+
+jobs:
+ Create-MyBinderOrg-Cache:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Cache Binder build on mybinder.org
+ uses: jupyterhub/repo2docker-action@master
+ with:
+ NO_PUSH: true
+ MYBINDERORG_TAG: ${{ github.event.ref }}
diff --git a/.github/workflows/link-pr-to-binder.yml b/.github/workflows/link-pr-to-binder.yml
new file mode 100644
index 0000000..24b2112
--- /dev/null
+++ b/.github/workflows/link-pr-to-binder.yml
@@ -0,0 +1,28 @@
+# Copyright (C) 2024 C-PAC Developers
+# This file is part of C-PAC_tutorials.
+# C-PAC_tutorials is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+# C-PAC_tutorials is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+# You should have received a copy of the GNU Lesser General Public License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+name: Link PR to Binder
+on:
+ pull_request:
+ types: [opened, reopened]
+
+jobs:
+ Create-Binder-Badge:
+ runs-on: ubuntu-latest
+ steps:
+ - name: comment on PR with Binder link
+ uses: actions/github-script@v1
+ with:
+ github-token: ${{secrets.GITHUB_TOKEN}}
+ script: |
+ var BRANCH_NAME = process.env.BRANCH_NAME;
+ github.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: `[![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/${context.repo.owner}/${context.repo.repo}/${BRANCH_NAME}) :point_left: Launch a binder notebook on this branch`
+ })
+ env:
+ BRANCH_NAME: ${{ github.event.pull_request.head.ref }}
diff --git a/C-PAC-requirements.txt b/C-PAC-requirements.txt
new file mode 100644
index 0000000..dce1541
--- /dev/null
+++ b/C-PAC-requirements.txt
@@ -0,0 +1,7 @@
+# Copyright (C) 2024 C-PAC Developers
+# This file is part of C-PAC_tutorials.
+# C-PAC_tutorials is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+# C-PAC_tutorials is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+# You should have received a copy of the GNU Lesser General Public License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+CPAC @ git+https://github.com/FCP-INDI/C-PAC.git
+ipykernel
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index daa5423..facd064 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,7 +11,8 @@ You should have received a copy of the GNU Lesser General Public License along w
1. Check out a feature branch, and, if relevant, open an issue and/or a draft PR.
1. Create one or more notebooks, either in the top level of this repository (for a single-notebook tutorial) or in a subdirectory (for a tutorial with one or more notebooks).
-1. If a notebook has any requirements that aren't already included in this repository's [`requirements.txt`](./requirements.txt), add them to that file.
+1. If a notebook has any requirements that aren't already included in an environment in this repository's [`requirements.txt`](./requirements.txt) or [`postBuild`](./postBuild), add them to the appropriate file, creating an environment in `postBuild` if necessary.
+1. Make sure the `metadata.kernelspec` in each `.ipynb` file is configured to the correct kernel (see the sketch after this list).
1. If any of the output cells in a notebook are too long, edit the JSON in the raw `*.ipynb` file to abridge or truncate the overlong output(s) to reduce the file size and emphasize the relevant portion(s) of the output. For example, several sections of [this output](https://github.com/FCP-INDI/C-PAC_tutorials/blob/ba88d7b91513bfba0d67eeae51fdaba29f84bb10/observed_usage.ipynb?short_path=b38a683#L24-L60) are replaced with `[…]`.
1. If you want the tutorial to appear in the user docs, add it to the TOC tree in [`index.rst`](./index.rst)
1. (optionally), add a download link to the top of your notebook.
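For reference, here is a minimal sketch of checking which kernel each notebook declares in its `metadata.kernelspec`; this snippet is illustrative only, is not part of this repository, and assumes `nbformat` is installed:

```python
from pathlib import Path

import nbformat

# Report the kernelspec name declared by each notebook in the repository.
for notebook_path in sorted(Path(".").rglob("*.ipynb")):
    notebook = nbformat.read(str(notebook_path), as_version=4)
    kernelspec = notebook.metadata.get("kernelspec", {})
    print(f"{notebook_path}: {kernelspec.get('name', '<no kernelspec>')}")
```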
diff --git a/FROM/index.ipynb b/FROM/index.ipynb
new file mode 100644
index 0000000..699931b
--- /dev/null
+++ b/FROM/index.ipynb
@@ -0,0 +1,3570 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "fc28acd3-f749-4669-94c3-a6f971f7aba2",
+ "metadata": {},
+ "source": [
+ "# Parsing minified pipelines (`FROM`: key)\n",
+ "\n",
+ "Letting the C-PAC pipeline configuration code parse minified pipelines can save time rather than tracing a chain of `FROM` imports.\n",
+ "\n",
+ "First we'll create a little function to\n",
+ "* take a path to a YAML file as a positional argument or a raw YAML string as a keyword argument,\n",
+ "* print the full config with default comments as YAML,\n",
+ " and\n",
+ "* return the Configuration object."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "44a607ef-b398-4020-9abc-80642c2d5afc",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pathlib import Path\n",
+ "from typing import Optional\n",
+ "import yaml\n",
+ "from CPAC.utils.configuration import Configuration, Preconfiguration\n",
+ "from CPAC.utils.configuration.yaml_template import create_yaml_from_template\n",
+ "\n",
+ "\n",
+ "def show_full_config(path_to_config: Optional[Path | str] = None, *, full_yaml: Optional[str] = None) -> dict:\n",
+ " \"\"\"Given a path to a minified C-PAC participant pipeline configuration, return the full loaded config.\"\"\"\n",
+ " # load the config\n",
+ " if path_to_config:\n",
+ " if isinstance(path_to_config, str):\n",
+ " path_to_config = Path(path_to_config)\n",
+ " with path_to_config.open(\"r\", encoding=\"utf-8\") as _config_file:\n",
+ " full_yaml = _config_file.read()\n",
+ " full_configuration = Configuration(yaml.safe_load(full_yaml))\n",
+ " # display the config\n",
+ " print(create_yaml_from_template(full_configuration, \"default\", skip_env_check=True))\n",
+ " # return the config\n",
+ " return full_configuration"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "94e12d3e-6439-4030-94fa-4b47cdbf2afb",
+ "metadata": {},
+ "source": [
+ "First we'll load and print the `fmriprep-options` preconfig by just importing it and making no changes:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "01f2e83b-393e-4f13-8a07-c134832d3d53",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "%YAML 1.1\n",
+ "---\n",
+ "# CPAC Pipeline Configuration YAML file\n",
+ "# Version 1.8.7.dev1\n",
+ "#\n",
+ "# http://fcp-indi.github.io for more info.\n",
+ "#\n",
+ "# Tip: This file can be edited manually with a text editor for quick modifications.\n",
+ "pipeline_setup:\n",
+ "\n",
+ " # Name for this pipeline configuration - useful for identification.\n",
+ " # This string will be sanitized and used in filepaths\n",
+ " pipeline_name: cpac_fmriprep-options\n",
+ " output_directory:\n",
+ "\n",
+ " # Quality control outputs\n",
+ " quality_control:\n",
+ "\n",
+ " # Generate eXtensible Connectivity Pipeline-style quality control files\n",
+ " generate_xcpqc_files: Off\n",
+ "\n",
+ " # Generate quality control pages containing preprocessing and derivative outputs.\n",
+ " generate_quality_control_images: Off\n",
+ "\n",
+ " # Directory where C-PAC should write out processed data, logs, and crash reports.\n",
+ " # - If running in a container (Singularity/Docker), you can simply set this to an arbitrary\n",
+ " # name like '/outputs', and then map (-B/-v) your desired output directory to that label.\n",
+ " # - If running outside a container, this should be a full path to a directory.\n",
+ " path: /outputs/output\n",
+ "\n",
+ " # (Optional) Path to a BIDS-Derivatives directory that already has outputs.\n",
+ " # - This option is intended to ingress already-existing resources from an output\n",
+ " # directory without writing new outputs back into the same directory.\n",
+ " # - If provided, C-PAC will ingress the already-computed outputs from this directory and\n",
+ " # continue the pipeline from where they leave off.\n",
+ " # - If left as 'None', C-PAC will ingress any already-computed outputs from the\n",
+ " # output directory you provide above in 'path' instead, the default behavior.\n",
+ " source_outputs_dir:\n",
+ "\n",
+ " # Set to True to make C-PAC ingress the outputs from the primary output directory if they\n",
+ " # exist, even if a source_outputs_dir is provided\n",
+ " # - Setting to False will pull from source_outputs_dir every time, over-writing any\n",
+ " # calculated outputs in the main output directory\n",
+ " # - C-PAC will still pull from source_outputs_dir if the main output directory is\n",
+ " # empty, however\n",
+ " pull_source_once: On\n",
+ "\n",
+ " # Include extra versions and intermediate steps of functional preprocessing in the output directory.\n",
+ " write_func_outputs: Off\n",
+ "\n",
+ " # Include extra outputs in the output directory that may be of interest when more information is needed.\n",
+ " write_debugging_outputs: Off\n",
+ "\n",
+ " # Output directory format and structure.\n",
+ " # Options: default, ndmg\n",
+ " output_tree: default\n",
+ "\n",
+ " system_config:\n",
+ "\n",
+ " # Stop worklow execution on first crash?\n",
+ " fail_fast: Off\n",
+ "\n",
+ " # Random seed used to fix the state of execution.\n",
+ " # If unset, each process uses its own default.\n",
+ " # If set, a `random.log` file will be generated logging the random seed and each node to which that seed was applied.\n",
+ " # If set to a positive integer (up to 2147483647), that integer will be used to seed each process that accepts a random seed.\n",
+ " # If set to 'random', a random positive integer (up to 2147483647) will be generated and that seed will be used to seed each process that accepts a random seed.\n",
+ " random_seed:\n",
+ "\n",
+ " # Prior to running a pipeline C-PAC makes a rough estimate of a worst-case-scenario maximum concurrent memory usage with high-resoltion data, raising an exception describing the recommended minimum memory allocation for the given configuration.\n",
+ " # Turning this option off will allow pipelines to run without allocating the recommended minimum, allowing for more efficient runs at the risk of out-of-memory crashes (use at your own risk)\n",
+ " raise_insufficient: On\n",
+ "\n",
+ " # A callback.log file from a previous run can be provided to estimate memory usage based on that run.\n",
+ " observed_usage:\n",
+ "\n",
+ " # Path to callback log file with previously observed usage.\n",
+ " # Can be overridden with the commandline flag `--runtime_usage`.\n",
+ " callback_log:\n",
+ "\n",
+ " # Percent. E.g., `buffer: 10` would estimate 1.1 * the observed memory usage from the callback log provided in \"usage\".\n",
+ " # Can be overridden with the commandline flag `--runtime_buffer`.\n",
+ " buffer: 10\n",
+ "\n",
+ " # Select Off if you intend to run CPAC on a single machine.\n",
+ " # If set to On, CPAC will attempt to submit jobs through the job scheduler / resource manager selected below.\n",
+ " on_grid:\n",
+ " run: Off\n",
+ "\n",
+ " # Sun Grid Engine (SGE), Portable Batch System (PBS), or Simple Linux Utility for Resource Management (SLURM).\n",
+ " # Only applies if you are running on a grid or compute cluster.\n",
+ " resource_manager: SGE\n",
+ " SGE:\n",
+ "\n",
+ " # SGE Parallel Environment to use when running CPAC.\n",
+ " # Only applies when you are running on a grid or compute cluster using SGE.\n",
+ " parallel_environment: cpac\n",
+ "\n",
+ " # SGE Queue to use when running CPAC.\n",
+ " # Only applies when you are running on a grid or compute cluster using SGE.\n",
+ " queue: all.q\n",
+ "\n",
+ " # The maximum amount of memory each participant's workflow can allocate.\n",
+ " # Use this to place an upper bound of memory usage.\n",
+ " # - Warning: 'Memory Per Participant' multiplied by 'Number of Participants to Run Simultaneously'\n",
+ " # must not be more than the total amount of RAM.\n",
+ " # - Conversely, using too little RAM can impede the speed of a pipeline run.\n",
+ " # - It is recommended that you set this to a value that when multiplied by\n",
+ " # 'Number of Participants to Run Simultaneously' is as much RAM you can safely allocate.\n",
+ " maximum_memory_per_participant: 3\n",
+ "\n",
+ " # The maximum amount of cores (on a single machine) or slots on a node (on a cluster/grid)\n",
+ " # to allocate per participant.\n",
+ " # - Setting this above 1 will parallelize each participant's workflow where possible.\n",
+ " # If you wish to dedicate multiple cores to ANTS-based anatomical registration (below),\n",
+ " # this value must be equal or higher than the amount of cores provided to ANTS.\n",
+ " # - The maximum number of cores your run can possibly employ will be this setting multiplied\n",
+ " # by the number of participants set to run in parallel (the 'Number of Participants to Run\n",
+ " # Simultaneously' setting).\n",
+ " max_cores_per_participant: 1\n",
+ "\n",
+ " # The number of cores to allocate to ANTS-based anatomical registration per participant.\n",
+ " # - Multiple cores can greatly speed up this preprocessing step.\n",
+ " # - This number cannot be greater than the number of cores per participant.\n",
+ " num_ants_threads: 1\n",
+ "\n",
+ " # The number of cores to allocate to processes that use OpenMP.\n",
+ " num_OMP_threads: 1\n",
+ "\n",
+ " # The number of participant workflows to run at the same time.\n",
+ " # - The maximum number of cores your run can possibly employ will be this setting\n",
+ " # multiplied by the number of cores dedicated to each participant (the 'Maximum Number of Cores Per Participant' setting).\n",
+ " num_participants_at_once: 1\n",
+ "\n",
+ " # Full path to the FSL version to be used by CPAC.\n",
+ " # If you have specified an FSL path in your .bashrc file, this path will be set automatically.\n",
+ " FSLDIR: FSLDIR\n",
+ "\n",
+ " working_directory:\n",
+ "\n",
+ " # Directory where C-PAC should store temporary and intermediate files.\n",
+ " # - This directory must be saved if you wish to re-run your pipeline from where you left off (if not completed).\n",
+ " # - NOTE: As it stores all intermediate files, this directory can grow to become very\n",
+ " # large, especially for data with a large amount of TRs.\n",
+ " # - If running in a container (Singularity/Docker), you can simply set this to an arbitrary\n",
+ " # name like '/work', and then map (-B/-v) your desired output directory to that label.\n",
+ " # - If running outside a container, this should be a full path to a directory.\n",
+ " # - This can be written to '/tmp' if you do not intend to save your working directory.\n",
+ " path: /outputs/working\n",
+ "\n",
+ " # Deletes the contents of the Working Directory after running.\n",
+ " # This saves disk space, but any additional preprocessing or analysis will have to be completely re-run.\n",
+ " remove_working_dir: On\n",
+ "\n",
+ " log_directory:\n",
+ "\n",
+ " # Whether to write log details of the pipeline run to the logging files.\n",
+ " run_logging: On\n",
+ " path: /outputs/logs\n",
+ "\n",
+ " # Configuration options for logging visualizations of the workflow graph\n",
+ " graphviz:\n",
+ "\n",
+ " # Configuration for a graphviz visualization of the entire workflow. See https://fcp-indi.github.io/docs/developer/nodes#CPAC.pipeline.nipype_pipeline_engine.Workflow.write_graph for details about the various options\n",
+ " entire_workflow:\n",
+ "\n",
+ " # Whether to generate the graph visualization\n",
+ " generate: Off\n",
+ "\n",
+ " # Options: [orig, hierarchical, flat, exec, colored]\n",
+ " graph2use: []\n",
+ "\n",
+ " # Options: [svg, png]\n",
+ " format: []\n",
+ "\n",
+ " # The node name will be displayed in the form `nodename (package)` when On or `nodename.Class.package` when Off\n",
+ " simple_form: On\n",
+ "\n",
+ " crash_log_directory:\n",
+ "\n",
+ " # Directory where CPAC should write crash logs.\n",
+ " path: /outputs/crash\n",
+ "\n",
+ " outdir_ingress:\n",
+ " run: Off\n",
+ "\n",
+ " Amazon-AWS:\n",
+ "\n",
+ " # If setting the 'Output Directory' to an S3 bucket, insert the path to your AWS credentials file here.\n",
+ " aws_output_bucket_credentials:\n",
+ "\n",
+ " # Enable server-side 256-AES encryption on data to the S3 bucket\n",
+ " s3_encryption: On\n",
+ "\n",
+ " Debugging:\n",
+ "\n",
+ " # Verbose developer messages.\n",
+ " verbose: Off\n",
+ "\n",
+ "# PREPROCESSING\n",
+ "# -------------\n",
+ "surface_analysis:\n",
+ "\n",
+ " # Run freesurfer_abcd_preproc to obtain preprocessed T1w for reconall\n",
+ " abcd_prefreesurfer_prep:\n",
+ " run: Off\n",
+ "\n",
+ " # Will run Freesurfer for surface-based analysis. Will output traditional Freesurfer derivatives.\n",
+ " # If you wish to employ Freesurfer outputs for brain masking or tissue segmentation in the voxel-based pipeline,\n",
+ " # select those 'Freesurfer-' labeled options further below in anatomical_preproc.\n",
+ " freesurfer:\n",
+ " run_reconall: Off\n",
+ "\n",
+ " # Add extra arguments to recon-all command\n",
+ " reconall_args:\n",
+ "\n",
+ " # Ingress freesurfer recon-all folder\n",
+ " ingress_reconall: Off\n",
+ "\n",
+ " # Run ABCD-HCP post FreeSurfer and fMRISurface pipeline\n",
+ " post_freesurfer:\n",
+ " run: Off\n",
+ " subcortical_gray_labels: /opt/dcan-tools/pipeline/global/config/FreeSurferSubcorticalLabelTableLut.txt\n",
+ " freesurfer_labels: /opt/dcan-tools/pipeline/global/config/FreeSurferAllLut.txt\n",
+ " surf_atlas_dir: /opt/dcan-tools/pipeline/global/templates/standard_mesh_atlases\n",
+ " gray_ordinates_dir: /opt/dcan-tools/pipeline/global/templates/Greyordinates\n",
+ " gray_ordinates_res: 2\n",
+ " high_res_mesh: 164\n",
+ " low_res_mesh: 32\n",
+ " fmri_res: 2\n",
+ " smooth_fwhm: 2\n",
+ "\n",
+ " amplitude_low_frequency_fluctuation:\n",
+ " run: Off\n",
+ "\n",
+ " regional_homogeneity:\n",
+ " run: Off\n",
+ "\n",
+ " surface_connectivity:\n",
+ " run: Off\n",
+ " surface_parcellation_template: /cpac_templates/Schaefer2018_200Parcels_17Networks_order.dlabel.nii\n",
+ "\n",
+ "anatomical_preproc:\n",
+ " run: On\n",
+ " acpc_alignment:\n",
+ " T1w_brain_ACPC_template:\n",
+ "\n",
+ " # Choose a tool to crop the FOV in ACPC alignment.\n",
+ " # Using FSL's robustfov or flirt command.\n",
+ " # Default: robustfov for human data, flirt for monkey data.\n",
+ " FOV_crop: robustfov\n",
+ "\n",
+ " # Run ACPC alignment on brain mask\n",
+ " # If the brain mask is in native space, turn it on\n",
+ " # If the brain mask is ACPC aligned, turn it off\n",
+ " align_brain_mask: Off\n",
+ " T2w_ACPC_template:\n",
+ " T2w_brain_ACPC_template:\n",
+ " run: Off\n",
+ "\n",
+ " # Run ACPC alignment before non-local means filtering or N4 bias\n",
+ " # correction\n",
+ " run_before_preproc: On\n",
+ "\n",
+ " # ACPC size of brain in z-dimension in mm.\n",
+ " # Default: 150mm for human data.\n",
+ " brain_size: 150\n",
+ "\n",
+ " # ACPC Target\n",
+ " # options: 'brain' or 'whole-head'\n",
+ " # note: 'brain' requires T1w_brain_ACPC_template below to be populated\n",
+ " acpc_target: whole-head\n",
+ "\n",
+ " # ACPC aligned template\n",
+ " T1w_ACPC_template:\n",
+ "\n",
+ " brain_extraction:\n",
+ " run: On\n",
+ " FreeSurfer-BET:\n",
+ "\n",
+ " # Template to be used for FreeSurfer-BET brain extraction in CCS-options pipeline\n",
+ " T1w_brain_template_mask_ccs:\n",
+ "\n",
+ " # using: ['3dSkullStrip', 'BET', 'UNet', 'niworkflows-ants', 'FreeSurfer-ABCD', 'FreeSurfer-BET-Tight', 'FreeSurfer-BET-Loose', 'FreeSurfer-Brainmask']\n",
+ " # this is a fork option\n",
+ " using: [niworkflows-ants]\n",
+ "\n",
+ " # option parameters\n",
+ " AFNI-3dSkullStrip:\n",
+ "\n",
+ " # Output a mask volume instead of a skull-stripped volume. The mask volume containes 0 to 6, which represents voxel's postion. If set to True, C-PAC will use this output to generate anatomical brain mask for further analysis.\n",
+ " mask_vol: Off\n",
+ "\n",
+ " # Set the threshold value controlling the brain vs non-brain voxels. Default is 0.6.\n",
+ " shrink_factor: 0.6\n",
+ "\n",
+ " # Vary the shrink factor at every iteration of the algorithm. This prevents the likelihood of surface getting stuck in large pools of CSF before reaching the outer surface of the brain. Default is On.\n",
+ " var_shrink_fac: On\n",
+ "\n",
+ " # The shrink factor bottom limit sets the lower threshold when varying the shrink factor. Default is 0.4, for when edge detection is used (which is On by default), otherwise the default value is 0.65.\n",
+ " shrink_factor_bot_lim: 0.4\n",
+ "\n",
+ " # Avoids ventricles while skullstripping.\n",
+ " avoid_vent: On\n",
+ "\n",
+ " # Set the number of iterations. Default is 250.The number of iterations should depend upon the density of your mesh.\n",
+ " n_iterations: 250\n",
+ "\n",
+ " # While expanding, consider the voxels above and not only the voxels below\n",
+ " pushout: On\n",
+ "\n",
+ " # Perform touchup operations at the end to include areas not covered by surface expansion.\n",
+ " touchup: On\n",
+ "\n",
+ " # Give the maximum number of pixels on either side of the hole that can be filled. The default is 10 only if 'Touchup' is On - otherwise, the default is 0.\n",
+ " fill_hole: 10\n",
+ "\n",
+ " # Perform nearest neighbor coordinate interpolation every few iterations. Default is 72.\n",
+ " NN_smooth: 72\n",
+ "\n",
+ " # Perform final surface smoothing after all iterations. Default is 20.\n",
+ " smooth_final: 20\n",
+ "\n",
+ " # Avoid eyes while skull stripping. Default is On.\n",
+ " avoid_eyes: On\n",
+ "\n",
+ " # Use edge detection to reduce leakage into meninges and eyes. Default is On.\n",
+ " use_edge: On\n",
+ "\n",
+ " # Speed of expansion.\n",
+ " exp_frac: 0.1\n",
+ "\n",
+ " # Perform aggressive push to edge. This might cause leakage. Default is Off.\n",
+ " push_to_edge: Off\n",
+ "\n",
+ " # Use outer skull to limit expansion of surface into the skull in case of very strong shading artifacts. Use this only if you have leakage into the skull.\n",
+ " use_skull: Off\n",
+ "\n",
+ " # Percentage of segments allowed to intersect surface. It is typically a number between 0 and 0.1, but can include negative values (which implies no testing for intersection).\n",
+ " perc_int: 0\n",
+ "\n",
+ " # Number of iterations to remove intersection problems. With each iteration, the program automatically increases the amount of smoothing to get rid of intersections. Default is 4.\n",
+ " max_inter_iter: 4\n",
+ "\n",
+ " # Multiply input dataset by FAC if range of values is too small.\n",
+ " fac: 1\n",
+ "\n",
+ " # Blur dataset after spatial normalization. Recommended when you have lots of CSF in brain and when you have protruding gyri (finger like). If so, recommended value range is 2-4. Otherwise, leave at 0.\n",
+ " blur_fwhm: 0\n",
+ "\n",
+ " # Set it as True if processing monkey data with AFNI\n",
+ " monkey: Off\n",
+ "\n",
+ " FSL-BET:\n",
+ "\n",
+ " # Switch \"On\" to crop out neck regions before generating the mask (default: Off).\n",
+ " Robustfov: Off\n",
+ "\n",
+ " # Set the threshold value controling the brain vs non-brain voxels, default is 0.5\n",
+ " frac: 0.5\n",
+ "\n",
+ " # Mesh created along with skull stripping\n",
+ " mesh_boolean: Off\n",
+ "\n",
+ " # Create a surface outline image\n",
+ " outline: Off\n",
+ "\n",
+ " # Add padding to the end of the image, improving BET.Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " padding: Off\n",
+ "\n",
+ " # Integer value of head radius\n",
+ " radius: 0\n",
+ "\n",
+ " # Reduce bias and cleanup neck. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " reduce_bias: Off\n",
+ "\n",
+ " # Eyes and optic nerve cleanup. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " remove_eyes: Off\n",
+ "\n",
+ " # Robust brain center estimation. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " robust: Off\n",
+ "\n",
+ " # Create a skull image\n",
+ " skull: Off\n",
+ "\n",
+ " # Gets additional skull and scalp surfaces by running bet2 and betsurf. This is mutually exclusive with reduce_bias, robust, padding, remove_eyes\n",
+ " surfaces: Off\n",
+ "\n",
+ " # Apply thresholding to segmented brain image and mask\n",
+ " threshold: Off\n",
+ "\n",
+ " # Vertical gradient in fractional intensity threshold (-1,1)\n",
+ " vertical_gradient: 0.0\n",
+ "\n",
+ " UNet:\n",
+ "\n",
+ " # UNet model\n",
+ " unet_model: s3://fcp-indi/resources/cpac/resources/Site-All-T-epoch_36.model\n",
+ "\n",
+ " niworkflows-ants:\n",
+ "\n",
+ " # Template to be used during niworkflows-ants.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " # niworkflows-ants Brain extraction template\n",
+ " template_path: /ants_template/oasis/T_template0.nii.gz\n",
+ "\n",
+ " # niworkflows-ants probability mask\n",
+ " mask_path: /ants_template/oasis/T_template0_BrainCerebellumProbabilityMask.nii.gz\n",
+ "\n",
+ " # niworkflows-ants registration mask (can be optional)\n",
+ " regmask_path: /ants_template/oasis/T_template0_BrainCerebellumRegistrationMask.nii.gz\n",
+ "\n",
+ " run_t2: Off\n",
+ "\n",
+ " # Bias field correction based on square root of T1w * T2w\n",
+ " t1t2_bias_field_correction:\n",
+ " run: Off\n",
+ " BiasFieldSmoothingSigma: 5\n",
+ "\n",
+ " # Non-local means filtering via ANTs DenoiseImage\n",
+ " non_local_means_filtering:\n",
+ "\n",
+ " # this is a fork option\n",
+ " run: [Off]\n",
+ "\n",
+ " # options: 'Gaussian' or 'Rician'\n",
+ " noise_model: Gaussian\n",
+ "\n",
+ " # N4 bias field correction via ANTs\n",
+ " n4_bias_field_correction:\n",
+ "\n",
+ " # this is a fork option\n",
+ " run: [Off]\n",
+ "\n",
+ " # An integer to resample the input image to save computation time. Shrink factors <= 4 are commonly used.\n",
+ " shrink_factor: 2\n",
+ "\n",
+ "segmentation:\n",
+ "\n",
+ " # Automatically segment anatomical images into white matter, gray matter,\n",
+ " # and CSF based on prior probability maps.\n",
+ " run: On\n",
+ " tissue_segmentation:\n",
+ "\n",
+ " # using: ['FSL-FAST', 'Template_Based', 'ANTs_Prior_Based', 'FreeSurfer']\n",
+ " # this is a fork point\n",
+ " using: [FSL-FAST]\n",
+ "\n",
+ " # option parameters\n",
+ " FSL-FAST:\n",
+ " thresholding:\n",
+ "\n",
+ " # thresholding of the tissue segmentation probability maps\n",
+ " # options: 'Auto', 'Custom'\n",
+ " use: Custom\n",
+ " Custom:\n",
+ "\n",
+ " # Set the threshold value for the segmentation probability masks (CSF, White Matter, and Gray Matter)\n",
+ " # The values remaining will become the binary tissue masks.\n",
+ " # A good starting point is 0.95.\n",
+ " # CSF (cerebrospinal fluid) threshold.\n",
+ " CSF_threshold_value: 0.95\n",
+ "\n",
+ " # White matter threshold.\n",
+ " WM_threshold_value: 0.95\n",
+ "\n",
+ " # Gray matter threshold.\n",
+ " GM_threshold_value: 0.95\n",
+ "\n",
+ " use_priors:\n",
+ "\n",
+ " # Use template-space tissue priors to refine the binary tissue masks generated by segmentation.\n",
+ " run: Off\n",
+ "\n",
+ " # Full path to a directory containing binarized prior probability maps.\n",
+ " # These maps are included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " priors_path:\n",
+ "\n",
+ " # Full path to a binarized White Matter prior probability map.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " WM_path: $priors_path/avg152T1_white_bin.nii.gz\n",
+ "\n",
+ " # Full path to a binarized Gray Matter prior probability map.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " GM_path: $priors_path/avg152T1_gray_bin.nii.gz\n",
+ "\n",
+ " # Full path to a binarized CSF prior probability map.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " CSF_path: $priors_path/avg152T1_csf_bin.nii.gz\n",
+ "\n",
+ " Template_Based:\n",
+ "\n",
+ " # These masks should be in the same space of your registration template, e.g. if\n",
+ " # you choose 'EPI Template' , below tissue masks should also be EPI template tissue masks.\n",
+ " #\n",
+ " # Options: ['T1_Template', 'EPI_Template']\n",
+ " template_for_segmentation: []\n",
+ "\n",
+ " # These masks are included as part of the 'Image Resource Files' package available\n",
+ " # on the Install page of the User Guide.\n",
+ " # Full path to a binarized White Matter mask.\n",
+ " WHITE:\n",
+ "\n",
+ " # Full path to a binarized Gray Matter mask.\n",
+ " GRAY:\n",
+ "\n",
+ " # Full path to a binarized CSF mask.\n",
+ " CSF:\n",
+ "\n",
+ " ANTs_Prior_Based:\n",
+ "\n",
+ " # Generate white matter, gray matter, CSF masks based on antsJointLabelFusion\n",
+ " # ANTs Prior-based Segmentation workflow that has shown optimal results for non-human primate data.\n",
+ " # The atlas image assumed to be used in ANTs Prior-based Segmentation.\n",
+ " template_brain_list:\n",
+ "\n",
+ " # The atlas segmentation images.\n",
+ " # For performing ANTs Prior-based segmentation method\n",
+ " # the number of specified segmentations should be identical to the number of atlas brain image sets.\n",
+ " # eg.\n",
+ " # ANTs_prior_seg_template_brain_list :\n",
+ " # - atlas1.nii.gz\n",
+ " # - atlas2.nii.gz\n",
+ " # ANTs_prior_seg_template_segmentation_list:\n",
+ " # - segmentation1.nii.gz\n",
+ " # - segmentation1.nii.gz\n",
+ " template_segmentation_list:\n",
+ "\n",
+ " # Label values corresponding to Gray Matter in multiatlas file\n",
+ " GM_label: []\n",
+ "\n",
+ " # Label values corresponding to White Matter in multiatlas file\n",
+ " WM_label: []\n",
+ "\n",
+ " # Label values corresponding to CSF/GM/WM in atlas file\n",
+ " # It is not necessary to change this values unless your CSF/GM/WM label values are different from Freesurfer Color Lookup Table.\n",
+ " # https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/AnatomicalROI/FreeSurferColorLUT\n",
+ " # Label values corresponding to CSF in multiatlas file\n",
+ " CSF_label: []\n",
+ "\n",
+ " FreeSurfer:\n",
+ "\n",
+ " # Use mri_binarize --erode option to erode segmentation masks\n",
+ " erode:\n",
+ "\n",
+ " # Label values corresponding to CSF in FreeSurfer aseg segmentation file\n",
+ " CSF_label: []\n",
+ "\n",
+ " # Label values corresponding to Gray Matter in FreeSurfer aseg segmentation file\n",
+ " GM_label: []\n",
+ "\n",
+ " # Label values corresponding to White Matter in FreeSurfer aseg segmentation file\n",
+ " WM_label: []\n",
+ "\n",
+ "registration_workflows:\n",
+ " anatomical_registration:\n",
+ " run: On\n",
+ " registration:\n",
+ " FSL-FNIRT:\n",
+ "\n",
+ " # The resolution to which anatomical images should be transformed during registration.\n",
+ " # This is the resolution at which processed anatomical files will be output.\n",
+ " # specifically for monkey pipeline\n",
+ " ref_resolution: 2mm\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is for monkey pipeline specifically.\n",
+ " FNIRT_T1w_brain_template:\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is for monkey pipeline specifically.\n",
+ " FNIRT_T1w_template:\n",
+ "\n",
+ " # Reference mask with 2mm resolution to be used during FNIRT-based brain extraction in ABCD-options pipeline.\n",
+ " ref_mask_res-2:\n",
+ "\n",
+ " # Template with 2mm resolution to be used during FNIRT-based brain extraction in ABCD-options pipeline.\n",
+ " T1w_template_res-2:\n",
+ "\n",
+ " # Configuration file to be used by FSL to set FNIRT parameters.\n",
+ " # It is not necessary to change this path unless you intend to use custom FNIRT parameters or a non-standard template.\n",
+ " fnirt_config: T1_2_MNI152_2mm\n",
+ "\n",
+ " # Reference mask for FSL registration.\n",
+ " ref_mask:\n",
+ "\n",
+ " # Interpolation method for writing out transformed anatomical images.\n",
+ " # Possible values: trilinear, sinc, spline\n",
+ " interpolation: sinc\n",
+ "\n",
+ " # Identity matrix used during FSL-based resampling of anatomical-space data throughout the pipeline.\n",
+ " # It is not necessary to change this path unless you intend to use a different template.\n",
+ " identity_matrix: /Users/jon.clucas/fsl/etc/flirtsch/ident.mat\n",
+ "\n",
+ " # using: ['ANTS', 'FSL', 'FSL-linear']\n",
+ " # this is a fork point\n",
+ " # selecting both ['ANTS', 'FSL'] will run both and fork the pipeline\n",
+ " using: [ANTS]\n",
+ "\n",
+ " # option parameters\n",
+ " ANTs:\n",
+ "\n",
+ " # If a lesion mask is available for a T1w image, use it to improve the ANTs' registration\n",
+ " # ANTS registration only.\n",
+ " use_lesion_mask: Off\n",
+ "\n",
+ " # ANTs parameters for T1-template-based registration\n",
+ " T1_registration:\n",
+ " - collapse-output-transforms: 1\n",
+ " - dimensionality: 3\n",
+ " - initial-moving-transform:\n",
+ " initializationFeature: 0\n",
+ " - transforms:\n",
+ " - Rigid:\n",
+ " convergence:\n",
+ " convergenceThreshold: 1e-06\n",
+ " convergenceWindowSize: 20\n",
+ " iteration: 100x100\n",
+ " gradientStep: 0.05\n",
+ " metric:\n",
+ " metricWeight: 1\n",
+ " numberOfBins: 32\n",
+ " samplingPercentage: 0.25\n",
+ " samplingStrategy: Regular\n",
+ " type: MI\n",
+ " shrink-factors: 2x1\n",
+ " smoothing-sigmas: 2.0x1.0vox\n",
+ " use-histogram-matching: On\n",
+ " - Affine:\n",
+ " convergence:\n",
+ " convergenceThreshold: 1e-06\n",
+ " convergenceWindowSize: 20\n",
+ " iteration: 100x100\n",
+ " gradientStep: 0.08\n",
+ " metric:\n",
+ " metricWeight: 1\n",
+ " numberOfBins: 32\n",
+ " samplingPercentage: 0.25\n",
+ " samplingStrategy: Regular\n",
+ " type: MI\n",
+ " shrink-factors: 2x1\n",
+ " smoothing-sigmas: 1.0x0.0vox\n",
+ " use-histogram-matching: On\n",
+ " - SyN:\n",
+ " convergence:\n",
+ " convergenceThreshold: 1e-06\n",
+ " convergenceWindowSize: 10\n",
+ " iteration: 100x70x50x20\n",
+ " gradientStep: 0.1\n",
+ " metric:\n",
+ " metricWeight: 1\n",
+ " radius: 4\n",
+ " type: CC\n",
+ " shrink-factors: 8x4x2x1\n",
+ " smoothing-sigmas: 3.0x2.0x1.0x0.0vox\n",
+ " totalFieldVarianceInVoxelSpace: 0.0\n",
+ " updateFieldVarianceInVoxelSpace: 3.0\n",
+ " use-histogram-matching: On\n",
+ " winsorize-image-intensities:\n",
+ " lowerQuantile: 0.005\n",
+ " upperQuantile: 0.995\n",
+ "\n",
+ " # Interpolation method for writing out transformed anatomical images.\n",
+ " # Possible values: Linear, BSpline, LanczosWindowedSinc\n",
+ " interpolation: LanczosWindowedSinc\n",
+ "\n",
+ " overwrite_transform:\n",
+ " run: Off\n",
+ "\n",
+ " # Choose the tool to overwrite transform, currently only support 'FSL' to overwrite 'ANTs' transforms in ABCD-options pipeline.\n",
+ " # using: 'FSL'\n",
+ " using: FSL\n",
+ "\n",
+ " # The resolution to which anatomical images should be transformed during registration.\n",
+ " # This is the resolution at which processed anatomical files will be output.\n",
+ " resolution_for_anat: 1mm\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_brain_template: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_T1w.nii.gz\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_template: /code/CPAC/resources/templates/mni_icbm152_t1_tal_nlin_asym_09c.nii\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_brain_template_mask: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_mask.nii.gz\n",
+ "\n",
+ " # Register skull-on anatomical image to a template.\n",
+ " reg_with_skull: Off\n",
+ "\n",
+ " functional_registration:\n",
+ " coregistration:\n",
+ "\n",
+ " # functional (BOLD/EPI) registration to anatomical (structural/T1)\n",
+ " run: On\n",
+ " func_input_prep:\n",
+ "\n",
+ " # Choose whether to use functional brain or skull as the input to functional-to-anatomical registration\n",
+ " reg_with_skull: Off\n",
+ "\n",
+ " # Choose whether to use the mean of the functional/EPI as the input to functional-to-anatomical registration or one of the volumes from the functional 4D timeseries that you choose.\n",
+ " # input: ['Mean_Functional', 'Selected_Functional_Volume', 'fmriprep_reference']\n",
+ " input: [fmriprep_reference]\n",
+ " Mean Functional:\n",
+ "\n",
+ " # Run ANTs’ N4 Bias Field Correction on the input BOLD (EPI)\n",
+ " # this can increase tissue contrast which may improve registration quality in some data\n",
+ " n4_correct_func: Off\n",
+ "\n",
+ " Selected Functional Volume:\n",
+ "\n",
+ " # Only for when 'Use as Functional-to-Anatomical Registration Input' is set to 'Selected Functional Volume'.\n",
+ " #Input the index of which volume from the functional 4D timeseries input file you wish to use as the input for functional-to-anatomical registration.\n",
+ " func_reg_input_volume: 0\n",
+ "\n",
+ " boundary_based_registration:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [On]\n",
+ "\n",
+ " # reference for boundary based registration\n",
+ " # options: 'whole-head' or 'brain'\n",
+ " reference: brain\n",
+ "\n",
+ " # choose which FAST map to generate BBR WM mask\n",
+ " # options: 'probability_map', 'partial_volume_map'\n",
+ " bbr_wm_map: partial_volume_map\n",
+ "\n",
+ " # optional FAST arguments to generate BBR WM mask\n",
+ " bbr_wm_mask_args: -bin\n",
+ "\n",
+ " # Standard FSL 5.0 Scheduler used for Boundary Based Registration.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard MNI registration.\n",
+ " bbr_schedule: /Users/jon.clucas/fsl/etc/flirtsch/bbr.sch\n",
+ "\n",
+ " # reference: 'brain' or 'restore-brain'\n",
+ " # In ABCD-options pipeline, 'restore-brain' is used as coregistration reference\n",
+ " reference: brain\n",
+ "\n",
+ " # Choose FSL or ABCD as coregistration method\n",
+ " using: FSL\n",
+ "\n",
+ " # Choose brain or whole-head as coregistration input\n",
+ " input: brain\n",
+ "\n",
+ " # Choose coregistration interpolation\n",
+ " interpolation: trilinear\n",
+ "\n",
+ " # Choose coregistration cost function\n",
+ " cost: corratio\n",
+ "\n",
+ " # Choose coregistration degree of freedom\n",
+ " dof: 6\n",
+ "\n",
+ " # Extra arguments for FSL flirt\n",
+ " arguments:\n",
+ "\n",
+ " func_registration_to_template:\n",
+ "\n",
+ " # these options modify the application (to the functional data), not the calculation, of the\n",
+ " # T1-to-template and EPI-to-template transforms calculated earlier during registration\n",
+ " # apply the functional-to-template (T1 template) registration transform to the functional data\n",
+ " run: On\n",
+ "\n",
+ " # apply the functional-to-template (EPI template) registration transform to the functional data\n",
+ " run_EPI: Off\n",
+ " apply_transform:\n",
+ "\n",
+ " # options: 'default', 'abcd', 'single_step_resampling_from_stc', 'dcan_nhp'\n",
+ " # 'default': apply func-to-anat and anat-to-template transforms on motion corrected functional image.\n",
+ " # 'abcd': apply motion correction, func-to-anat and anat-to-template transforms on each of raw functional volume using FSL applywarp based on ABCD-HCP pipeline.\n",
+ " # 'single_step_resampling_from_stc': apply motion correction, func-to-anat and anat-to-template transforms on each of slice-time-corrected functional volume using ANTs antsApplyTransform based on fMRIPrep pipeline.\n",
+ " # - if 'single_step_resampling_from_stc', 'template' is the only valid option for ``nuisance_corrections: 2-nuisance_regression: space``\n",
+ " using: single_step_resampling_from_stc\n",
+ "\n",
+ " output_resolution:\n",
+ "\n",
+ " # The resolution (in mm) to which the preprocessed, registered functional timeseries outputs are written into.\n",
+ " # NOTE:\n",
+ " # selecting a 1 mm or 2 mm resolution might substantially increase your RAM needs- these resolutions should be selected with caution.\n",
+ " # for most cases, 3 mm or 4 mm resolutions are suggested.\n",
+ " # NOTE:\n",
+ " # this also includes the single-volume 3D preprocessed functional data,\n",
+ " # such as the mean functional (mean EPI) in template space\n",
+ " func_preproc_outputs: 3.438mmx3.438mmx3.4mm\n",
+ "\n",
+ " # The resolution (in mm) to which the registered derivative outputs are written into.\n",
+ " # NOTE:\n",
+ " # this is for the single-volume functional-space outputs (i.e. derivatives)\n",
+ " # thus, a higher resolution may not result in a large increase in RAM needs as above\n",
+ " func_derivative_outputs: 3.438mmx3.438mmx3.4mm\n",
+ "\n",
+ " target_template:\n",
+ "\n",
+ " # choose which template space to transform derivatives towards\n",
+ " # using: ['T1_template', 'EPI_template']\n",
+ " # this is a fork point\n",
+ " # NOTE:\n",
+ " # this will determine which registration transform to use to warp the functional\n",
+ " # outputs and derivatives to template space\n",
+ " using: [T1_template]\n",
+ " T1_template:\n",
+ "\n",
+ " # Standard Skull Stripped Template. Used as a reference image for functional registration.\n",
+ " # This can be different than the template used as the reference/fixed for T1-to-template registration.\n",
+ " T1w_brain_template_funcreg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_T1w_reference.nii.gz\n",
+ "\n",
+ " # Standard Anatomical Brain Image with Skull.\n",
+ " # This can be different than the template used as the reference/fixed for T1-to-template registration.\n",
+ " T1w_template_funcreg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_T1w_reference.nii.gz\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_brain_template_mask_funcreg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_mask.nii.gz\n",
+ "\n",
+ " # a standard template for resampling if using float resolution\n",
+ " T1w_template_for_resample: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_T1w.nii.gz\n",
+ "\n",
+ " EPI_template:\n",
+ "\n",
+ " # EPI template for direct functional-to-template registration\n",
+ " # (bypassing coregistration and the anatomical-to-template transforms)\n",
+ " EPI_template_funcreg:\n",
+ "\n",
+ " # EPI template mask.\n",
+ " EPI_template_mask_funcreg:\n",
+ "\n",
+ " # a standard template for resampling if using float resolution\n",
+ " EPI_template_for_resample:\n",
+ "\n",
+ " ANTs_pipelines:\n",
+ "\n",
+ " # Interpolation method for writing out transformed functional images.\n",
+ " # Possible values: Linear, BSpline, LanczosWindowedSinc\n",
+ " interpolation: LanczosWindowedSinc\n",
+ "\n",
+ " FNIRT_pipelines:\n",
+ "\n",
+ " # Interpolation method for writing out transformed functional images.\n",
+ " # Possible values: trilinear, sinc, spline\n",
+ " interpolation: sinc\n",
+ "\n",
+ " # Identity matrix used during FSL-based resampling of functional-space data throughout the pipeline.\n",
+ " # It is not necessary to change this path unless you intend to use a different template.\n",
+ " identity_matrix: /Users/jon.clucas/fsl/etc/flirtsch/ident.mat\n",
+ "\n",
+ " EPI_registration:\n",
+ "\n",
+ " # directly register the mean functional to an EPI template\n",
+ " # instead of applying the anatomical T1-to-template transform to the functional data that has been\n",
+ " # coregistered to anatomical/T1 space\n",
+ " run: Off\n",
+ "\n",
+ " # using: ['ANTS', 'FSL', 'FSL-linear']\n",
+ " # this is a fork point\n",
+ " # ex. selecting both ['ANTS', 'FSL'] will run both and fork the pipeline\n",
+ " using: [ANTS]\n",
+ "\n",
+ " # EPI template for direct functional-to-template registration\n",
+ " # (bypassing coregistration and the anatomical-to-template transforms)\n",
+ " EPI_template: s3://fcp-indi/resources/cpac/resources/epi_hbn.nii.gz\n",
+ "\n",
+ " # EPI template mask.\n",
+ " EPI_template_mask:\n",
+ " ANTs:\n",
+ "\n",
+ " # EPI registration configuration - synonymous with T1_registration\n",
+ " # parameters under anatomical registration above\n",
+ " parameters:\n",
+ "\n",
+ " # Interpolation method for writing out transformed EPI images.\n",
+ " # Possible values: Linear, BSpline, LanczosWindowedSinc\n",
+ " interpolation: LanczosWindowedSinc\n",
+ "\n",
+ " FSL-FNIRT:\n",
+ "\n",
+ " # Configuration file to be used by FSL to set FNIRT parameters.\n",
+ " # It is not necessary to change this path unless you intend to use custom FNIRT parameters or a non-standard template.\n",
+ " fnirt_config: T1_2_MNI152_2mm\n",
+ "\n",
+ " # Interpolation method for writing out transformed EPI images.\n",
+ " # Possible values: trilinear, sinc, spline\n",
+ " interpolation: sinc\n",
+ "\n",
+ " # Identity matrix used during FSL-based resampling of BOLD-space data throughout the pipeline.\n",
+ " # It is not necessary to change this path unless you intend to use a different template.\n",
+ " identity_matrix: /Users/jon.clucas/fsl/etc/flirtsch/ident.mat\n",
+ "\n",
+ "functional_preproc:\n",
+ " run: On\n",
+ " update_header:\n",
+ "\n",
+ " # Convert raw data from LPI to RPI\n",
+ " run: On\n",
+ "\n",
+ " slice_timing_correction:\n",
+ "\n",
+ " # Interpolate voxel time courses so they are sampled at the same time points.\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [On]\n",
+ "\n",
+ " # use specified slice time pattern rather than one in header\n",
+ " tpattern:\n",
+ "\n",
+ " # align each slice to given time offset\n",
+ " # The default alignment time is the average of the 'tpattern' values (either from the dataset header or from the tpattern option).\n",
+ " tzero:\n",
+ "\n",
+ " motion_estimates_and_correction:\n",
+ " run: On\n",
+ " motion_estimates:\n",
+ "\n",
+ " # calculate motion statistics BEFORE slice-timing correction\n",
+ " calculate_motion_first: On\n",
+ "\n",
+ " # calculate motion statistics AFTER motion correction\n",
+ " calculate_motion_after: On\n",
+ "\n",
+ " motion_correction:\n",
+ "\n",
+ " # using: ['3dvolreg', 'mcflirt']\n",
+ " # Forking is currently broken for this option.\n",
+ " # Please use separate configs if you want to use each of 3dvolreg and mcflirt.\n",
+ " # Follow https://github.com/FCP-INDI/C-PAC/issues/1935 to see when this issue is resolved.\n",
+ " using: [mcflirt]\n",
+ "\n",
+ " # option parameters\n",
+ " AFNI-3dvolreg:\n",
+ "\n",
+ " # This option is useful when aligning high-resolution datasets that may need more alignment than a few voxels.\n",
+ " functional_volreg_twopass: On\n",
+ "\n",
+ " # Choose motion correction reference. Options: mean, median, selected_volume, fmriprep_reference\n",
+ " motion_correction_reference: [fmriprep_reference]\n",
+ "\n",
+ " # Choose motion correction reference volume\n",
+ " motion_correction_reference_volume: 0\n",
+ "\n",
+ " motion_estimate_filter:\n",
+ "\n",
+ " # Filter physiological (respiration) artifacts from the head motion estimates.\n",
+ " # Adapted from DCAN Labs filter.\n",
+ " # https://www.ohsu.edu/school-of-medicine/developmental-cognition-and-neuroimaging-lab\n",
+ " # https://www.biorxiv.org/content/10.1101/337360v1.full.pdf\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ " filters: []\n",
+ "\n",
+ " distortion_correction:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [On]\n",
+ " Blip-FSL-TOPUP:\n",
+ "\n",
+ " # (approximate) resolution (in mm) of warp basis for the different sub-sampling levels, default 10\n",
+ " warpres: 10\n",
+ "\n",
+ " # sub-sampling scheme, default 1\n",
+ " subsamp: 1\n",
+ "\n",
+ " # FWHM (in mm) of gaussian smoothing kernel, default 8\n",
+ " fwhm: 8\n",
+ "\n",
+ " # Max # of non-linear iterations, default 5\n",
+ " miter: 5\n",
+ "\n",
+ " # Weight of regularisation, default depending on --ssqlambda and --regmod switches. See user documentation.\n",
+ " lambda: 1\n",
+ "\n",
+ " # If set (=1), lambda is weighted by current ssq, default 1\n",
+ " ssqlambda: 1\n",
+ "\n",
+ " # Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy\n",
+ " regmod: bending_energy\n",
+ "\n",
+ " # Estimate movements if set, default 1 (true)\n",
+ " estmov: 1\n",
+ "\n",
+ " # Minimisation method 0=Levenberg-Marquardt, 1=Scaled Conjugate Gradient, default 0 (LM)\n",
+ " minmet: 0\n",
+ "\n",
+ " # Order of spline, 2->Qadratic spline, 3->Cubic spline. Default=3\n",
+ " splineorder: 3\n",
+ "\n",
+ " # Precision for representing Hessian, double or float. Default double\n",
+ " numprec: double\n",
+ "\n",
+ " # Image interpolation model, linear or spline. Default spline\n",
+ " interp: spline\n",
+ "\n",
+ " # If set (=1), the images are individually scaled to a common mean, default 0 (false)\n",
+ " scale: 0\n",
+ "\n",
+ " # If set (=1), the calculations are done in a different grid, default 1 (true)\n",
+ " regrid: 1\n",
+ "\n",
+ " # using: ['PhaseDiff', 'Blip', 'Blip-FSL-TOPUP']\n",
+ " # PhaseDiff - Perform field map correction using a single phase difference image, a subtraction of the two phase images from each echo. Default scanner for this method is SIEMENS.\n",
+ " # Blip - Uses AFNI 3dQWarp to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.\n",
+ " # Blip-FSL-TOPUP - Uses FSL TOPUP to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.\n",
+ " using: [PhaseDiff, Blip]\n",
+ "\n",
+ " # option parameters\n",
+ " PhaseDiff:\n",
+ "\n",
+ " # Since the quality of the distortion heavily relies on the skull-stripping step, we provide a choice of method ('AFNI' for AFNI 3dSkullStrip or 'BET' for FSL BET).\n",
+ " # Options: 'BET' or 'AFNI'\n",
+ " fmap_skullstrip_option: BET\n",
+ "\n",
+ " # Set the fraction value for the skull-stripping of the magnitude file. Depending on the data, a tighter extraction may be necessary in order to prevent noisy voxels from interfering with preparing the field map.\n",
+ " # The default value is 0.5.\n",
+ " fmap_skullstrip_BET_frac: 0.5\n",
+ "\n",
+ " # Set the threshold value for the skull-stripping of the magnitude file. Depending on the data, a tighter extraction may be necessary in order to prevent noisy voxels from interfering with preparing the field map.\n",
+ " # The default value is 0.6.\n",
+ " fmap_skullstrip_AFNI_threshold: 0.6\n",
+ "\n",
+ " func_masking:\n",
+ " run: On\n",
+ " FSL-BET:\n",
+ "\n",
+ " # Set an intensity threshold to improve skull stripping performances of FSL BET on rodent scans.\n",
+ " functional_mean_thr:\n",
+ " run: Off\n",
+ " threshold_value: 98\n",
+ "\n",
+ " # Bias correct the functional mean image to improve skull stripping performances of FSL BET on rodent scans\n",
+ " functional_mean_bias_correction: Off\n",
+ "\n",
+ " # Apply to 4D FMRI data, if bold_bet_functional_mean_boolean : Off.\n",
+ " # Mutually exclusive with functional, reduce_bias, robust, padding, remove_eyes, surfaces\n",
+ " # It must be 'on' if select 'reduce_bias', 'robust', 'padding', 'remove_eyes', or 'bet_surfaces' on\n",
+ " functional_mean_boolean: Off\n",
+ "\n",
+ " # Set the threshold value controling the brain vs non-brain voxels.\n",
+ " frac: 0.3\n",
+ "\n",
+ " # Mesh created along with skull stripping\n",
+ " mesh_boolean: Off\n",
+ "\n",
+ " # Create a surface outline image\n",
+ " outline: Off\n",
+ "\n",
+ " # Add padding to the end of the image, improving BET.Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " padding: Off\n",
+ "\n",
+ " # Integer value of head radius\n",
+ " radius: 0\n",
+ "\n",
+ " # Reduce bias and cleanup neck. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " reduce_bias: Off\n",
+ "\n",
+ " # Eyes and optic nerve cleanup. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " remove_eyes: Off\n",
+ "\n",
+ " # Robust brain center estimation. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " robust: Off\n",
+ "\n",
+ " # Create a skull image\n",
+ " skull: Off\n",
+ "\n",
+ " # Gets additional skull and scalp surfaces by running bet2 and betsurf. This is mutually exclusive with reduce_bias, robust, padding, remove_eyes\n",
+ " surfaces: Off\n",
+ "\n",
+ " # Apply thresholding to segmented brain image and mask\n",
+ " threshold: Off\n",
+ "\n",
+ " # Vertical gradient in fractional intensity threshold (-1,1)\n",
+ " vertical_gradient: 0.0\n",
+ "\n",
+ " FSL_AFNI:\n",
+ " bold_ref: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz\n",
+ " brain_mask: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_desc-brain_mask.nii.gz\n",
+ " brain_probseg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz\n",
+ "\n",
+ " # Apply functional mask in native space\n",
+ " apply_func_mask_in_native_space: On\n",
+ "\n",
+ " # using: ['AFNI', 'FSL', 'FSL_AFNI', 'Anatomical_Refined', 'Anatomical_Based', 'Anatomical_Resampled', 'CCS_Anatomical_Refined']\n",
+ " # FSL_AFNI: fMRIPrep-style BOLD mask. Ref: https://github.com/nipreps/niworkflows/blob/a221f612/niworkflows/func/util.py#L246-L514\n",
+ " # Anatomical_Refined: 1. binarize anat mask, in case it is not a binary mask. 2. fill holes of anat mask 3. init_bold_mask : input raw func → dilate init func brain mask 4. refined_bold_mask : input motion corrected func → dilate anatomical mask 5. get final func mask\n",
+ " # Anatomical_Based: Generate the BOLD mask by basing it off of the anatomical brain mask. Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.\n",
+ " # Anatomical_Resampled: Resample anatomical brain mask in standard space to get BOLD brain mask in standard space. Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline. (\"Create fMRI resolution standard space files for T1w image, wmparc, and brain mask […] don't use FLIRT to do spline interpolation with -applyisoxfm for the 2mm and 1mm cases because it doesn't know the peculiarities of the MNI template FOVs\")\n",
+ " # CCS_Anatomical_Refined: Generate the BOLD mask by basing it off of the anatomical brain. Adapted from the BOLD mask method from the CCS pipeline.\n",
+ " # this is a fork point\n",
+ " using: [FSL_AFNI]\n",
+ " Anatomical_Refined:\n",
+ "\n",
+ " # Choose whether or not to dilate the anatomical mask if you choose 'Anatomical_Refined' as the functional masking option. It will dilate one voxel if enabled.\n",
+ " anatomical_mask_dilation: Off\n",
+ "\n",
+ " generate_func_mean:\n",
+ "\n",
+ " # Generate mean functional image\n",
+ " run: On\n",
+ "\n",
+ " normalize_func:\n",
+ "\n",
+ " # Normalize functional image\n",
+ " run: Off\n",
+ "\n",
+ " truncation:\n",
+ "\n",
+ " # First timepoint to include in analysis.\n",
+ " # Default is 0 (beginning of timeseries).\n",
+ " # First timepoint selection in the scan parameters in the data configuration file, if present, will over-ride this selection.\n",
+ " # Note: the selection here applies to all scans of all participants.\n",
+ " start_tr: 0\n",
+ "\n",
+ " # Last timepoint to include in analysis.\n",
+ " # Default is None or End (end of timeseries).\n",
+ " # Last timepoint selection in the scan parameters in the data configuration file, if present, will over-ride this selection.\n",
+ " # Note: the selection here applies to all scans of all participants.\n",
+ " stop_tr:\n",
+ "\n",
+ " scaling:\n",
+ "\n",
+ " # Scale functional raw data, usually used in rodent pipeline\n",
+ " run: Off\n",
+ "\n",
+ " # Scale the size of the dataset voxels by the factor.\n",
+ " scaling_factor: 10\n",
+ "\n",
+ " despiking:\n",
+ "\n",
+ " # Run AFNI 3dDespike\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ " space: native\n",
+ "\n",
+ " coreg_prep:\n",
+ "\n",
+ " # Generate sbref\n",
+ " run: On\n",
+ "\n",
+ "nuisance_corrections:\n",
+ " 2-nuisance_regression:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ "\n",
+ " # Select which nuisance signal corrections to apply\n",
+ " Regressors:\n",
+ " - Name: Regressor_1\n",
+ " Bandpass:\n",
+ " bottom_frequency: 0.01\n",
+ " top_frequency: 0.1\n",
+ " CerebrospinalFluid:\n",
+ " erode_mask: Off\n",
+ " extraction_resolution: 2\n",
+ " summary: Mean\n",
+ " GlobalSignal:\n",
+ " summary: Mean\n",
+ " Motion:\n",
+ " include_delayed: On\n",
+ " include_delayed_squared: On\n",
+ " include_squared: On\n",
+ " PolyOrt:\n",
+ " degree: 2\n",
+ " WhiteMatter:\n",
+ " erode_mask: Off\n",
+ " extraction_resolution: 2\n",
+ " summary: Mean\n",
+ " aCompCor:\n",
+ " extraction_resolution: 2\n",
+ " summary:\n",
+ " components: 5\n",
+ " filter: cosine\n",
+ " method: PC\n",
+ " tissues:\n",
+ " - WhiteMatter\n",
+ " - CerebrospinalFluid\n",
+ " tCompCor:\n",
+ " degree: 2\n",
+ " erode_mask_mm: On\n",
+ " summary:\n",
+ " components: 5\n",
+ " filter: cosine\n",
+ " method: PC\n",
+ " threshold: 5PCT\n",
+ "\n",
+ " # Process and refine masks used to produce regressors and time series for\n",
+ " # regression.\n",
+ " regressor_masks:\n",
+ " erode_anatomical_brain_mask:\n",
+ "\n",
+ " # Erode brain mask in millimeters, default for brain mask is 30 mm\n",
+ " # Brain erosion default is using millimeters.\n",
+ " brain_mask_erosion_mm: 30\n",
+ "\n",
+ " # Erode binarized anatomical brain mask. If choosing True, please also set regressor_masks['erode_csf']['run']: True; anatomical_preproc['brain_extraction']['using']: niworkflows-ants.\n",
+ " run: On\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # Default proportion is None for anatomical brain mask.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " brain_mask_erosion_prop:\n",
+ "\n",
+ " # Erode binarized brain mask in millimeter\n",
+ " brain_erosion_mm:\n",
+ "\n",
+ " erode_csf:\n",
+ "\n",
+ " # Erode cerebrospinal fluid mask in millimeters, default for cerebrospinal fluid is 30mm\n",
+ " # Cerebrospinal fluid erosion default is using millimeters.\n",
+ " csf_mask_erosion_mm: 30\n",
+ "\n",
+ " # Erode binarized csf tissue mask.\n",
+ " run: On\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # Default proportion is None for cerebrospinal fluid mask.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " csf_erosion_prop:\n",
+ "\n",
+ " # Erode binarized cerebrospinal fluid mask in millimeter\n",
+ " csf_erosion_mm:\n",
+ "\n",
+ " erode_wm:\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # Default proportion is 0.6 for white matter mask.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " # White matter erosion default is using proportion erosion method when use erosion for white matter.\n",
+ " wm_erosion_prop: 0.6\n",
+ "\n",
+ " # Erode WM binarized tissue mask.\n",
+ " run: On\n",
+ "\n",
+ " # Erode white matter mask in millimeters, default for white matter is None\n",
+ " wm_mask_erosion_mm:\n",
+ "\n",
+ " # Erode binarized white matter mask in millimeters\n",
+ " wm_erosion_mm:\n",
+ "\n",
+ " erode_gm:\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " gm_erosion_prop: 0.6\n",
+ "\n",
+ " # Erode gray matter binarized tissue mask.\n",
+ " run: Off\n",
+ "\n",
+ " # Erode gray matter mask in millimeters\n",
+ " gm_mask_erosion_mm:\n",
+ "\n",
+ " # Erode binarized gray matter mask in millimeters\n",
+ " gm_erosion_mm:\n",
+ "\n",
+ " # this is not a fork point\n",
+ " # Run nuisance regression in native or template space\n",
+ " # - If set to template, will use the brain mask configured in\n",
+ " # ``functional_preproc: func_masking: FSL_AFNI: brain_mask``\n",
+ " # - If ``registration_workflows: functional_registration: func_registration_to_template: apply_trasnform: using: single_step_resampling_from_stc``, this must be set to template\n",
+ " space: template\n",
+ " ingress_regressors:\n",
+ " run: Off\n",
+ " Regressors:\n",
+ " Name: default\n",
+ " Columns: [global_signal]\n",
+ "\n",
+ " # switch to Off if nuisance regression is off and you don't want to write out the regressors\n",
+ " create_regressors: On\n",
+ "\n",
+ " # Standard Lateral Ventricles Binary Mask\n",
+ " # used in CSF mask refinement for CSF signal-related regressions\n",
+ " lateral_ventricles_mask:\n",
+ "\n",
+ " # Whether to run frequency filtering before or after nuisance regression.\n",
+ " # Options: 'After' or 'Before'\n",
+ " bandpass_filtering_order: After\n",
+ "\n",
+ " 1-ICA-AROMA:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ "\n",
+ " # Types of denoising strategy:\n",
+ " # nonaggr: nonaggressive-partial component regression\n",
+ " # aggr: aggressive denoising\n",
+ " denoising_type: nonaggr\n",
+ "\n",
+ "timeseries_extraction:\n",
+ " run: Off\n",
+ " connectivity_matrix:\n",
+ "\n",
+ " # Create a connectivity matrix from timeseries data\n",
+ " # Options:\n",
+ " # ['AFNI', 'Nilearn', 'ndmg']\n",
+ " using: [Nilearn, ndmg]\n",
+ "\n",
+ " # Options:\n",
+ " # ['Pearson', 'Partial']\n",
+ " # Note: These options are not configurable for ndmg, which will ignore these options\n",
+ " measure: [Pearson, Partial]\n",
+ "\n",
+ " # Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for time-series extraction, and then select which types of analyses to run.\n",
+ " # Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and SpatialReg, you would enter: '/path/to/ROI.nii.gz': Avg, SpatialReg\n",
+ " # available analyses:\n",
+ " # /path/to/atlas.nii.gz: Avg, Voxel, SpatialReg\n",
+ " tse_roi_paths:\n",
+ " /cpac_templates/CC400.nii.gz: Avg\n",
+ " /cpac_templates/aal_mask_pad.nii.gz: Avg\n",
+ " /cpac_templates/CC200.nii.gz: Avg\n",
+ " /cpac_templates/tt_mask_pad.nii.gz: Avg\n",
+ " /cpac_templates/PNAS_Smith09_rsn10.nii.gz: SpatialReg\n",
+ " /cpac_templates/ho_mask_pad.nii.gz: Avg\n",
+ " /cpac_templates/rois_3mm.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/AAL_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/CAPRSC_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/DKT_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/DesikanKlein_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/HarvardOxfordcort-maxprob-thr25_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/HarvardOxfordsub-maxprob-thr25_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Juelich_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/MICCAI_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-1000Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-200Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-300Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-400Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Talairach_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Brodmann_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Desikan_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Glasser_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Slab907_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-17-liberal_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-17_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-7-liberal_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-7_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ "\n",
+ " # Functional time-series and ROI realignment method: ['ROI_to_func'] or ['func_to_ROI']\n",
+ " # 'ROI_to_func' will realign the atlas/ROI to functional space (fast)\n",
+ " # 'func_to_ROI' will realign the functional time series to the atlas/ROI space\n",
+ " #\n",
+ " # NOTE: in rare cases, realigning the ROI to the functional space may\n",
+ " # result in small misalignments for very small ROIs - please double\n",
+ " # check your data if you see issues\n",
+ " realignment: ROI_to_func\n",
+ "\n",
+ "amplitude_low_frequency_fluctuation:\n",
+ "\n",
+ " # ALFF & f/ALFF\n",
+ " # Calculate Amplitude of Low Frequency Fluctuations (ALFF) and fractional ALFF (f/ALFF) for all voxels.\n",
+ " run: Off\n",
+ "\n",
+ " # space: Template or Native\n",
+ " target_space: [Native]\n",
+ "\n",
+ " # Frequency cutoff (in Hz) for the high-pass filter used when calculating f/ALFF.\n",
+ " highpass_cutoff: [0.01]\n",
+ "\n",
+ " # Frequency cutoff (in Hz) for the low-pass filter used when calculating f/ALFF\n",
+ " lowpass_cutoff: [0.1]\n",
+ "\n",
+ "regional_homogeneity:\n",
+ "\n",
+ " # ReHo\n",
+ " # Calculate Regional Homogeneity (ReHo) for all voxels.\n",
+ " run: Off\n",
+ "\n",
+ " # space: Template or Native\n",
+ " target_space: [Native]\n",
+ "\n",
+ " # Number of neighboring voxels used when calculating ReHo\n",
+ " # 7 (Faces)\n",
+ " # 19 (Faces + Edges)\n",
+ " # 27 (Faces + Edges + Corners)\n",
+ " cluster_size: 27\n",
+ "\n",
+ "voxel_mirrored_homotopic_connectivity:\n",
+ "\n",
+ " # VMHC\n",
+ " # Calculate Voxel-mirrored Homotopic Connectivity (VMHC) for all voxels.\n",
+ " run: Off\n",
+ " symmetric_registration:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_brain_template_symmetric:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_brain_template_symmetric_funcreg: /Users/jon.clucas/fsl/data/standard/MNI152_T1_${func_resolution}_brain_symmetric.nii.gz\n",
+ "\n",
+ " # A reference symmetric brain template for resampling\n",
+ " T1w_brain_template_symmetric_for_resample:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_template_symmetric:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_template_symmetric_funcreg: /Users/jon.clucas/fsl/data/standard/MNI152_T1_${func_resolution}_symmetric.nii.gz\n",
+ "\n",
+ " # A reference symmetric skull template for resampling\n",
+ " T1w_template_symmetric_for_resample:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " dilated_symmetric_brain_mask:\n",
+ "\n",
+ " # A reference symmetric brain mask template for resampling\n",
+ " dilated_symmetric_brain_mask_for_resample:\n",
+ "\n",
+ "network_centrality:\n",
+ "\n",
+ " # Calculate Degree, Eigenvector Centrality, or Functional Connectivity Density.\n",
+ " run: Off\n",
+ "\n",
+ " # Maximum amount of RAM (in GB) to be used when calculating Degree Centrality.\n",
+ " # Calculating Eigenvector Centrality will require additional memory based on the size of the mask or number of ROI nodes.\n",
+ " memory_allocation: 3.0\n",
+ "\n",
+ " # Full path to a NIFTI file describing the mask. Centrality will be calculated for all voxels within the mask.\n",
+ " template_specification_file:\n",
+ " degree_centrality:\n",
+ "\n",
+ " # Enable/Disable degree centrality by selecting the connectivity weights\n",
+ " # weight_options: ['Binarized', 'Weighted']\n",
+ " # disable this type of centrality with:\n",
+ " # weight_options: []\n",
+ " weight_options: []\n",
+ "\n",
+ " # Select the type of threshold used when creating the degree centrality adjacency matrix.\n",
+ " # options:\n",
+ " # 'Significance threshold', 'Sparsity threshold', 'Correlation threshold'\n",
+ " correlation_threshold_option: Sparsity threshold\n",
+ "\n",
+ " # Based on the Threshold Type selected above, enter a Threshold Value.\n",
+ " # P-value for Significance Threshold\n",
+ " # Sparsity value for Sparsity Threshold\n",
+ " # Pearson's r value for Correlation Threshold\n",
+ " correlation_threshold: 0.001\n",
+ "\n",
+ " eigenvector_centrality:\n",
+ "\n",
+ " # Enable/Disable eigenvector centrality by selecting the connectivity weights\n",
+ " # weight_options: ['Binarized', 'Weighted']\n",
+ " # disable this type of centrality with:\n",
+ " # weight_options: []\n",
+ " weight_options: [Binarized, Weighted]\n",
+ "\n",
+ " # Select the type of threshold used when creating the eigenvector centrality adjacency matrix.\n",
+ " # options:\n",
+ " # 'Significance threshold', 'Sparsity threshold', 'Correlation threshold'\n",
+ " correlation_threshold_option: Sparsity threshold\n",
+ "\n",
+ " # Based on the Threshold Type selected above, enter a Threshold Value.\n",
+ " # P-value for Significance Threshold\n",
+ " # Sparsity value for Sparsity Threshold\n",
+ " # Pearson's r value for Correlation Threshold\n",
+ " correlation_threshold: 0.001\n",
+ "\n",
+ " local_functional_connectivity_density:\n",
+ "\n",
+ " # Enable/Disable lFCD by selecting the connectivity weights\n",
+ " # weight_options: ['Binarized', 'Weighted']\n",
+ " # disable this type of centrality with:\n",
+ " # weight_options: []\n",
+ " weight_options: []\n",
+ "\n",
+ " # Select the type of threshold used when creating the lFCD adjacency matrix.\n",
+ " # options:\n",
+ " # 'Significance threshold', 'Correlation threshold'\n",
+ " correlation_threshold_option: Significance threshold\n",
+ "\n",
+ " # Based on the Threshold Type selected above, enter a Threshold Value.\n",
+ " # P-value for Significance Threshold\n",
+ " # Sparsity value for Sparsity Threshold\n",
+ " # Pearson's r value for Correlation Threshold\n",
+ " correlation_threshold: 0.001\n",
+ "\n",
+ "longitudinal_template_generation:\n",
+ "\n",
+ " # If you have multiple T1w's, you can generate your own run-specific custom\n",
+ " # T1w template to serve as an intermediate to the standard template for\n",
+ " # anatomical registration.\n",
+ " # This runs before the main pipeline as it requires multiple T1w sessions\n",
+ " # at once.\n",
+ " run: Off\n",
+ "\n",
+ " # Freesurfer longitudinal template algorithm using FSL FLIRT\n",
+ " # Method to average the dataset at each iteration of the template creation\n",
+ " # Options: median, mean or std\n",
+ " average_method: median\n",
+ "\n",
+ " # Degree of freedom for FLIRT in the template creation\n",
+ " # Options: 12 (affine), 9 (traditional), 7 (global rescale) or 6 (rigid body)\n",
+ " dof: 12\n",
+ "\n",
+ " # Interpolation parameter for FLIRT in the template creation\n",
+ " # Options: trilinear, nearestneighbour, sinc or spline\n",
+ " interp: trilinear\n",
+ "\n",
+ " # Cost function for FLIRT in the template creation\n",
+ " # Options: corratio, mutualinfo, normmi, normcorr, leastsq, labeldiff or bbr\n",
+ " cost: corratio\n",
+ "\n",
+ " # Number of threads used for one run of the template generation algorithm\n",
+ " thread_pool: 2\n",
+ "\n",
+ " # Threshold of transformation distance to consider that the loop converged\n",
+ " # (-1 means numpy.finfo(np.float64).eps and is the default)\n",
+ " convergence_threshold: -1\n",
+ "\n",
+ "# OUTPUTS AND DERIVATIVES\n",
+ "# -----------------------\n",
+ "post_processing:\n",
+ " spatial_smoothing:\n",
+ " run: On\n",
+ "\n",
+ " # Smooth the derivative outputs.\n",
+ " # Set as ['nonsmoothed'] to disable smoothing. Set as ['smoothed', 'nonsmoothed'] to get both.\n",
+ " #\n",
+ " # Options:\n",
+ " # ['smoothed', 'nonsmoothed']\n",
+ " output: [smoothed]\n",
+ "\n",
+ " # Tool to use for smoothing.\n",
+ " # 'FSL' for FSL MultiImageMaths for FWHM provided\n",
+ " # 'AFNI' for AFNI 3dBlurToFWHM for FWHM provided\n",
+ " smoothing_method: [FSL]\n",
+ "\n",
+ " # Full Width at Half Maximum of the Gaussian kernel used during spatial smoothing.\n",
+ " # this is a fork point\n",
+ " # i.e. multiple kernels - fwhm: [4,6,8]\n",
+ " fwhm: [4]\n",
+ "\n",
+ " z-scoring:\n",
+ " run: On\n",
+ "\n",
+ " # z-score standardize the derivatives. This may be needed for group-level analysis.\n",
+ " # Set as ['raw'] to disable z-scoring. Set as ['z-scored', 'raw'] to get both.\n",
+ " #\n",
+ " # Options:\n",
+ " # ['z-scored', 'raw']\n",
+ " output: [z-scored]\n",
+ "\n",
+ "seed_based_correlation_analysis:\n",
+ "\n",
+ " # SCA - Seed-Based Correlation Analysis\n",
+ " # For each extracted ROI Average time series, CPAC will generate a whole-brain correlation map.\n",
+ " # It should be noted that for a given seed/ROI, SCA maps for ROI Average time series will be the same.\n",
+ " run: Off\n",
+ "\n",
+ " # Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for seed-based correlation analysis, and then select which types of analyses to run.\n",
+ " # Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and MultReg, you would enter: '/path/to/ROI.nii.gz': Avg, MultReg\n",
+ " # available analyses:\n",
+ " # /path/to/atlas.nii.gz: Avg, DualReg, MultReg\n",
+ " sca_roi_paths:\n",
+ " /cpac_templates/PNAS_Smith09_rsn10.nii.gz: DualReg\n",
+ " /cpac_templates/CC400.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/ez_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/aal_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/CC200.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/tt_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/ho_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/rois_3mm.nii.gz: Avg, MultReg\n",
+ "\n",
+ " # Normalize each time series before running Dual Regression SCA.\n",
+ " norm_timeseries_for_DR: On\n",
+ "\n",
+ "# PACKAGE INTEGRATIONS\n",
+ "# --------------------\n",
+ "PyPEER:\n",
+ "\n",
+ " # Training of eye-estimation models. Commonly used for movies data/naturalistic viewing.\n",
+ " run: Off\n",
+ "\n",
+ " # PEER scan names to use for training\n",
+ " # Example: ['peer_run-1', 'peer_run-2']\n",
+ " eye_scan_names: []\n",
+ "\n",
+ " # Naturalistic viewing data scan names to use for eye estimation\n",
+ " # Example: ['movieDM']\n",
+ " data_scan_names: []\n",
+ "\n",
+ " # Template-space eye mask\n",
+ " eye_mask_path:\n",
+ "\n",
+ " # PyPEER Stimulus File Path\n",
+ " # This is a file describing the stimulus locations from the calibration sequence.\n",
+ " stimulus_path:\n",
+ " minimal_nuisance_correction:\n",
+ "\n",
+ " # PyPEER Minimal nuisance regression\n",
+ " # Note: PyPEER employs minimal preprocessing - these choices do not reflect what runs in the main pipeline.\n",
+ " # PyPEER uses non-nuisance-regressed data from the main pipeline.\n",
+ " # Global signal regression (PyPEER only)\n",
+ " peer_gsr: On\n",
+ "\n",
+ " # Motion scrubbing (PyPEER only)\n",
+ " peer_scrub: Off\n",
+ "\n",
+ " # Motion scrubbing threshold (PyPEER only)\n",
+ " scrub_thresh: 0.2\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "fmriprep_options = show_full_config(full_yaml=\"FROM: fmriprep-options\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d04a70ab-9d59-41f0-a839-f16384b40153",
+ "metadata": {},
+ "source": [
+ "We'll confirm it's the same as loading the preconfig directly:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "bb8b2fff-e781-4f94-888f-018257d3648b",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{}"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fmriprep_options - Preconfiguration(\"fmriprep-options\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "3a01a81b-0a2f-4981-9511-56e0b12aa89a",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{}"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "Preconfiguration(\"fmriprep-options\") - fmriprep_options"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8dc75425-7e94-47d3-8e90-e8ea53bc3aeb",
+ "metadata": {},
+ "source": [
+ "Okay, good, no difference in either direction.\n",
+ "\n",
+ "Now we can try loading a file with some modifications:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "18859a5a-d6d8-4dc2-a3e2-ebd738da8956",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FROM: fmriprep-options\n",
+ "segmentation:\n",
+ " tissue_segmentation:\n",
+ " FSL-FAST:\n",
+ " use_priors:\n",
+ " priors_path: /custom_fsl/data/standard/tissuepriors/2mm\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "priors_path_config = Path(\"priors_path.yml\")\n",
+ "with priors_path_config.open(\"r\", encoding=\"utf-8\") as _file:\n",
+ " print(_file.read())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "c3f34f21-060a-4e7b-b277-423ac2a89749",
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "%YAML 1.1\n",
+ "---\n",
+ "# CPAC Pipeline Configuration YAML file\n",
+ "# Version 1.8.7.dev1\n",
+ "#\n",
+ "# http://fcp-indi.github.io for more info.\n",
+ "#\n",
+ "# Tip: This file can be edited manually with a text editor for quick modifications.\n",
+ "pipeline_setup:\n",
+ "\n",
+ " # Name for this pipeline configuration - useful for identification.\n",
+ " # This string will be sanitized and used in filepaths\n",
+ " pipeline_name: cpac_fmriprep-options\n",
+ " output_directory:\n",
+ "\n",
+ " # Quality control outputs\n",
+ " quality_control:\n",
+ "\n",
+ " # Generate eXtensible Connectivity Pipeline-style quality control files\n",
+ " generate_xcpqc_files: Off\n",
+ "\n",
+ " # Generate quality control pages containing preprocessing and derivative outputs.\n",
+ " generate_quality_control_images: Off\n",
+ "\n",
+ " # Directory where C-PAC should write out processed data, logs, and crash reports.\n",
+ " # - If running in a container (Singularity/Docker), you can simply set this to an arbitrary\n",
+ " # name like '/outputs', and then map (-B/-v) your desired output directory to that label.\n",
+ " # - If running outside a container, this should be a full path to a directory.\n",
+ " path: /outputs/output\n",
+ "\n",
+ " # (Optional) Path to a BIDS-Derivatives directory that already has outputs.\n",
+ " # - This option is intended to ingress already-existing resources from an output\n",
+ " # directory without writing new outputs back into the same directory.\n",
+ " # - If provided, C-PAC will ingress the already-computed outputs from this directory and\n",
+ " # continue the pipeline from where they leave off.\n",
+ " # - If left as 'None', C-PAC will ingress any already-computed outputs from the\n",
+ " # output directory you provide above in 'path' instead, the default behavior.\n",
+ " source_outputs_dir:\n",
+ "\n",
+ " # Set to True to make C-PAC ingress the outputs from the primary output directory if they\n",
+ " # exist, even if a source_outputs_dir is provided\n",
+ " # - Setting to False will pull from source_outputs_dir every time, over-writing any\n",
+ " # calculated outputs in the main output directory\n",
+ " # - C-PAC will still pull from source_outputs_dir if the main output directory is\n",
+ " # empty, however\n",
+ " pull_source_once: On\n",
+ "\n",
+ " # Include extra versions and intermediate steps of functional preprocessing in the output directory.\n",
+ " write_func_outputs: Off\n",
+ "\n",
+ " # Include extra outputs in the output directory that may be of interest when more information is needed.\n",
+ " write_debugging_outputs: Off\n",
+ "\n",
+ " # Output directory format and structure.\n",
+ " # Options: default, ndmg\n",
+ " output_tree: default\n",
+ "\n",
+ " system_config:\n",
+ "\n",
+ " # Stop worklow execution on first crash?\n",
+ " fail_fast: Off\n",
+ "\n",
+ " # Random seed used to fix the state of execution.\n",
+ " # If unset, each process uses its own default.\n",
+ " # If set, a `random.log` file will be generated logging the random seed and each node to which that seed was applied.\n",
+ " # If set to a positive integer (up to 2147483647), that integer will be used to seed each process that accepts a random seed.\n",
+ " # If set to 'random', a random positive integer (up to 2147483647) will be generated and that seed will be used to seed each process that accepts a random seed.\n",
+ " random_seed:\n",
+ "\n",
+ " # Prior to running a pipeline C-PAC makes a rough estimate of a worst-case-scenario maximum concurrent memory usage with high-resoltion data, raising an exception describing the recommended minimum memory allocation for the given configuration.\n",
+ " # Turning this option off will allow pipelines to run without allocating the recommended minimum, allowing for more efficient runs at the risk of out-of-memory crashes (use at your own risk)\n",
+ " raise_insufficient: On\n",
+ "\n",
+ " # A callback.log file from a previous run can be provided to estimate memory usage based on that run.\n",
+ " observed_usage:\n",
+ "\n",
+ " # Path to callback log file with previously observed usage.\n",
+ " # Can be overridden with the commandline flag `--runtime_usage`.\n",
+ " callback_log:\n",
+ "\n",
+ " # Percent. E.g., `buffer: 10` would estimate 1.1 * the observed memory usage from the callback log provided in \"usage\".\n",
+ " # Can be overridden with the commandline flag `--runtime_buffer`.\n",
+ " buffer: 10\n",
+ "\n",
+ " # Select Off if you intend to run CPAC on a single machine.\n",
+ " # If set to On, CPAC will attempt to submit jobs through the job scheduler / resource manager selected below.\n",
+ " on_grid:\n",
+ " run: Off\n",
+ "\n",
+ " # Sun Grid Engine (SGE), Portable Batch System (PBS), or Simple Linux Utility for Resource Management (SLURM).\n",
+ " # Only applies if you are running on a grid or compute cluster.\n",
+ " resource_manager: SGE\n",
+ " SGE:\n",
+ "\n",
+ " # SGE Parallel Environment to use when running CPAC.\n",
+ " # Only applies when you are running on a grid or compute cluster using SGE.\n",
+ " parallel_environment: cpac\n",
+ "\n",
+ " # SGE Queue to use when running CPAC.\n",
+ " # Only applies when you are running on a grid or compute cluster using SGE.\n",
+ " queue: all.q\n",
+ "\n",
+ " # The maximum amount of memory each participant's workflow can allocate.\n",
+ " # Use this to place an upper bound of memory usage.\n",
+ " # - Warning: 'Memory Per Participant' multiplied by 'Number of Participants to Run Simultaneously'\n",
+ " # must not be more than the total amount of RAM.\n",
+ " # - Conversely, using too little RAM can impede the speed of a pipeline run.\n",
+ " # - It is recommended that you set this to a value that when multiplied by\n",
+ " # 'Number of Participants to Run Simultaneously' is as much RAM you can safely allocate.\n",
+ " maximum_memory_per_participant: 3\n",
+ "\n",
+ " # The maximum amount of cores (on a single machine) or slots on a node (on a cluster/grid)\n",
+ " # to allocate per participant.\n",
+ " # - Setting this above 1 will parallelize each participant's workflow where possible.\n",
+ " # If you wish to dedicate multiple cores to ANTS-based anatomical registration (below),\n",
+ " # this value must be equal or higher than the amount of cores provided to ANTS.\n",
+ " # - The maximum number of cores your run can possibly employ will be this setting multiplied\n",
+ " # by the number of participants set to run in parallel (the 'Number of Participants to Run\n",
+ " # Simultaneously' setting).\n",
+ " max_cores_per_participant: 1\n",
+ "\n",
+ " # The number of cores to allocate to ANTS-based anatomical registration per participant.\n",
+ " # - Multiple cores can greatly speed up this preprocessing step.\n",
+ " # - This number cannot be greater than the number of cores per participant.\n",
+ " num_ants_threads: 1\n",
+ "\n",
+ " # The number of cores to allocate to processes that use OpenMP.\n",
+ " num_OMP_threads: 1\n",
+ "\n",
+ " # The number of participant workflows to run at the same time.\n",
+ " # - The maximum number of cores your run can possibly employ will be this setting\n",
+ " # multiplied by the number of cores dedicated to each participant (the 'Maximum Number of Cores Per Participant' setting).\n",
+ " num_participants_at_once: 1\n",
+ "\n",
+ " # Full path to the FSL version to be used by CPAC.\n",
+ " # If you have specified an FSL path in your .bashrc file, this path will be set automatically.\n",
+ " FSLDIR: FSLDIR\n",
+ "\n",
+ " working_directory:\n",
+ "\n",
+ " # Directory where C-PAC should store temporary and intermediate files.\n",
+ " # - This directory must be saved if you wish to re-run your pipeline from where you left off (if not completed).\n",
+ " # - NOTE: As it stores all intermediate files, this directory can grow to become very\n",
+ " # large, especially for data with a large amount of TRs.\n",
+ " # - If running in a container (Singularity/Docker), you can simply set this to an arbitrary\n",
+ " # name like '/work', and then map (-B/-v) your desired output directory to that label.\n",
+ " # - If running outside a container, this should be a full path to a directory.\n",
+ " # - This can be written to '/tmp' if you do not intend to save your working directory.\n",
+ " path: /outputs/working\n",
+ "\n",
+ " # Deletes the contents of the Working Directory after running.\n",
+ " # This saves disk space, but any additional preprocessing or analysis will have to be completely re-run.\n",
+ " remove_working_dir: On\n",
+ "\n",
+ " log_directory:\n",
+ "\n",
+ " # Whether to write log details of the pipeline run to the logging files.\n",
+ " run_logging: On\n",
+ " path: /outputs/logs\n",
+ "\n",
+ " # Configuration options for logging visualizations of the workflow graph\n",
+ " graphviz:\n",
+ "\n",
+ " # Configuration for a graphviz visualization of the entire workflow. See https://fcp-indi.github.io/docs/developer/nodes#CPAC.pipeline.nipype_pipeline_engine.Workflow.write_graph for details about the various options\n",
+ " entire_workflow:\n",
+ "\n",
+ " # Whether to generate the graph visualization\n",
+ " generate: Off\n",
+ "\n",
+ " # Options: [orig, hierarchical, flat, exec, colored]\n",
+ " graph2use: []\n",
+ "\n",
+ " # Options: [svg, png]\n",
+ " format: []\n",
+ "\n",
+ " # The node name will be displayed in the form `nodename (package)` when On or `nodename.Class.package` when Off\n",
+ " simple_form: On\n",
+ "\n",
+ " crash_log_directory:\n",
+ "\n",
+ " # Directory where CPAC should write crash logs.\n",
+ " path: /outputs/crash\n",
+ "\n",
+ " outdir_ingress:\n",
+ " run: Off\n",
+ "\n",
+ " Amazon-AWS:\n",
+ "\n",
+ " # If setting the 'Output Directory' to an S3 bucket, insert the path to your AWS credentials file here.\n",
+ " aws_output_bucket_credentials:\n",
+ "\n",
+ " # Enable server-side 256-AES encryption on data to the S3 bucket\n",
+ " s3_encryption: On\n",
+ "\n",
+ " Debugging:\n",
+ "\n",
+ " # Verbose developer messages.\n",
+ " verbose: Off\n",
+ "\n",
+ "# PREPROCESSING\n",
+ "# -------------\n",
+ "surface_analysis:\n",
+ "\n",
+ " # Run freesurfer_abcd_preproc to obtain preprocessed T1w for reconall\n",
+ " abcd_prefreesurfer_prep:\n",
+ " run: Off\n",
+ "\n",
+ " # Will run Freesurfer for surface-based analysis. Will output traditional Freesurfer derivatives.\n",
+ " # If you wish to employ Freesurfer outputs for brain masking or tissue segmentation in the voxel-based pipeline,\n",
+ " # select those 'Freesurfer-' labeled options further below in anatomical_preproc.\n",
+ " freesurfer:\n",
+ " run_reconall: Off\n",
+ "\n",
+ " # Add extra arguments to recon-all command\n",
+ " reconall_args:\n",
+ "\n",
+ " # Ingress freesurfer recon-all folder\n",
+ " ingress_reconall: Off\n",
+ "\n",
+ " # Run ABCD-HCP post FreeSurfer and fMRISurface pipeline\n",
+ " post_freesurfer:\n",
+ " run: Off\n",
+ " subcortical_gray_labels: /opt/dcan-tools/pipeline/global/config/FreeSurferSubcorticalLabelTableLut.txt\n",
+ " freesurfer_labels: /opt/dcan-tools/pipeline/global/config/FreeSurferAllLut.txt\n",
+ " surf_atlas_dir: /opt/dcan-tools/pipeline/global/templates/standard_mesh_atlases\n",
+ " gray_ordinates_dir: /opt/dcan-tools/pipeline/global/templates/Greyordinates\n",
+ " gray_ordinates_res: 2\n",
+ " high_res_mesh: 164\n",
+ " low_res_mesh: 32\n",
+ " fmri_res: 2\n",
+ " smooth_fwhm: 2\n",
+ "\n",
+ " amplitude_low_frequency_fluctuation:\n",
+ " run: Off\n",
+ "\n",
+ " regional_homogeneity:\n",
+ " run: Off\n",
+ "\n",
+ " surface_connectivity:\n",
+ " run: Off\n",
+ " surface_parcellation_template: /cpac_templates/Schaefer2018_200Parcels_17Networks_order.dlabel.nii\n",
+ "\n",
+ "anatomical_preproc:\n",
+ " run: On\n",
+ " acpc_alignment:\n",
+ " T1w_brain_ACPC_template:\n",
+ "\n",
+ " # Choose a tool to crop the FOV in ACPC alignment.\n",
+ " # Using FSL's robustfov or flirt command.\n",
+ " # Default: robustfov for human data, flirt for monkey data.\n",
+ " FOV_crop: robustfov\n",
+ "\n",
+ " # Run ACPC alignment on brain mask\n",
+ " # If the brain mask is in native space, turn it on\n",
+ " # If the brain mask is ACPC aligned, turn it off\n",
+ " align_brain_mask: Off\n",
+ " T2w_ACPC_template:\n",
+ " T2w_brain_ACPC_template:\n",
+ " run: Off\n",
+ "\n",
+ " # Run ACPC alignment before non-local means filtering or N4 bias\n",
+ " # correction\n",
+ " run_before_preproc: On\n",
+ "\n",
+ " # ACPC size of brain in z-dimension in mm.\n",
+ " # Default: 150mm for human data.\n",
+ " brain_size: 150\n",
+ "\n",
+ " # ACPC Target\n",
+ " # options: 'brain' or 'whole-head'\n",
+ " # note: 'brain' requires T1w_brain_ACPC_template below to be populated\n",
+ " acpc_target: whole-head\n",
+ "\n",
+ " # ACPC aligned template\n",
+ " T1w_ACPC_template:\n",
+ "\n",
+ " brain_extraction:\n",
+ " run: On\n",
+ " FreeSurfer-BET:\n",
+ "\n",
+ " # Template to be used for FreeSurfer-BET brain extraction in CCS-options pipeline\n",
+ " T1w_brain_template_mask_ccs:\n",
+ "\n",
+ " # using: ['3dSkullStrip', 'BET', 'UNet', 'niworkflows-ants', 'FreeSurfer-ABCD', 'FreeSurfer-BET-Tight', 'FreeSurfer-BET-Loose', 'FreeSurfer-Brainmask']\n",
+ " # this is a fork option\n",
+ " using: [niworkflows-ants]\n",
+ "\n",
+ " # option parameters\n",
+ " AFNI-3dSkullStrip:\n",
+ "\n",
+ " # Output a mask volume instead of a skull-stripped volume. The mask volume containes 0 to 6, which represents voxel's postion. If set to True, C-PAC will use this output to generate anatomical brain mask for further analysis.\n",
+ " mask_vol: Off\n",
+ "\n",
+ " # Set the threshold value controlling the brain vs non-brain voxels. Default is 0.6.\n",
+ " shrink_factor: 0.6\n",
+ "\n",
+ " # Vary the shrink factor at every iteration of the algorithm. This prevents the likelihood of surface getting stuck in large pools of CSF before reaching the outer surface of the brain. Default is On.\n",
+ " var_shrink_fac: On\n",
+ "\n",
+ " # The shrink factor bottom limit sets the lower threshold when varying the shrink factor. Default is 0.4, for when edge detection is used (which is On by default), otherwise the default value is 0.65.\n",
+ " shrink_factor_bot_lim: 0.4\n",
+ "\n",
+ " # Avoids ventricles while skullstripping.\n",
+ " avoid_vent: On\n",
+ "\n",
+ " # Set the number of iterations. Default is 250.The number of iterations should depend upon the density of your mesh.\n",
+ " n_iterations: 250\n",
+ "\n",
+ " # While expanding, consider the voxels above and not only the voxels below\n",
+ " pushout: On\n",
+ "\n",
+ " # Perform touchup operations at the end to include areas not covered by surface expansion.\n",
+ " touchup: On\n",
+ "\n",
+ " # Give the maximum number of pixels on either side of the hole that can be filled. The default is 10 only if 'Touchup' is On - otherwise, the default is 0.\n",
+ " fill_hole: 10\n",
+ "\n",
+ " # Perform nearest neighbor coordinate interpolation every few iterations. Default is 72.\n",
+ " NN_smooth: 72\n",
+ "\n",
+ " # Perform final surface smoothing after all iterations. Default is 20.\n",
+ " smooth_final: 20\n",
+ "\n",
+ " # Avoid eyes while skull stripping. Default is On.\n",
+ " avoid_eyes: On\n",
+ "\n",
+ " # Use edge detection to reduce leakage into meninges and eyes. Default is On.\n",
+ " use_edge: On\n",
+ "\n",
+ " # Speed of expansion.\n",
+ " exp_frac: 0.1\n",
+ "\n",
+ " # Perform aggressive push to edge. This might cause leakage. Default is Off.\n",
+ " push_to_edge: Off\n",
+ "\n",
+ " # Use outer skull to limit expansion of surface into the skull in case of very strong shading artifacts. Use this only if you have leakage into the skull.\n",
+ " use_skull: Off\n",
+ "\n",
+ " # Percentage of segments allowed to intersect surface. It is typically a number between 0 and 0.1, but can include negative values (which implies no testing for intersection).\n",
+ " perc_int: 0\n",
+ "\n",
+ " # Number of iterations to remove intersection problems. With each iteration, the program automatically increases the amount of smoothing to get rid of intersections. Default is 4.\n",
+ " max_inter_iter: 4\n",
+ "\n",
+ " # Multiply input dataset by FAC if range of values is too small.\n",
+ " fac: 1\n",
+ "\n",
+ " # Blur dataset after spatial normalization. Recommended when you have lots of CSF in brain and when you have protruding gyri (finger like). If so, recommended value range is 2-4. Otherwise, leave at 0.\n",
+ " blur_fwhm: 0\n",
+ "\n",
+ " # Set it as True if processing monkey data with AFNI\n",
+ " monkey: Off\n",
+ "\n",
+ " FSL-BET:\n",
+ "\n",
+ " # Switch \"On\" to crop out neck regions before generating the mask (default: Off).\n",
+ " Robustfov: Off\n",
+ "\n",
+ " # Set the threshold value controling the brain vs non-brain voxels, default is 0.5\n",
+ " frac: 0.5\n",
+ "\n",
+ " # Mesh created along with skull stripping\n",
+ " mesh_boolean: Off\n",
+ "\n",
+ " # Create a surface outline image\n",
+ " outline: Off\n",
+ "\n",
+ " # Add padding to the end of the image, improving BET.Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " padding: Off\n",
+ "\n",
+ " # Integer value of head radius\n",
+ " radius: 0\n",
+ "\n",
+ " # Reduce bias and cleanup neck. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " reduce_bias: Off\n",
+ "\n",
+ " # Eyes and optic nerve cleanup. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " remove_eyes: Off\n",
+ "\n",
+ " # Robust brain center estimation. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " robust: Off\n",
+ "\n",
+ " # Create a skull image\n",
+ " skull: Off\n",
+ "\n",
+ " # Gets additional skull and scalp surfaces by running bet2 and betsurf. This is mutually exclusive with reduce_bias, robust, padding, remove_eyes\n",
+ " surfaces: Off\n",
+ "\n",
+ " # Apply thresholding to segmented brain image and mask\n",
+ " threshold: Off\n",
+ "\n",
+ " # Vertical gradient in fractional intensity threshold (-1,1)\n",
+ " vertical_gradient: 0.0\n",
+ "\n",
+ " UNet:\n",
+ "\n",
+ " # UNet model\n",
+ " unet_model: s3://fcp-indi/resources/cpac/resources/Site-All-T-epoch_36.model\n",
+ "\n",
+ " niworkflows-ants:\n",
+ "\n",
+ " # Template to be used during niworkflows-ants.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " # niworkflows-ants Brain extraction template\n",
+ " template_path: /ants_template/oasis/T_template0.nii.gz\n",
+ "\n",
+ " # niworkflows-ants probability mask\n",
+ " mask_path: /ants_template/oasis/T_template0_BrainCerebellumProbabilityMask.nii.gz\n",
+ "\n",
+ " # niworkflows-ants registration mask (can be optional)\n",
+ " regmask_path: /ants_template/oasis/T_template0_BrainCerebellumRegistrationMask.nii.gz\n",
+ "\n",
+ " run_t2: Off\n",
+ "\n",
+ " # Bias field correction based on square root of T1w * T2w\n",
+ " t1t2_bias_field_correction:\n",
+ " run: Off\n",
+ " BiasFieldSmoothingSigma: 5\n",
+ "\n",
+ " # Non-local means filtering via ANTs DenoiseImage\n",
+ " non_local_means_filtering:\n",
+ "\n",
+ " # this is a fork option\n",
+ " run: [Off]\n",
+ "\n",
+ " # options: 'Gaussian' or 'Rician'\n",
+ " noise_model: Gaussian\n",
+ "\n",
+ " # N4 bias field correction via ANTs\n",
+ " n4_bias_field_correction:\n",
+ "\n",
+ " # this is a fork option\n",
+ " run: [Off]\n",
+ "\n",
+ " # An integer to resample the input image to save computation time. Shrink factors <= 4 are commonly used.\n",
+ " shrink_factor: 2\n",
+ "\n",
+ "segmentation:\n",
+ "\n",
+ " # Automatically segment anatomical images into white matter, gray matter,\n",
+ " # and CSF based on prior probability maps.\n",
+ " run: On\n",
+ " tissue_segmentation:\n",
+ "\n",
+ " # using: ['FSL-FAST', 'Template_Based', 'ANTs_Prior_Based', 'FreeSurfer']\n",
+ " # this is a fork point\n",
+ " using: [FSL-FAST]\n",
+ "\n",
+ " # option parameters\n",
+ " FSL-FAST:\n",
+ " thresholding:\n",
+ "\n",
+ " # thresholding of the tissue segmentation probability maps\n",
+ " # options: 'Auto', 'Custom'\n",
+ " use: Custom\n",
+ " Custom:\n",
+ "\n",
+ " # Set the threshold value for the segmentation probability masks (CSF, White Matter, and Gray Matter)\n",
+ " # The values remaining will become the binary tissue masks.\n",
+ " # A good starting point is 0.95.\n",
+ " # CSF (cerebrospinal fluid) threshold.\n",
+ " CSF_threshold_value: 0.95\n",
+ "\n",
+ " # White matter threshold.\n",
+ " WM_threshold_value: 0.95\n",
+ "\n",
+ " # Gray matter threshold.\n",
+ " GM_threshold_value: 0.95\n",
+ "\n",
+ " use_priors:\n",
+ "\n",
+ " # Use template-space tissue priors to refine the binary tissue masks generated by segmentation.\n",
+ " run: Off\n",
+ "\n",
+ " # Full path to a directory containing binarized prior probability maps.\n",
+ " # These maps are included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " priors_path: /custom_fsl/data/standard/tissuepriors/2mm\n",
+ "\n",
+ " # Full path to a binarized White Matter prior probability map.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " WM_path: $priors_path/avg152T1_white_bin.nii.gz\n",
+ "\n",
+ " # Full path to a binarized Gray Matter prior probability map.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " GM_path: $priors_path/avg152T1_gray_bin.nii.gz\n",
+ "\n",
+ " # Full path to a binarized CSF prior probability map.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard priors.\n",
+ " CSF_path: $priors_path/avg152T1_csf_bin.nii.gz\n",
+ "\n",
+ " Template_Based:\n",
+ "\n",
+ " # These masks should be in the same space of your registration template, e.g. if\n",
+ " # you choose 'EPI Template' , below tissue masks should also be EPI template tissue masks.\n",
+ " #\n",
+ " # Options: ['T1_Template', 'EPI_Template']\n",
+ " template_for_segmentation: []\n",
+ "\n",
+ " # These masks are included as part of the 'Image Resource Files' package available\n",
+ " # on the Install page of the User Guide.\n",
+ " # Full path to a binarized White Matter mask.\n",
+ " WHITE:\n",
+ "\n",
+ " # Full path to a binarized Gray Matter mask.\n",
+ " GRAY:\n",
+ "\n",
+ " # Full path to a binarized CSF mask.\n",
+ " CSF:\n",
+ "\n",
+ " ANTs_Prior_Based:\n",
+ "\n",
+ " # Generate white matter, gray matter, CSF masks based on antsJointLabelFusion\n",
+ " # ANTs Prior-based Segmentation workflow that has shown optimal results for non-human primate data.\n",
+ " # The atlas image assumed to be used in ANTs Prior-based Segmentation.\n",
+ " template_brain_list:\n",
+ "\n",
+ " # The atlas segmentation images.\n",
+ " # For performing ANTs Prior-based segmentation method\n",
+ " # the number of specified segmentations should be identical to the number of atlas brain image sets.\n",
+ " # eg.\n",
+ " # ANTs_prior_seg_template_brain_list :\n",
+ " # - atlas1.nii.gz\n",
+ " # - atlas2.nii.gz\n",
+ " # ANTs_prior_seg_template_segmentation_list:\n",
+ " # - segmentation1.nii.gz\n",
+ " # - segmentation1.nii.gz\n",
+ " template_segmentation_list:\n",
+ "\n",
+ " # Label values corresponding to Gray Matter in multiatlas file\n",
+ " GM_label: []\n",
+ "\n",
+ " # Label values corresponding to White Matter in multiatlas file\n",
+ " WM_label: []\n",
+ "\n",
+ " # Label values corresponding to CSF/GM/WM in atlas file\n",
+ " # It is not necessary to change this values unless your CSF/GM/WM label values are different from Freesurfer Color Lookup Table.\n",
+ " # https://surfer.nmr.mgh.harvard.edu/fswiki/FsTutorial/AnatomicalROI/FreeSurferColorLUT\n",
+ " # Label values corresponding to CSF in multiatlas file\n",
+ " CSF_label: []\n",
+ "\n",
+ " FreeSurfer:\n",
+ "\n",
+ " # Use mri_binarize --erode option to erode segmentation masks\n",
+ " erode:\n",
+ "\n",
+ " # Label values corresponding to CSF in FreeSurfer aseg segmentation file\n",
+ " CSF_label: []\n",
+ "\n",
+ " # Label values corresponding to Gray Matter in FreeSurfer aseg segmentation file\n",
+ " GM_label: []\n",
+ "\n",
+ " # Label values corresponding to White Matter in FreeSurfer aseg segmentation file\n",
+ " WM_label: []\n",
+ "\n",
+ "registration_workflows:\n",
+ " anatomical_registration:\n",
+ " run: On\n",
+ " registration:\n",
+ " FSL-FNIRT:\n",
+ "\n",
+ " # The resolution to which anatomical images should be transformed during registration.\n",
+ " # This is the resolution at which processed anatomical files will be output.\n",
+ " # specifically for monkey pipeline\n",
+ " ref_resolution: 2mm\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is for monkey pipeline specifically.\n",
+ " FNIRT_T1w_brain_template:\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is for monkey pipeline specifically.\n",
+ " FNIRT_T1w_template:\n",
+ "\n",
+ " # Reference mask with 2mm resolution to be used during FNIRT-based brain extraction in ABCD-options pipeline.\n",
+ " ref_mask_res-2:\n",
+ "\n",
+ " # Template with 2mm resolution to be used during FNIRT-based brain extraction in ABCD-options pipeline.\n",
+ " T1w_template_res-2:\n",
+ "\n",
+ " # Configuration file to be used by FSL to set FNIRT parameters.\n",
+ " # It is not necessary to change this path unless you intend to use custom FNIRT parameters or a non-standard template.\n",
+ " fnirt_config: T1_2_MNI152_2mm\n",
+ "\n",
+ " # Reference mask for FSL registration.\n",
+ " ref_mask:\n",
+ "\n",
+ " # Interpolation method for writing out transformed anatomical images.\n",
+ " # Possible values: trilinear, sinc, spline\n",
+ " interpolation: sinc\n",
+ "\n",
+ " # Identity matrix used during FSL-based resampling of anatomical-space data throughout the pipeline.\n",
+ " # It is not necessary to change this path unless you intend to use a different template.\n",
+ " identity_matrix: /Users/jon.clucas/fsl/etc/flirtsch/ident.mat\n",
+ "\n",
+ " # using: ['ANTS', 'FSL', 'FSL-linear']\n",
+ " # this is a fork point\n",
+ " # selecting both ['ANTS', 'FSL'] will run both and fork the pipeline\n",
+ " using: [ANTS]\n",
+ "\n",
+ " # option parameters\n",
+ " ANTs:\n",
+ "\n",
+ " # If a lesion mask is available for a T1w image, use it to improve the ANTs' registration\n",
+ " # ANTS registration only.\n",
+ " use_lesion_mask: Off\n",
+ "\n",
+ " # ANTs parameters for T1-template-based registration\n",
+ " T1_registration:\n",
+ " - collapse-output-transforms: 1\n",
+ " - dimensionality: 3\n",
+ " - initial-moving-transform:\n",
+ " initializationFeature: 0\n",
+ " - transforms:\n",
+ " - Rigid:\n",
+ " convergence:\n",
+ " convergenceThreshold: 1e-06\n",
+ " convergenceWindowSize: 20\n",
+ " iteration: 100x100\n",
+ " gradientStep: 0.05\n",
+ " metric:\n",
+ " metricWeight: 1\n",
+ " numberOfBins: 32\n",
+ " samplingPercentage: 0.25\n",
+ " samplingStrategy: Regular\n",
+ " type: MI\n",
+ " shrink-factors: 2x1\n",
+ " smoothing-sigmas: 2.0x1.0vox\n",
+ " use-histogram-matching: On\n",
+ " - Affine:\n",
+ " convergence:\n",
+ " convergenceThreshold: 1e-06\n",
+ " convergenceWindowSize: 20\n",
+ " iteration: 100x100\n",
+ " gradientStep: 0.08\n",
+ " metric:\n",
+ " metricWeight: 1\n",
+ " numberOfBins: 32\n",
+ " samplingPercentage: 0.25\n",
+ " samplingStrategy: Regular\n",
+ " type: MI\n",
+ " shrink-factors: 2x1\n",
+ " smoothing-sigmas: 1.0x0.0vox\n",
+ " use-histogram-matching: On\n",
+ " - SyN:\n",
+ " convergence:\n",
+ " convergenceThreshold: 1e-06\n",
+ " convergenceWindowSize: 10\n",
+ " iteration: 100x70x50x20\n",
+ " gradientStep: 0.1\n",
+ " metric:\n",
+ " metricWeight: 1\n",
+ " radius: 4\n",
+ " type: CC\n",
+ " shrink-factors: 8x4x2x1\n",
+ " smoothing-sigmas: 3.0x2.0x1.0x0.0vox\n",
+ " totalFieldVarianceInVoxelSpace: 0.0\n",
+ " updateFieldVarianceInVoxelSpace: 3.0\n",
+ " use-histogram-matching: On\n",
+ " winsorize-image-intensities:\n",
+ " lowerQuantile: 0.005\n",
+ " upperQuantile: 0.995\n",
+ "\n",
+ " # Interpolation method for writing out transformed anatomical images.\n",
+ " # Possible values: Linear, BSpline, LanczosWindowedSinc\n",
+ " interpolation: LanczosWindowedSinc\n",
+ "\n",
+ " overwrite_transform:\n",
+ " run: Off\n",
+ "\n",
+ " # Choose the tool to overwrite transform, currently only support 'FSL' to overwrite 'ANTs' transforms in ABCD-options pipeline.\n",
+ " # using: 'FSL'\n",
+ " using: FSL\n",
+ "\n",
+ " # The resolution to which anatomical images should be transformed during registration.\n",
+ " # This is the resolution at which processed anatomical files will be output.\n",
+ " resolution_for_anat: 1mm\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_brain_template: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_T1w.nii.gz\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_template: /code/CPAC/resources/templates/mni_icbm152_t1_tal_nlin_asym_09c.nii\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_brain_template_mask: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_mask.nii.gz\n",
+ "\n",
+ " # Register skull-on anatomical image to a template.\n",
+ " reg_with_skull: Off\n",
+ "\n",
+ " functional_registration:\n",
+ " coregistration:\n",
+ "\n",
+ " # functional (BOLD/EPI) registration to anatomical (structural/T1)\n",
+ " run: On\n",
+ " func_input_prep:\n",
+ "\n",
+ " # Choose whether to use functional brain or skull as the input to functional-to-anatomical registration\n",
+ " reg_with_skull: Off\n",
+ "\n",
+ " # Choose whether to use the mean of the functional/EPI as the input to functional-to-anatomical registration or one of the volumes from the functional 4D timeseries that you choose.\n",
+ " # input: ['Mean_Functional', 'Selected_Functional_Volume', 'fmriprep_reference']\n",
+ " input: [fmriprep_reference]\n",
+ " Mean Functional:\n",
+ "\n",
+ " # Run ANTs’ N4 Bias Field Correction on the input BOLD (EPI)\n",
+ " # this can increase tissue contrast which may improve registration quality in some data\n",
+ " n4_correct_func: Off\n",
+ "\n",
+ " Selected Functional Volume:\n",
+ "\n",
+ " # Only for when 'Use as Functional-to-Anatomical Registration Input' is set to 'Selected Functional Volume'.\n",
+ " #Input the index of which volume from the functional 4D timeseries input file you wish to use as the input for functional-to-anatomical registration.\n",
+ " func_reg_input_volume: 0\n",
+ "\n",
+ " boundary_based_registration:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [On]\n",
+ "\n",
+ " # reference for boundary based registration\n",
+ " # options: 'whole-head' or 'brain'\n",
+ " reference: brain\n",
+ "\n",
+ " # choose which FAST map to generate BBR WM mask\n",
+ " # options: 'probability_map', 'partial_volume_map'\n",
+ " bbr_wm_map: partial_volume_map\n",
+ "\n",
+ " # optional FAST arguments to generate BBR WM mask\n",
+ " bbr_wm_mask_args: -bin\n",
+ "\n",
+ " # Standard FSL 5.0 Scheduler used for Boundary Based Registration.\n",
+ " # It is not necessary to change this path unless you intend to use non-standard MNI registration.\n",
+ " bbr_schedule: /Users/jon.clucas/fsl/etc/flirtsch/bbr.sch\n",
+ "\n",
+ " # reference: 'brain' or 'restore-brain'\n",
+ " # In ABCD-options pipeline, 'restore-brain' is used as coregistration reference\n",
+ " reference: brain\n",
+ "\n",
+ " # Choose FSL or ABCD as coregistration method\n",
+ " using: FSL\n",
+ "\n",
+ " # Choose brain or whole-head as coregistration input\n",
+ " input: brain\n",
+ "\n",
+ " # Choose coregistration interpolation\n",
+ " interpolation: trilinear\n",
+ "\n",
+ " # Choose coregistration cost function\n",
+ " cost: corratio\n",
+ "\n",
+ " # Choose coregistration degree of freedom\n",
+ " dof: 6\n",
+ "\n",
+ " # Extra arguments for FSL flirt\n",
+ " arguments:\n",
+ "\n",
+ " func_registration_to_template:\n",
+ "\n",
+ " # these options modify the application (to the functional data), not the calculation, of the\n",
+ " # T1-to-template and EPI-to-template transforms calculated earlier during registration\n",
+ " # apply the functional-to-template (T1 template) registration transform to the functional data\n",
+ " run: On\n",
+ "\n",
+ " # apply the functional-to-template (EPI template) registration transform to the functional data\n",
+ " run_EPI: Off\n",
+ " apply_transform:\n",
+ "\n",
+ " # options: 'default', 'abcd', 'single_step_resampling_from_stc', 'dcan_nhp'\n",
+ " # 'default': apply func-to-anat and anat-to-template transforms on motion corrected functional image.\n",
+ " # 'abcd': apply motion correction, func-to-anat and anat-to-template transforms on each of raw functional volume using FSL applywarp based on ABCD-HCP pipeline.\n",
+ " # 'single_step_resampling_from_stc': apply motion correction, func-to-anat and anat-to-template transforms on each of slice-time-corrected functional volume using ANTs antsApplyTransform based on fMRIPrep pipeline.\n",
+ " # - if 'single_step_resampling_from_stc', 'template' is the only valid option for ``nuisance_corrections: 2-nuisance_regression: space``\n",
+ " using: single_step_resampling_from_stc\n",
+ "\n",
+ " output_resolution:\n",
+ "\n",
+ " # The resolution (in mm) to which the preprocessed, registered functional timeseries outputs are written into.\n",
+ " # NOTE:\n",
+ " # selecting a 1 mm or 2 mm resolution might substantially increase your RAM needs- these resolutions should be selected with caution.\n",
+ " # for most cases, 3 mm or 4 mm resolutions are suggested.\n",
+ " # NOTE:\n",
+ " # this also includes the single-volume 3D preprocessed functional data,\n",
+ " # such as the mean functional (mean EPI) in template space\n",
+ " func_preproc_outputs: 3.438mmx3.438mmx3.4mm\n",
+ "\n",
+ " # The resolution (in mm) to which the registered derivative outputs are written into.\n",
+ " # NOTE:\n",
+ " # this is for the single-volume functional-space outputs (i.e. derivatives)\n",
+ " # thus, a higher resolution may not result in a large increase in RAM needs as above\n",
+ " func_derivative_outputs: 3.438mmx3.438mmx3.4mm\n",
+ "\n",
+ " target_template:\n",
+ "\n",
+ " # choose which template space to transform derivatives towards\n",
+ " # using: ['T1_template', 'EPI_template']\n",
+ " # this is a fork point\n",
+ " # NOTE:\n",
+ " # this will determine which registration transform to use to warp the functional\n",
+ " # outputs and derivatives to template space\n",
+ " using: [T1_template]\n",
+ " T1_template:\n",
+ "\n",
+ " # Standard Skull Stripped Template. Used as a reference image for functional registration.\n",
+ " # This can be different than the template used as the reference/fixed for T1-to-template registration.\n",
+ " T1w_brain_template_funcreg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_T1w_reference.nii.gz\n",
+ "\n",
+ " # Standard Anatomical Brain Image with Skull.\n",
+ " # This can be different than the template used as the reference/fixed for T1-to-template registration.\n",
+ " T1w_template_funcreg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_T1w_reference.nii.gz\n",
+ "\n",
+ " # Template to be used during registration.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard template.\n",
+ " T1w_brain_template_mask_funcreg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_mask.nii.gz\n",
+ "\n",
+ " # a standard template for resampling if using float resolution\n",
+ " T1w_template_for_resample: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_desc-brain_T1w.nii.gz\n",
+ "\n",
+ " EPI_template:\n",
+ "\n",
+ " # EPI template for direct functional-to-template registration\n",
+ " # (bypassing coregistration and the anatomical-to-template transforms)\n",
+ " EPI_template_funcreg:\n",
+ "\n",
+ " # EPI template mask.\n",
+ " EPI_template_mask_funcreg:\n",
+ "\n",
+ " # a standard template for resampling if using float resolution\n",
+ " EPI_template_for_resample:\n",
+ "\n",
+ " ANTs_pipelines:\n",
+ "\n",
+ " # Interpolation method for writing out transformed functional images.\n",
+ " # Possible values: Linear, BSpline, LanczosWindowedSinc\n",
+ " interpolation: LanczosWindowedSinc\n",
+ "\n",
+ " FNIRT_pipelines:\n",
+ "\n",
+ " # Interpolation method for writing out transformed functional images.\n",
+ " # Possible values: trilinear, sinc, spline\n",
+ " interpolation: sinc\n",
+ "\n",
+ " # Identity matrix used during FSL-based resampling of functional-space data throughout the pipeline.\n",
+ " # It is not necessary to change this path unless you intend to use a different template.\n",
+ " identity_matrix: /Users/jon.clucas/fsl/etc/flirtsch/ident.mat\n",
+ "\n",
+ " EPI_registration:\n",
+ "\n",
+ " # directly register the mean functional to an EPI template\n",
+ " # instead of applying the anatomical T1-to-template transform to the functional data that has been\n",
+ " # coregistered to anatomical/T1 space\n",
+ " run: Off\n",
+ "\n",
+ " # using: ['ANTS', 'FSL', 'FSL-linear']\n",
+ " # this is a fork point\n",
+ " # ex. selecting both ['ANTS', 'FSL'] will run both and fork the pipeline\n",
+ " using: [ANTS]\n",
+ "\n",
+ " # EPI template for direct functional-to-template registration\n",
+ " # (bypassing coregistration and the anatomical-to-template transforms)\n",
+ " EPI_template: s3://fcp-indi/resources/cpac/resources/epi_hbn.nii.gz\n",
+ "\n",
+ " # EPI template mask.\n",
+ " EPI_template_mask:\n",
+ " ANTs:\n",
+ "\n",
+ " # EPI registration configuration - synonymous with T1_registration\n",
+ " # parameters under anatomical registration above\n",
+ " parameters:\n",
+ "\n",
+ " # Interpolation method for writing out transformed EPI images.\n",
+ " # Possible values: Linear, BSpline, LanczosWindowedSinc\n",
+ " interpolation: LanczosWindowedSinc\n",
+ "\n",
+ " FSL-FNIRT:\n",
+ "\n",
+ " # Configuration file to be used by FSL to set FNIRT parameters.\n",
+ " # It is not necessary to change this path unless you intend to use custom FNIRT parameters or a non-standard template.\n",
+ " fnirt_config: T1_2_MNI152_2mm\n",
+ "\n",
+ " # Interpolation method for writing out transformed EPI images.\n",
+ " # Possible values: trilinear, sinc, spline\n",
+ " interpolation: sinc\n",
+ "\n",
+ " # Identity matrix used during FSL-based resampling of BOLD-space data throughout the pipeline.\n",
+ " # It is not necessary to change this path unless you intend to use a different template.\n",
+ " identity_matrix: /Users/jon.clucas/fsl/etc/flirtsch/ident.mat\n",
+ "\n",
+ "functional_preproc:\n",
+ " run: On\n",
+ " update_header:\n",
+ "\n",
+ " # Convert raw data from LPI to RPI\n",
+ " run: On\n",
+ "\n",
+ " slice_timing_correction:\n",
+ "\n",
+ " # Interpolate voxel time courses so they are sampled at the same time points.\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [On]\n",
+ "\n",
+ " # use specified slice time pattern rather than one in header\n",
+ " tpattern:\n",
+ "\n",
+ " # align each slice to given time offset\n",
+ " # The default alignment time is the average of the 'tpattern' values (either from the dataset header or from the tpattern option).\n",
+ " tzero:\n",
+ "\n",
+ " motion_estimates_and_correction:\n",
+ " run: On\n",
+ " motion_estimates:\n",
+ "\n",
+ " # calculate motion statistics BEFORE slice-timing correction\n",
+ " calculate_motion_first: On\n",
+ "\n",
+ " # calculate motion statistics AFTER motion correction\n",
+ " calculate_motion_after: On\n",
+ "\n",
+ " motion_correction:\n",
+ "\n",
+ " # using: ['3dvolreg', 'mcflirt']\n",
+ " # Forking is currently broken for this option.\n",
+ " # Please use separate configs if you want to use each of 3dvolreg and mcflirt.\n",
+ " # Follow https://github.com/FCP-INDI/C-PAC/issues/1935 to see when this issue is resolved.\n",
+ " using: [mcflirt]\n",
+ "\n",
+ " # option parameters\n",
+ " AFNI-3dvolreg:\n",
+ "\n",
+ " # This option is useful when aligning high-resolution datasets that may need more alignment than a few voxels.\n",
+ " functional_volreg_twopass: On\n",
+ "\n",
+ " # Choose motion correction reference. Options: mean, median, selected_volume, fmriprep_reference\n",
+ " motion_correction_reference: [fmriprep_reference]\n",
+ "\n",
+ " # Choose motion correction reference volume\n",
+ " motion_correction_reference_volume: 0\n",
+ "\n",
+ " motion_estimate_filter:\n",
+ "\n",
+ " # Filter physiological (respiration) artifacts from the head motion estimates.\n",
+ " # Adapted from DCAN Labs filter.\n",
+ " # https://www.ohsu.edu/school-of-medicine/developmental-cognition-and-neuroimaging-lab\n",
+ " # https://www.biorxiv.org/content/10.1101/337360v1.full.pdf\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ " filters: []\n",
+ "\n",
+ " distortion_correction:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [On]\n",
+ " Blip-FSL-TOPUP:\n",
+ "\n",
+ " # (approximate) resolution (in mm) of warp basis for the different sub-sampling levels, default 10\n",
+ " warpres: 10\n",
+ "\n",
+ " # sub-sampling scheme, default 1\n",
+ " subsamp: 1\n",
+ "\n",
+ " # FWHM (in mm) of gaussian smoothing kernel, default 8\n",
+ " fwhm: 8\n",
+ "\n",
+ " # Max # of non-linear iterations, default 5\n",
+ " miter: 5\n",
+ "\n",
+ " # Weight of regularisation, default depending on --ssqlambda and --regmod switches. See user documentation.\n",
+ " lambda: 1\n",
+ "\n",
+ " # If set (=1), lambda is weighted by current ssq, default 1\n",
+ " ssqlambda: 1\n",
+ "\n",
+ " # Model for regularisation of warp-field [membrane_energy bending_energy], default bending_energy\n",
+ " regmod: bending_energy\n",
+ "\n",
+ " # Estimate movements if set, default 1 (true)\n",
+ " estmov: 1\n",
+ "\n",
+ " # Minimisation method 0=Levenberg-Marquardt, 1=Scaled Conjugate Gradient, default 0 (LM)\n",
+ " minmet: 0\n",
+ "\n",
+ " # Order of spline, 2->Qadratic spline, 3->Cubic spline. Default=3\n",
+ " splineorder: 3\n",
+ "\n",
+ " # Precision for representing Hessian, double or float. Default double\n",
+ " numprec: double\n",
+ "\n",
+ " # Image interpolation model, linear or spline. Default spline\n",
+ " interp: spline\n",
+ "\n",
+ " # If set (=1), the images are individually scaled to a common mean, default 0 (false)\n",
+ " scale: 0\n",
+ "\n",
+ " # If set (=1), the calculations are done in a different grid, default 1 (true)\n",
+ " regrid: 1\n",
+ "\n",
+ " # using: ['PhaseDiff', 'Blip', 'Blip-FSL-TOPUP']\n",
+ " # PhaseDiff - Perform field map correction using a single phase difference image, a subtraction of the two phase images from each echo. Default scanner for this method is SIEMENS.\n",
+ " # Blip - Uses AFNI 3dQWarp to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.\n",
+ " # Blip-FSL-TOPUP - Uses FSL TOPUP to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.\n",
+ " using: [PhaseDiff, Blip]\n",
+ "\n",
+ " # option parameters\n",
+ " PhaseDiff:\n",
+ "\n",
+ " # Since the quality of the distortion heavily relies on the skull-stripping step, we provide a choice of method ('AFNI' for AFNI 3dSkullStrip or 'BET' for FSL BET).\n",
+ " # Options: 'BET' or 'AFNI'\n",
+ " fmap_skullstrip_option: BET\n",
+ "\n",
+ " # Set the fraction value for the skull-stripping of the magnitude file. Depending on the data, a tighter extraction may be necessary in order to prevent noisy voxels from interfering with preparing the field map.\n",
+ " # The default value is 0.5.\n",
+ " fmap_skullstrip_BET_frac: 0.5\n",
+ "\n",
+ " # Set the threshold value for the skull-stripping of the magnitude file. Depending on the data, a tighter extraction may be necessary in order to prevent noisy voxels from interfering with preparing the field map.\n",
+ " # The default value is 0.6.\n",
+ " fmap_skullstrip_AFNI_threshold: 0.6\n",
+ "\n",
+ " func_masking:\n",
+ " run: On\n",
+ " FSL-BET:\n",
+ "\n",
+ " # Set an intensity threshold to improve skull stripping performances of FSL BET on rodent scans.\n",
+ " functional_mean_thr:\n",
+ " run: Off\n",
+ " threshold_value: 98\n",
+ "\n",
+ " # Bias correct the functional mean image to improve skull stripping performances of FSL BET on rodent scans\n",
+ " functional_mean_bias_correction: Off\n",
+ "\n",
+ " # Apply to 4D FMRI data, if bold_bet_functional_mean_boolean : Off.\n",
+ " # Mutually exclusive with functional, reduce_bias, robust, padding, remove_eyes, surfaces\n",
+ " # It must be 'on' if select 'reduce_bias', 'robust', 'padding', 'remove_eyes', or 'bet_surfaces' on\n",
+ " functional_mean_boolean: Off\n",
+ "\n",
+ " # Set the threshold value controling the brain vs non-brain voxels.\n",
+ " frac: 0.3\n",
+ "\n",
+ " # Mesh created along with skull stripping\n",
+ " mesh_boolean: Off\n",
+ "\n",
+ " # Create a surface outline image\n",
+ " outline: Off\n",
+ "\n",
+ " # Add padding to the end of the image, improving BET.Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " padding: Off\n",
+ "\n",
+ " # Integer value of head radius\n",
+ " radius: 0\n",
+ "\n",
+ " # Reduce bias and cleanup neck. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " reduce_bias: Off\n",
+ "\n",
+ " # Eyes and optic nerve cleanup. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " remove_eyes: Off\n",
+ "\n",
+ " # Robust brain center estimation. Mutually exclusive with functional,reduce_bias,robust,padding,remove_eyes,surfaces\n",
+ " robust: Off\n",
+ "\n",
+ " # Create a skull image\n",
+ " skull: Off\n",
+ "\n",
+ " # Gets additional skull and scalp surfaces by running bet2 and betsurf. This is mutually exclusive with reduce_bias, robust, padding, remove_eyes\n",
+ " surfaces: Off\n",
+ "\n",
+ " # Apply thresholding to segmented brain image and mask\n",
+ " threshold: Off\n",
+ "\n",
+ " # Vertical gradient in fractional intensity threshold (-1,1)\n",
+ " vertical_gradient: 0.0\n",
+ "\n",
+ " FSL_AFNI:\n",
+ " bold_ref: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz\n",
+ " brain_mask: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-02_desc-brain_mask.nii.gz\n",
+ " brain_probseg: /code/CPAC/resources/templates/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz\n",
+ "\n",
+ " # Apply functional mask in native space\n",
+ " apply_func_mask_in_native_space: On\n",
+ "\n",
+ " # using: ['AFNI', 'FSL', 'FSL_AFNI', 'Anatomical_Refined', 'Anatomical_Based', 'Anatomical_Resampled', 'CCS_Anatomical_Refined']\n",
+ " # FSL_AFNI: fMRIPrep-style BOLD mask. Ref: https://github.com/nipreps/niworkflows/blob/a221f612/niworkflows/func/util.py#L246-L514\n",
+ " # Anatomical_Refined: 1. binarize anat mask, in case it is not a binary mask. 2. fill holes of anat mask 3. init_bold_mask : input raw func → dilate init func brain mask 4. refined_bold_mask : input motion corrected func → dilate anatomical mask 5. get final func mask\n",
+ " # Anatomical_Based: Generate the BOLD mask by basing it off of the anatomical brain mask. Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.\n",
+ " # Anatomical_Resampled: Resample anatomical brain mask in standard space to get BOLD brain mask in standard space. Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline. (\"Create fMRI resolution standard space files for T1w image, wmparc, and brain mask […] don't use FLIRT to do spline interpolation with -applyisoxfm for the 2mm and 1mm cases because it doesn't know the peculiarities of the MNI template FOVs\")\n",
+ " # CCS_Anatomical_Refined: Generate the BOLD mask by basing it off of the anatomical brain. Adapted from the BOLD mask method from the CCS pipeline.\n",
+ " # this is a fork point\n",
+ " using: [FSL_AFNI]\n",
+ " Anatomical_Refined:\n",
+ "\n",
+ " # Choose whether or not to dilate the anatomical mask if you choose 'Anatomical_Refined' as the functional masking option. It will dilate one voxel if enabled.\n",
+ " anatomical_mask_dilation: Off\n",
+ "\n",
+ " generate_func_mean:\n",
+ "\n",
+ " # Generate mean functional image\n",
+ " run: On\n",
+ "\n",
+ " normalize_func:\n",
+ "\n",
+ " # Normalize functional image\n",
+ " run: Off\n",
+ "\n",
+ " truncation:\n",
+ "\n",
+ " # First timepoint to include in analysis.\n",
+ " # Default is 0 (beginning of timeseries).\n",
+ " # First timepoint selection in the scan parameters in the data configuration file, if present, will over-ride this selection.\n",
+ " # Note: the selection here applies to all scans of all participants.\n",
+ " start_tr: 0\n",
+ "\n",
+ " # Last timepoint to include in analysis.\n",
+ " # Default is None or End (end of timeseries).\n",
+ " # Last timepoint selection in the scan parameters in the data configuration file, if present, will over-ride this selection.\n",
+ " # Note: the selection here applies to all scans of all participants.\n",
+ " stop_tr:\n",
+ "\n",
+ " scaling:\n",
+ "\n",
+ " # Scale functional raw data, usually used in rodent pipeline\n",
+ " run: Off\n",
+ "\n",
+ " # Scale the size of the dataset voxels by the factor.\n",
+ " scaling_factor: 10\n",
+ "\n",
+ " despiking:\n",
+ "\n",
+ " # Run AFNI 3dDespike\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ " space: native\n",
+ "\n",
+ " coreg_prep:\n",
+ "\n",
+ " # Generate sbref\n",
+ " run: On\n",
+ "\n",
+ "nuisance_corrections:\n",
+ " 2-nuisance_regression:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ "\n",
+ " # Select which nuisance signal corrections to apply\n",
+ " Regressors:\n",
+ " - Name: Regressor_1\n",
+ " Bandpass:\n",
+ " bottom_frequency: 0.01\n",
+ " top_frequency: 0.1\n",
+ " CerebrospinalFluid:\n",
+ " erode_mask: Off\n",
+ " extraction_resolution: 2\n",
+ " summary: Mean\n",
+ " GlobalSignal:\n",
+ " summary: Mean\n",
+ " Motion:\n",
+ " include_delayed: On\n",
+ " include_delayed_squared: On\n",
+ " include_squared: On\n",
+ " PolyOrt:\n",
+ " degree: 2\n",
+ " WhiteMatter:\n",
+ " erode_mask: Off\n",
+ " extraction_resolution: 2\n",
+ " summary: Mean\n",
+ " aCompCor:\n",
+ " extraction_resolution: 2\n",
+ " summary:\n",
+ " components: 5\n",
+ " filter: cosine\n",
+ " method: PC\n",
+ " tissues:\n",
+ " - WhiteMatter\n",
+ " - CerebrospinalFluid\n",
+ " tCompCor:\n",
+ " degree: 2\n",
+ " erode_mask_mm: On\n",
+ " summary:\n",
+ " components: 5\n",
+ " filter: cosine\n",
+ " method: PC\n",
+ " threshold: 5PCT\n",
+ "\n",
+ " # Process and refine masks used to produce regressors and time series for\n",
+ " # regression.\n",
+ " regressor_masks:\n",
+ " erode_anatomical_brain_mask:\n",
+ "\n",
+ " # Erode brain mask in millimeters, default for brain mask is 30 mm\n",
+ " # Brain erosion default is using millimeters.\n",
+ " brain_mask_erosion_mm: 30\n",
+ "\n",
+ " # Erode binarized anatomical brain mask. If choosing True, please also set regressor_masks['erode_csf']['run']: True; anatomical_preproc['brain_extraction']['using']: niworkflows-ants.\n",
+ " run: On\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # Default proportion is None for anatomical brain mask.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " brain_mask_erosion_prop:\n",
+ "\n",
+ " # Erode binarized brain mask in millimeter\n",
+ " brain_erosion_mm:\n",
+ "\n",
+ " erode_csf:\n",
+ "\n",
+ " # Erode cerebrospinal fluid mask in millimeters, default for cerebrospinal fluid is 30mm\n",
+ " # Cerebrospinal fluid erosion default is using millimeters.\n",
+ " csf_mask_erosion_mm: 30\n",
+ "\n",
+ " # Erode binarized csf tissue mask.\n",
+ " run: On\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # Default proportion is None for cerebrospinal fluid mask.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " csf_erosion_prop:\n",
+ "\n",
+ " # Erode binarized cerebrospinal fluid mask in millimeter\n",
+ " csf_erosion_mm:\n",
+ "\n",
+ " erode_wm:\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # Default proportion is 0.6 for white matter mask.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " # White matter erosion default is using proportion erosion method when use erosion for white matter.\n",
+ " wm_erosion_prop: 0.6\n",
+ "\n",
+ " # Erode WM binarized tissue mask.\n",
+ " run: On\n",
+ "\n",
+ " # Erode white matter mask in millimeters, default for white matter is None\n",
+ " wm_mask_erosion_mm:\n",
+ "\n",
+ " # Erode binarized white matter mask in millimeters\n",
+ " wm_erosion_mm:\n",
+ "\n",
+ " erode_gm:\n",
+ "\n",
+ " # Target volume ratio, if using erosion.\n",
+ " # If using erosion, using both proportion and millimeters is not recommended.\n",
+ " gm_erosion_prop: 0.6\n",
+ "\n",
+ " # Erode gray matter binarized tissue mask.\n",
+ " run: Off\n",
+ "\n",
+ " # Erode gray matter mask in millimeters\n",
+ " gm_mask_erosion_mm:\n",
+ "\n",
+ " # Erode binarized gray matter mask in millimeters\n",
+ " gm_erosion_mm:\n",
+ "\n",
+ " # this is not a fork point\n",
+ " # Run nuisance regression in native or template space\n",
+ " # - If set to template, will use the brain mask configured in\n",
+ " # ``functional_preproc: func_masking: FSL_AFNI: brain_mask``\n",
+ " # - If ``registration_workflows: functional_registration: func_registration_to_template: apply_trasnform: using: single_step_resampling_from_stc``, this must be set to template\n",
+ " space: template\n",
+ " ingress_regressors:\n",
+ " run: Off\n",
+ " Regressors:\n",
+ " Name: default\n",
+ " Columns: [global_signal]\n",
+ "\n",
+ " # switch to Off if nuisance regression is off and you don't want to write out the regressors\n",
+ " create_regressors: On\n",
+ "\n",
+ " # Standard Lateral Ventricles Binary Mask\n",
+ " # used in CSF mask refinement for CSF signal-related regressions\n",
+ " lateral_ventricles_mask:\n",
+ "\n",
+ " # Whether to run frequency filtering before or after nuisance regression.\n",
+ " # Options: 'After' or 'Before'\n",
+ " bandpass_filtering_order: After\n",
+ "\n",
+ " 1-ICA-AROMA:\n",
+ "\n",
+ " # this is a fork point\n",
+ " # run: [On, Off] - this will run both and fork the pipeline\n",
+ " run: [Off]\n",
+ "\n",
+ " # Types of denoising strategy:\n",
+ " # nonaggr: nonaggressive-partial component regression\n",
+ " # aggr: aggressive denoising\n",
+ " denoising_type: nonaggr\n",
+ "\n",
+ "timeseries_extraction:\n",
+ " run: Off\n",
+ " connectivity_matrix:\n",
+ "\n",
+ " # Create a connectivity matrix from timeseries data\n",
+ " # Options:\n",
+ " # ['AFNI', 'Nilearn', 'ndmg']\n",
+ " using: [Nilearn, ndmg]\n",
+ "\n",
+ " # Options:\n",
+ " # ['Pearson', 'Partial']\n",
+ " # Note: These options are not configurable for ndmg, which will ignore these options\n",
+ " measure: [Pearson, Partial]\n",
+ "\n",
+ " # Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for time-series extraction, and then select which types of analyses to run.\n",
+ " # Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and SpatialReg, you would enter: '/path/to/ROI.nii.gz': Avg, SpatialReg\n",
+ " # available analyses:\n",
+ " # /path/to/atlas.nii.gz: Avg, Voxel, SpatialReg\n",
+ " tse_roi_paths:\n",
+ " /cpac_templates/CC400.nii.gz: Avg\n",
+ " /cpac_templates/aal_mask_pad.nii.gz: Avg\n",
+ " /cpac_templates/CC200.nii.gz: Avg\n",
+ " /cpac_templates/tt_mask_pad.nii.gz: Avg\n",
+ " /cpac_templates/PNAS_Smith09_rsn10.nii.gz: SpatialReg\n",
+ " /cpac_templates/ho_mask_pad.nii.gz: Avg\n",
+ " /cpac_templates/rois_3mm.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/AAL_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/CAPRSC_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/DKT_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/DesikanKlein_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/HarvardOxfordcort-maxprob-thr25_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/HarvardOxfordsub-maxprob-thr25_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Juelich_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/MICCAI_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-1000Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-200Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-300Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /cpac_templates/Schaefer2018_space-FSLMNI152_res-2mm_desc-400Parcels17NetworksOrder.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Talairach_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Brodmann_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Desikan_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Glasser_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Slab907_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-17-liberal_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-17_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-7-liberal_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ " /ndmg_atlases/label/Human/Yeo-7_space-MNI152NLin6_res-1x1x1.nii.gz: Avg\n",
+ "\n",
+ " # Functional time-series and ROI realignment method: ['ROI_to_func'] or ['func_to_ROI']\n",
+ " # 'ROI_to_func' will realign the atlas/ROI to functional space (fast)\n",
+ " # 'func_to_ROI' will realign the functional time series to the atlas/ROI space\n",
+ " #\n",
+ " # NOTE: in rare cases, realigning the ROI to the functional space may\n",
+ " # result in small misalignments for very small ROIs - please double\n",
+ " # check your data if you see issues\n",
+ " realignment: ROI_to_func\n",
+ "\n",
+ "amplitude_low_frequency_fluctuation:\n",
+ "\n",
+ " # ALFF & f/ALFF\n",
+ " # Calculate Amplitude of Low Frequency Fluctuations (ALFF) and fractional ALFF (f/ALFF) for all voxels.\n",
+ " run: Off\n",
+ "\n",
+ " # space: Template or Native\n",
+ " target_space: [Native]\n",
+ "\n",
+ " # Frequency cutoff (in Hz) for the high-pass filter used when calculating f/ALFF.\n",
+ " highpass_cutoff: [0.01]\n",
+ "\n",
+ " # Frequency cutoff (in Hz) for the low-pass filter used when calculating f/ALFF\n",
+ " lowpass_cutoff: [0.1]\n",
+ "\n",
+ "regional_homogeneity:\n",
+ "\n",
+ " # ReHo\n",
+ " # Calculate Regional Homogeneity (ReHo) for all voxels.\n",
+ " run: Off\n",
+ "\n",
+ " # space: Template or Native\n",
+ " target_space: [Native]\n",
+ "\n",
+ " # Number of neighboring voxels used when calculating ReHo\n",
+ " # 7 (Faces)\n",
+ " # 19 (Faces + Edges)\n",
+ " # 27 (Faces + Edges + Corners)\n",
+ " cluster_size: 27\n",
+ "\n",
+ "voxel_mirrored_homotopic_connectivity:\n",
+ "\n",
+ " # VMHC\n",
+ " # Calculate Voxel-mirrored Homotopic Connectivity (VMHC) for all voxels.\n",
+ " run: Off\n",
+ " symmetric_registration:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_brain_template_symmetric:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_brain_template_symmetric_funcreg: /Users/jon.clucas/fsl/data/standard/MNI152_T1_${func_resolution}_brain_symmetric.nii.gz\n",
+ "\n",
+ " # A reference symmetric brain template for resampling\n",
+ " T1w_brain_template_symmetric_for_resample:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_template_symmetric:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " T1w_template_symmetric_funcreg: /Users/jon.clucas/fsl/data/standard/MNI152_T1_${func_resolution}_symmetric.nii.gz\n",
+ "\n",
+ " # A reference symmetric skull template for resampling\n",
+ " T1w_template_symmetric_for_resample:\n",
+ "\n",
+ " # Included as part of the 'Image Resource Files' package available on the Install page of the User Guide.\n",
+ " # It is not necessary to change this path unless you intend to use a non-standard symmetric template.\n",
+ " dilated_symmetric_brain_mask:\n",
+ "\n",
+ " # A reference symmetric brain mask template for resampling\n",
+ " dilated_symmetric_brain_mask_for_resample:\n",
+ "\n",
+ "network_centrality:\n",
+ "\n",
+ " # Calculate Degree, Eigenvector Centrality, or Functional Connectivity Density.\n",
+ " run: Off\n",
+ "\n",
+ " # Maximum amount of RAM (in GB) to be used when calculating Degree Centrality.\n",
+ " # Calculating Eigenvector Centrality will require additional memory based on the size of the mask or number of ROI nodes.\n",
+ " memory_allocation: 3.0\n",
+ "\n",
+ " # Full path to a NIFTI file describing the mask. Centrality will be calculated for all voxels within the mask.\n",
+ " template_specification_file:\n",
+ " degree_centrality:\n",
+ "\n",
+ " # Enable/Disable degree centrality by selecting the connectivity weights\n",
+ " # weight_options: ['Binarized', 'Weighted']\n",
+ " # disable this type of centrality with:\n",
+ " # weight_options: []\n",
+ " weight_options: []\n",
+ "\n",
+ " # Select the type of threshold used when creating the degree centrality adjacency matrix.\n",
+ " # options:\n",
+ " # 'Significance threshold', 'Sparsity threshold', 'Correlation threshold'\n",
+ " correlation_threshold_option: Sparsity threshold\n",
+ "\n",
+ " # Based on the Threshold Type selected above, enter a Threshold Value.\n",
+ " # P-value for Significance Threshold\n",
+ " # Sparsity value for Sparsity Threshold\n",
+ " # Pearson's r value for Correlation Threshold\n",
+ " correlation_threshold: 0.001\n",
+ "\n",
+ " eigenvector_centrality:\n",
+ "\n",
+ " # Enable/Disable eigenvector centrality by selecting the connectivity weights\n",
+ " # weight_options: ['Binarized', 'Weighted']\n",
+ " # disable this type of centrality with:\n",
+ " # weight_options: []\n",
+ " weight_options: [Binarized, Weighted]\n",
+ "\n",
+ " # Select the type of threshold used when creating the eigenvector centrality adjacency matrix.\n",
+ " # options:\n",
+ " # 'Significance threshold', 'Sparsity threshold', 'Correlation threshold'\n",
+ " correlation_threshold_option: Sparsity threshold\n",
+ "\n",
+ " # Based on the Threshold Type selected above, enter a Threshold Value.\n",
+ " # P-value for Significance Threshold\n",
+ " # Sparsity value for Sparsity Threshold\n",
+ " # Pearson's r value for Correlation Threshold\n",
+ " correlation_threshold: 0.001\n",
+ "\n",
+ " local_functional_connectivity_density:\n",
+ "\n",
+ " # Enable/Disable lFCD by selecting the connectivity weights\n",
+ " # weight_options: ['Binarized', 'Weighted']\n",
+ " # disable this type of centrality with:\n",
+ " # weight_options: []\n",
+ " weight_options: []\n",
+ "\n",
+ " # Select the type of threshold used when creating the lFCD adjacency matrix.\n",
+ " # options:\n",
+ " # 'Significance threshold', 'Correlation threshold'\n",
+ " correlation_threshold_option: Significance threshold\n",
+ "\n",
+ " # Based on the Threshold Type selected above, enter a Threshold Value.\n",
+ " # P-value for Significance Threshold\n",
+ " # Sparsity value for Sparsity Threshold\n",
+ " # Pearson's r value for Correlation Threshold\n",
+ " correlation_threshold: 0.001\n",
+ "\n",
+ "longitudinal_template_generation:\n",
+ "\n",
+ " # If you have multiple T1w's, you can generate your own run-specific custom\n",
+ " # T1w template to serve as an intermediate to the standard template for\n",
+ " # anatomical registration.\n",
+ " # This runs before the main pipeline as it requires multiple T1w sessions\n",
+ " # at once.\n",
+ " run: Off\n",
+ "\n",
+ " # Freesurfer longitudinal template algorithm using FSL FLIRT\n",
+ " # Method to average the dataset at each iteration of the template creation\n",
+ " # Options: median, mean or std\n",
+ " average_method: median\n",
+ "\n",
+ " # Degree of freedom for FLIRT in the template creation\n",
+ " # Options: 12 (affine), 9 (traditional), 7 (global rescale) or 6 (rigid body)\n",
+ " dof: 12\n",
+ "\n",
+ " # Interpolation parameter for FLIRT in the template creation\n",
+ " # Options: trilinear, nearestneighbour, sinc or spline\n",
+ " interp: trilinear\n",
+ "\n",
+ " # Cost function for FLIRT in the template creation\n",
+ " # Options: corratio, mutualinfo, normmi, normcorr, leastsq, labeldiff or bbr\n",
+ " cost: corratio\n",
+ "\n",
+ " # Number of threads used for one run of the template generation algorithm\n",
+ " thread_pool: 2\n",
+ "\n",
+ " # Threshold of transformation distance to consider that the loop converged\n",
+ " # (-1 means numpy.finfo(np.float64).eps and is the default)\n",
+ " convergence_threshold: -1\n",
+ "\n",
+ "# OUTPUTS AND DERIVATIVES\n",
+ "# -----------------------\n",
+ "post_processing:\n",
+ " spatial_smoothing:\n",
+ " run: On\n",
+ "\n",
+ " # Smooth the derivative outputs.\n",
+ " # Set as ['nonsmoothed'] to disable smoothing. Set as ['smoothed', 'nonsmoothed'] to get both.\n",
+ " #\n",
+ " # Options:\n",
+ " # ['smoothed', 'nonsmoothed']\n",
+ " output: [smoothed]\n",
+ "\n",
+ " # Tool to use for smoothing.\n",
+ " # 'FSL' for FSL MultiImageMaths for FWHM provided\n",
+ " # 'AFNI' for AFNI 3dBlurToFWHM for FWHM provided\n",
+ " smoothing_method: [FSL]\n",
+ "\n",
+ " # Full Width at Half Maximum of the Gaussian kernel used during spatial smoothing.\n",
+ " # this is a fork point\n",
+ " # i.e. multiple kernels - fwhm: [4,6,8]\n",
+ " fwhm: [4]\n",
+ "\n",
+ " z-scoring:\n",
+ " run: On\n",
+ "\n",
+ " # z-score standardize the derivatives. This may be needed for group-level analysis.\n",
+ " # Set as ['raw'] to disable z-scoring. Set as ['z-scored', 'raw'] to get both.\n",
+ " #\n",
+ " # Options:\n",
+ " # ['z-scored', 'raw']\n",
+ " output: [z-scored]\n",
+ "\n",
+ "seed_based_correlation_analysis:\n",
+ "\n",
+ " # SCA - Seed-Based Correlation Analysis\n",
+ " # For each extracted ROI Average time series, CPAC will generate a whole-brain correlation map.\n",
+ " # It should be noted that for a given seed/ROI, SCA maps for ROI Average time series will be the same.\n",
+ " run: Off\n",
+ "\n",
+ " # Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for seed-based correlation analysis, and then select which types of analyses to run.\n",
+ " # Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and MultReg, you would enter: '/path/to/ROI.nii.gz': Avg, MultReg\n",
+ " # available analyses:\n",
+ " # /path/to/atlas.nii.gz: Avg, DualReg, MultReg\n",
+ " sca_roi_paths:\n",
+ " /cpac_templates/PNAS_Smith09_rsn10.nii.gz: DualReg\n",
+ " /cpac_templates/CC400.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/ez_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/aal_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/CC200.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/tt_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/ho_mask_pad.nii.gz: Avg, MultReg\n",
+ " /cpac_templates/rois_3mm.nii.gz: Avg, MultReg\n",
+ "\n",
+ " # Normalize each time series before running Dual Regression SCA.\n",
+ " norm_timeseries_for_DR: On\n",
+ "\n",
+ "# PACKAGE INTEGRATIONS\n",
+ "# --------------------\n",
+ "PyPEER:\n",
+ "\n",
+ " # Training of eye-estimation models. Commonly used for movies data/naturalistic viewing.\n",
+ " run: Off\n",
+ "\n",
+ " # PEER scan names to use for training\n",
+ " # Example: ['peer_run-1', 'peer_run-2']\n",
+ " eye_scan_names: []\n",
+ "\n",
+ " # Naturalistic viewing data scan names to use for eye estimation\n",
+ " # Example: ['movieDM']\n",
+ " data_scan_names: []\n",
+ "\n",
+ " # Template-space eye mask\n",
+ " eye_mask_path:\n",
+ "\n",
+ " # PyPEER Stimulus File Path\n",
+ " # This is a file describing the stimulus locations from the calibration sequence.\n",
+ " stimulus_path:\n",
+ " minimal_nuisance_correction:\n",
+ "\n",
+ " # PyPEER Minimal nuisance regression\n",
+ " # Note: PyPEER employs minimal preprocessing - these choices do not reflect what runs in the main pipeline.\n",
+ " # PyPEER uses non-nuisance-regressed data from the main pipeline.\n",
+ " # Global signal regression (PyPEER only)\n",
+ " peer_gsr: On\n",
+ "\n",
+ " # Motion scrubbing (PyPEER only)\n",
+ " peer_scrub: Off\n",
+ "\n",
+ " # Motion scrubbing threshold (PyPEER only)\n",
+ " scrub_thresh: 0.2\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "priors_path = show_full_config(priors_path_config)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a58e8f64-12d0-49bd-9e2f-053cc31eb9ec",
+ "metadata": {},
+ "source": [
+ "And we can see that our modification is the only difference:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "1508b454-72de-4623-af3b-d10d27e5a707",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'segmentation': {'tissue_segmentation': {'FSL-FAST': {'use_priors': {'priors_path': ('/custom_fsl/data/standard/tissuepriors/2mm', None)}}}}}"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "priors_path - fmriprep_options"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "fb28ba30-ccfa-48f0-8698-64c552cdb1c1",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'segmentation': {'tissue_segmentation': {'FSL-FAST': {'use_priors': {'priors_path': (None, '/custom_fsl/data/standard/tissuepriors/2mm')}}}}}"
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fmriprep_options - priors_path"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "30b6ba30-4c04-4258-a6b8-d6c251e65de4",
+ "metadata": {},
+ "source": [
+ "(The 2-tuple for each nested key in a `DiffDict` is (the value in the `Configuration` left of the `-`, the value in the `Configuration` right of the `-`) "
+ ]
+ },
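+  {
+   "cell_type": "markdown",
+   "id": "walk-diff-sketch-md",
+   "metadata": {},
+   "source": [
+    "As a small optional sketch (not part of the original tutorial or the C-PAC API): assuming the `DiffDict` and its nested values behave like ordinary mappings, as the reprs above suggest, you could walk a diff and print each differing key path alongside its left-hand and right-hand values:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "walk-diff-sketch-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hypothetical helper, not part of C-PAC itself: recursively walk a DiffDict,\n",
+    "# assuming nested values support .items() and each leaf is a (left, right) 2-tuple.\n",
+    "def walk_diff(diff, path=()):\n",
+    "    for key, value in diff.items():\n",
+    "        if isinstance(value, tuple) and len(value) == 2:\n",
+    "            print(\" / \".join((*path, key)), \":\", value[0], \"->\", value[1])\n",
+    "        else:\n",
+    "            walk_diff(value, (*path, key))\n",
+    "\n",
+    "walk_diff(priors_path - fmriprep_options)"
+   ]
+  },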
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "324decc0-d833-4649-829d-a0b8dc5c825d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "('/custom_fsl/data/standard/tissuepriors/2mm', None)"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "(priors_path - fmriprep_options)[\"segmentation\"][\"tissue_segmentation\"][\"FSL-FAST\"][\"use_priors\"][\"priors_path\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "id": "006e0284-ef93-481e-b892-5c103217d245",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'/custom_fsl/data/standard/tissuepriors/2mm'"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "priors_path[\"segmentation\", \"tissue_segmentation\", \"FSL-FAST\", \"use_priors\", \"priors_path\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "f1dc9fdd-2eeb-40fc-9196-3b5fb13271db",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "fmriprep_options[\"segmentation\", \"tissue_segmentation\", \"FSL-FAST\", \"use_priors\", \"priors_path\"]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "71d5c0a8-d157-4d59-b0e9-dbd717e4d873",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "NoneType"
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "type(fmriprep_options[\"segmentation\", \"tissue_segmentation\", \"FSL-FAST\", \"use_priors\", \"priors_path\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f37c0897-41e8-44b2-9b61-699bbe019600",
+ "metadata": {},
+ "source": [
+ "You can paste some raw YAML between the `\"\"\"`s below to load a full config and try it out yourself:"
+ ]
+ },
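+  {
+   "cell_type": "markdown",
+   "id": "paste-yaml-example-md",
+   "metadata": {},
+   "source": [
+    "For instance, as a hypothetical starting point (assuming `show_full_config` resolves `FROM` here just as it does for the configs above), you could paste the same override used by `FROM/priors_path.yml` in this repository:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "paste-yaml-example-code",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hypothetical, unexecuted example: the variable name is a placeholder and the\n",
+    "# pasted YAML mirrors FROM/priors_path.yml elsewhere in this repository.\n",
+    "example_full_config = show_full_config(full_yaml=\"\"\"\n",
+    "FROM: fmriprep-options\n",
+    "segmentation:\n",
+    "  tissue_segmentation:\n",
+    "    FSL-FAST:\n",
+    "      use_priors:\n",
+    "        priors_path: /custom_fsl/data/standard/tissuepriors/2mm\n",
+    "\"\"\")"
+   ]
+  },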
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "945811cd-eb20-4e70-a3e5-24c88de9c5c7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pasted_full_config = show_full_config(full_yaml=\"\"\"\n",
+ "\"\"\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "C-PAC (core software)",
+ "language": "python",
+ "name": "core"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/FROM/priors_path.yml b/FROM/priors_path.yml
new file mode 100644
index 0000000..5d88631
--- /dev/null
+++ b/FROM/priors_path.yml
@@ -0,0 +1,6 @@
+FROM: fmriprep-options
+segmentation:
+ tissue_segmentation:
+ FSL-FAST:
+ use_priors:
+ priors_path: /custom_fsl/data/standard/tissuepriors/2mm
diff --git a/apt.txt b/apt.txt
new file mode 100644
index 0000000..80a34e3
--- /dev/null
+++ b/apt.txt
@@ -0,0 +1,9 @@
+# Copyright (C) 2024 C-PAC Developers
+# This file is part of C-PAC_tutorials.
+# C-PAC_tutorials is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+# C-PAC_tutorials is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+# You should have received a copy of the GNU Lesser General Public License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+apt-utils
+graphviz
+graphviz-dev
+libhdf5-dev
diff --git a/cpac-requirements.txt b/cpac-requirements.txt
new file mode 100644
index 0000000..808be46
--- /dev/null
+++ b/cpac-requirements.txt
@@ -0,0 +1,12 @@
+# Copyright (C) 2024 C-PAC Developers
+# This file is part of C-PAC_tutorials.
+# C-PAC_tutorials is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
+# C-PAC_tutorials is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+# You should have received a copy of the GNU Lesser General Public License along with C-PAC. If not, see <https://www.gnu.org/licenses/>.
+cpac>=0.5.0
+ipykernel
+jupyter
+matplotlib>=3.1.3
+matplotlib-inline>=0.1.2
+numpy==1.25.1
+pandas==2.0.3
diff --git a/index.rst b/index.rst
index f91d4c9..b6f0133 100644
--- a/index.rst
+++ b/index.rst
@@ -19,4 +19,5 @@ These tutorials can be viewed online here, or downloaded from `the FCP-INDI/C-PA
:includehidden:
:titlesonly:
+ FROM/index
observed_usage/index
diff --git a/observed_usage/visualize_observed_usage.ipynb b/observed_usage/visualize_observed_usage.ipynb
index 16b5abc..63e0c12 100644
--- a/observed_usage/visualize_observed_usage.ipynb
+++ b/observed_usage/visualize_observed_usage.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {},
"source": [
"