%YAML 1.1
---
# CPAC Pipeline Configuration YAML file
# Version 1.8.8.dev1
#
# http://fcp-indi.github.io for more info.
#
# Tip: This file can be edited manually with a text editor for quick modifications.
FROM: blank

pipeline_setup:

  # Name for this pipeline configuration - useful for identification.
  # This string will be sanitized and used in filepaths
  pipeline_name: cpac_abcd-options
  system_config:

    # The maximum amount of memory each participant's workflow can allocate.
    # Use this to place an upper bound on memory usage.
    # - Warning: 'Memory Per Participant' multiplied by 'Number of Participants to Run Simultaneously'
    #   must not exceed the total amount of RAM.
    # - Conversely, using too little RAM can impede the speed of a pipeline run.
    # - It is recommended that you set this to a value that, when multiplied by
    #   'Number of Participants to Run Simultaneously', is as much RAM as you can safely allocate.
    maximum_memory_per_participant: 10.0
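
    # For example, assuming a hypothetical 'Number of Participants to Run
    # Simultaneously' of 4 (not set in this file), this setting implies roughly
    # 4 x 10.0 GB = 40 GB of RAM should be available for the run.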

# PREPROCESSING
# -------------
surface_analysis:

  # Run freesurfer_abcd_preproc to obtain a preprocessed T1w for recon-all
  abcd_prefreesurfer_prep:
    run: On

  # Run FreeSurfer for surface-based analysis. Outputs traditional FreeSurfer derivatives.
  # If you wish to use FreeSurfer outputs for brain masking or tissue segmentation in the voxel-based pipeline,
  # select the 'FreeSurfer-' labeled options further below in anatomical_preproc.
  freesurfer:
    run_reconall: On

    # Ingress an existing FreeSurfer recon-all folder
    ingress_reconall: On

  # Run the ABCD-HCP PostFreeSurfer and fMRISurface pipelines
  post_freesurfer:
    run: On

  amplitude_low_frequency_fluctuation:
    run: On

  regional_homogeneity:
    run: On

  surface_connectivity:
    run: On

anatomical_preproc:
  run: On
  acpc_alignment:
    run: On

    # Run ACPC alignment before non-local means filtering or N4 bias
    # correction
    run_before_preproc: Off

    # ACPC aligned whole-head T1w template
    T1w_ACPC_template: /opt/dcan-tools/pipeline/global/templates/MNI152_T1_1mm.nii.gz

    # ACPC aligned brain-extracted T1w template
    T1w_brain_ACPC_template: $FSLDIR/data/standard/MNI152_T1_1mm_brain.nii.gz

  brain_extraction:
    run: On

    # using: ['3dSkullStrip', 'BET', 'UNet', 'niworkflows-ants', 'FreeSurfer-ABCD', 'FreeSurfer-BET-Tight', 'FreeSurfer-BET-Loose', 'FreeSurfer-Brainmask']
    # this is a fork option
    using: [FreeSurfer-ABCD]
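
    # For example (a hypothetical fork, not part of this configuration), listing
    # more than one method:
    #   using: [FreeSurfer-ABCD, BET]
    # would fork the pipeline and run both brain-extraction strategies.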

  # Non-local means filtering via ANTs DenoiseImage
  non_local_means_filtering:

    # this is a fork option
    run: [On]

    # options: 'Gaussian' or 'Rician'
    noise_model: Rician

  # N4 bias field correction via ANTs
  n4_bias_field_correction:

    # this is a fork option
    run: [On]

    # An integer to resample the input image to save computation time. Shrink factors <= 4 are commonly used.
    shrink_factor: 4
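
    # Illustrative note: with shrink_factor: 4, a 1 mm input image is internally
    # downsampled to roughly 4 mm for bias-field estimation; because the estimated
    # bias field is smooth, this typically has little effect on the corrected output.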

segmentation:

  # Automatically segment anatomical images into white matter, gray matter,
  # and CSF based on prior probability maps.
  run: On

registration_workflows:
  anatomical_registration:
    run: On
    registration:
      FSL-FNIRT:

        # Reference mask with 2mm resolution to be used during FNIRT-based brain extraction in ABCD-options pipeline.
        ref_mask_res-2: /opt/dcan-tools/pipeline/global/templates/MNI152_T1_2mm_brain_mask_dil.nii.gz

        # Template with 2mm resolution to be used during FNIRT-based brain extraction in ABCD-options pipeline.
        T1w_template_res-2: /opt/dcan-tools/pipeline/global/templates/MNI152_T1_2mm.nii.gz

      # option parameters
      ANTs:

        # ANTs parameters for T1-template-based registration
        T1_registration:
          - verbose: 1
          - float: 0
          - collapse-output-transforms: 0
          - dimensionality: 3
          - winsorize-image-intensities:
              lowerQuantile: 0.005
              upperQuantile: 0.995
          - initial-moving-transform:
              initializationFeature: 1
          - transforms:
            - Rigid:
                convergence:
                  convergenceThreshold: 1e-6
                  convergenceWindowSize: 10
                  iteration: 1000x500x250x100
                gradientStep: 0.1
                masks: Off
                metric:
                  metricWeight: 1
                  numberOfBins: 32
                  samplingPercentage: 0.25
                  samplingStrategy: Regular
                  type: MI
                shrink-factors: 8x4x2x1
                smoothing-sigmas: 3.0x2.0x1.0x0.0
                use-histogram-matching: Off
            - Affine:
                convergence:
                  convergenceThreshold: 1e-6
                  convergenceWindowSize: 10
                  iteration: 1000x500x250x100
                gradientStep: 0.1
                masks: Off
                metric:
                  metricWeight: 1
                  numberOfBins: 32
                  samplingPercentage: 0.25
                  samplingStrategy: Regular
                  type: MI
                shrink-factors: 8x4x2x1
                smoothing-sigmas: 3.0x2.0x1.0x0.0
                use-histogram-matching: Off
            - SyN:
                convergence:
                  convergenceThreshold: 1e-6
                  convergenceWindowSize: 10
                  iteration: 100x70x50x20
                gradientStep: 0.1
                masks: On
                metric:
                  metricWeight: 1
                  radius: 4
                  type: CC
                shrink-factors: 8x4x2x1
                smoothing-sigmas: 3.0x2.0x1.0x0.0
                totalFieldVarianceInVoxelSpace: 0.0
                updateFieldVarianceInVoxelSpace: 3.0
                use-histogram-matching: Off

        # Interpolation method for writing out transformed anatomical images.
        # Possible values: Linear, BSpline, LanczosWindowedSinc
        interpolation: Linear

    overwrite_transform:
      run: On

    # The resolution to which anatomical images should be transformed during registration.
    # This is the resolution at which processed anatomical files will be output.
    resolution_for_anat: 1mm

    # Template to be used during registration.
    # It is not necessary to change this path unless you intend to use a non-standard template.
    T1w_brain_template: /opt/dcan-tools/pipeline/global/templates/MNI152_T1_${resolution_for_anat}_brain.nii.gz

    # Brain mask of the template to be used during registration.
    # It is not necessary to change this path unless you intend to use a non-standard template.
    T1w_brain_template_mask: /opt/dcan-tools/pipeline/global/templates/MNI152_T1_${resolution_for_anat}_brain_mask.nii.gz
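
    # Note: ${resolution_for_anat} is filled in from the setting above, so with
    # resolution_for_anat: 1mm these paths should resolve to
    # /opt/dcan-tools/pipeline/global/templates/MNI152_T1_1mm_brain.nii.gz and
    # /opt/dcan-tools/pipeline/global/templates/MNI152_T1_1mm_brain_mask.nii.gz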

  functional_registration:
    coregistration:

      # functional (BOLD/EPI) registration to anatomical (structural/T1)
      run: On
      func_input_prep:

        # Choose whether to use functional brain or skull as the input to functional-to-anatomical registration
        reg_with_skull: On

        # Choose the input to functional-to-anatomical registration: the mean of the functional/EPI, a selected volume from the functional 4D timeseries, or an fMRIPrep-style reference.
        # input: ['Mean_Functional', 'Selected_Functional_Volume', 'fmriprep_reference']
        input: [Selected_Functional_Volume]

      # reference: 'brain' or 'restore-brain'
      # In the ABCD-options pipeline, 'restore-brain' is used as the coregistration reference
      reference: restore-brain

      # Choose coregistration interpolation
      interpolation: spline

      # Choose coregistration degree of freedom
      dof: 12

    func_registration_to_template:

      # these options modify the application (to the functional data), not the calculation, of the
      # T1-to-template and EPI-to-template transforms calculated earlier during registration
      # apply the functional-to-template (T1 template) registration transform to the functional data
      run: On
      apply_transform:

        # options: 'default', 'abcd', 'single_step_resampling_from_stc', 'dcan_nhp'
        # 'default': apply func-to-anat and anat-to-template transforms to the motion-corrected functional image.
        # 'abcd': apply motion correction, func-to-anat and anat-to-template transforms to each raw functional volume using FSL applywarp, based on the ABCD-HCP pipeline.
        # 'single_step_resampling_from_stc': apply motion correction, func-to-anat and anat-to-template transforms to each slice-time-corrected functional volume using ANTs antsApplyTransforms, based on the fMRIPrep pipeline.
        #   - if 'single_step_resampling_from_stc', 'template' is the only valid option for ``nuisance_corrections: 2-nuisance_regression: space``
        using: abcd

      output_resolution:

        # The resolution (in mm) to which the preprocessed, registered functional timeseries outputs are written.
        # NOTE:
        #   selecting a 1 mm or 2 mm resolution might substantially increase your RAM needs; these resolutions should be selected with caution.
        #   for most cases, 3 mm or 4 mm resolutions are suggested.
        # NOTE:
        #   this also includes the single-volume 3D preprocessed functional data,
        #   such as the mean functional (mean EPI) in template space
        func_preproc_outputs: 2mm

        # The resolution (in mm) to which the registered derivative outputs are written.
        # NOTE:
        #   this applies to the single-volume functional-space outputs (i.e. derivatives),
        #   so a higher resolution may not increase RAM needs as much as above
        func_derivative_outputs: 2mm

      ANTs_pipelines:

        # Interpolation method for writing out transformed functional images.
        # Possible values: Linear, BSpline, LanczosWindowedSinc
        interpolation: Linear

functional_preproc:
  run: On
  motion_estimates_and_correction:
    run: On
    motion_estimates:

      # calculate motion statistics BEFORE slice-timing correction
      calculate_motion_first: On

      # calculate motion statistics AFTER motion correction
      calculate_motion_after: Off

    motion_correction:

      # using: ['3dvolreg', 'mcflirt']
      # Forking is currently broken for this option.
      # Please use separate configs if you want to use each of 3dvolreg and mcflirt.
      # Follow https://github.com/FCP-INDI/C-PAC/issues/1935 to see when this issue is resolved.
      using: [mcflirt]

      # Choose motion correction reference. Options: mean, median, selected_volume, fmriprep_reference
      motion_correction_reference: [selected_volume]

  distortion_correction:

    # this is a fork point
    #   run: [On, Off] - this will run both and fork the pipeline
    run: [On]

    # using: ['PhaseDiff', 'Blip', 'Blip-FSL-TOPUP']
    #   PhaseDiff - Perform field map correction using a single phase difference image, a subtraction of the two phase images from each echo. Default scanner for this method is SIEMENS.
    #   Blip - Uses AFNI 3dQWarp to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.
    #   Blip-FSL-TOPUP - Uses FSL TOPUP to calculate the distortion unwarp for EPI field maps of opposite/same phase encoding direction.
    using: [PhaseDiff, Blip-FSL-TOPUP]

  func_masking:
    run: On

    # Apply functional mask in native space
    apply_func_mask_in_native_space: Off

    # using: ['AFNI', 'FSL', 'FSL_AFNI', 'Anatomical_Refined', 'Anatomical_Based', 'Anatomical_Resampled', 'CCS_Anatomical_Refined']
    # FSL_AFNI: fMRIPrep-style BOLD mask. Ref: https://github.com/nipreps/niworkflows/blob/a221f612/niworkflows/func/util.py#L246-L514
    # Anatomical_Refined: 1. binarize anat mask, in case it is not a binary mask. 2. fill holes of anat mask 3. init_bold_mask : input raw func → dilate init func brain mask 4. refined_bold_mask : input motion corrected func → dilate anatomical mask 5. get final func mask
    # Anatomical_Based: Generate the BOLD mask by basing it off of the anatomical brain mask. Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline.
    # Anatomical_Resampled: Resample anatomical brain mask in standard space to get BOLD brain mask in standard space. Adapted from DCAN Lab's BOLD mask method from the ABCD pipeline. ("Create fMRI resolution standard space files for T1w image, wmparc, and brain mask […] don't use FLIRT to do spline interpolation with -applyisoxfm for the 2mm and 1mm cases because it doesn't know the peculiarities of the MNI template FOVs")
    # CCS_Anatomical_Refined: Generate the BOLD mask by basing it off of the anatomical brain. Adapted from the BOLD mask method from the CCS pipeline.
    # this is a fork point
    using: [Anatomical_Resampled]

  generate_func_mean:

    # Generate mean functional image
    run: On

  coreg_prep:

    # Generate sbref
    run: On

nuisance_corrections:
  2-nuisance_regression:

    # Select which nuisance signal corrections to apply
    Regressors:
      - Name: default
        Bandpass:
          bottom_frequency: 0.01
          method: default
          top_frequency: 0.1
        Motion:
          include_delayed: On
          include_delayed_squared: On
          include_squared: On
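
    # Additional regressor models can be appended to this list; each entry is
    # applied as a separate nuisance regression strategy. For example (the
    # GlobalSignal key below is an illustrative assumption, not part of this
    # configuration):
    #   - Name: with-GSR
    #     Bandpass:
    #       bottom_frequency: 0.01
    #       top_frequency: 0.1
    #     GlobalSignal:
    #       summary: Mean
    #     Motion:
    #       include_delayed: On
    #       include_delayed_squared: On
    #       include_squared: On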

    # Process and refine masks used to produce regressors and time series for
    # regression.
    regressor_masks:
      erode_anatomical_brain_mask:

        # Erode the brain mask by this many millimeters (default: 30 mm).
        # Brain mask erosion uses millimeters by default.
        brain_mask_erosion_mm: 30

      erode_csf:

        # Erode the cerebrospinal fluid mask by this many millimeters (default: 30 mm).
        # Cerebrospinal fluid mask erosion uses millimeters by default.
        csf_mask_erosion_mm: 30

      erode_wm:

        # Target volume ratio, if using erosion.
        # Default proportion is 0.6 for the white matter mask.
        # If using erosion, using both proportion and millimeters is not recommended.
        # White matter erosion uses the proportion method by default.
        wm_erosion_prop: 0.6

      erode_gm:

        # Target volume ratio, if using erosion.
        # If using erosion, using both proportion and millimeters is not recommended.
        gm_erosion_prop: 0.6

timeseries_extraction:
  connectivity_matrix:

    # Create a connectivity matrix from timeseries data
    # Options:
    #  ['AFNI', 'Nilearn', 'ndmg']
    using: [Nilearn, ndmg]

    # Options:
    #  ['Pearson', 'Partial']
    # Note: these options do not apply to ndmg, which ignores them
    measure: [Pearson, Partial]

amplitude_low_frequency_fluctuation:

  # space: Template or Native
  target_space: [Native]

regional_homogeneity:

  # space: Template or Native
  target_space: [Native]

# OUTPUTS AND DERIVATIVES
# -----------------------
post_processing:
  spatial_smoothing:
    run: On

  z-scoring:
    run: On

seed_based_correlation_analysis:

  # Enter paths to region-of-interest (ROI) NIFTI files (.nii or .nii.gz) to be used for seed-based correlation analysis, and then select which types of analyses to run.
  # Denote which analyses to run for each ROI path by listing the names below. For example, if you wish to run Avg and MultReg, you would enter: '/path/to/ROI.nii.gz': Avg, MultReg
  # available analyses:
  #   /path/to/atlas.nii.gz: Avg, DualReg, MultReg
  sca_roi_paths:
    /cpac_templates/CC400.nii.gz: Avg