diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
new file mode 100644
index 0000000..9ef38e9
--- /dev/null
+++ b/.github/workflows/benchmark.yml
@@ -0,0 +1,59 @@
+name: Benchmark
+
+on:
+  push:
+    branches:
+      - main
+      - maint/*
+  pull_request:
+    branches:
+      - main
+      - maint/*
+  # Allow job to be triggered manually from GitHub interface
+  workflow_dispatch:
+
+defaults:
+  run:
+    shell: bash
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
+permissions:
+  contents: read
+
+jobs:
+  benchmark:
+    name: Linux
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        shell: bash
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ '3.11' ]
+
+    steps:
+      - name: Set up system
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install .[antsopt,benchmark]
+      - name: Set threading parameters for reliable benchmarking
+        run: |
+          echo "OPENBLAS_NUM_THREADS=1" >> "$GITHUB_ENV"
+          echo "MKL_NUM_THREADS=1" >> "$GITHUB_ENV"
+          echo "OMP_NUM_THREADS=1" >> "$GITHUB_ENV"
+      - name: Run benchmarks
+        run: |
+          asv machine --yes --config benchmarks/asv.conf.json
+          asv run --config benchmarks/asv.conf.json --show-stderr
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 306adf2..43d95a5 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -70,6 +70,10 @@ jobs:
         with:
           path: /home/runner/nifreeze-tests/
           key: data-v0
+      - name: Install TeX Live
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y texlive texlive-latex-extra texlive-fonts-recommended cm-super dvipng
       - name: Install tox
         run: |
           python -m pip install --upgrade pip
diff --git a/benchmarks/README.rst b/benchmarks/README.rst
new file mode 100644
index 0000000..54e47be
--- /dev/null
+++ b/benchmarks/README.rst
@@ -0,0 +1,41 @@
+.. -*- rst -*-
+
+===================
+NiFreeze benchmarks
+===================
+Benchmarking NiFreeze with Airspeed Velocity.
+
+Usage
+-----
+Airspeed Velocity manages building and Python environments by itself,
+unless told otherwise.
+To run the benchmarks, you do not need to install
+a development version of *NiFreeze* in your current
+*Python* environment.
+
+To run all benchmarks for the latest commit, navigate to the ``benchmarks``
+directory at the root of the *NiFreeze* repository and execute::
+
+    asv run
+
+For testing benchmarks locally, it may be better to run these without
+replications::
+
+    export REGEXP="bench_model.*GPR"
+    asv run --dry-run --show-stderr --python=same --quick -b $REGEXP
+
+All of the commands above display the results in plain text in the console,
+and the results are not saved for comparison with future commits.
+For greater control, a graphical view, and to have results saved for future
+comparisons, you can run ASV as follows to record results and generate
+the HTML reports::
+
+    asv run --skip-existing-commits --steps 10 ALL
+    asv publish
+    asv preview
+
+More on how to use ``asv`` can be found in the `ASV documentation`_.
+Command-line help is available as usual via ``asv --help`` and
+``asv run --help``.
+
+.. _ASV documentation: https://asv.readthedocs.io/
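Not part of the changeset above: ASV can also compare two revisions directly. A sketch, assuming the commands are issued from the ``benchmarks`` directory and that ``main`` is the baseline to compare against::

    asv continuous --factor 1.1 main HEAD
    asv compare main HEAD

``asv continuous`` benchmarks both revisions and prints the speed ratios in one go, whereas ``asv compare`` only reads results saved by earlier ``asv run`` invocations.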
+ "version": 1, + + // The name of the project being benchmarked + "project": "nifreeze", + + // The project's homepage + "project_url": "https://www.nipreps.org/nifreeze/", + + // The URL or local path of the source code repository for the + // project being benchmarked + "repo": "..", + + // List of branches to benchmark. If not provided, defaults to "master" + // (for git) or "tip" (for mercurial). + "branches": ["HEAD"], + + "build_command": [ + "python -m build --wheel -o {build_cache_dir} {build_dir}" + ], + + // The DVCS being used. If not set, it will be automatically + // determined from "repo" by looking at the protocol in the URL + // (if remote), or by looking for special directories, such as + // ".git" (if local). + "dvcs": "git", + + // The tool to use to create environments. May be "conda", + // "virtualenv" or other value depending on the plugins in use. + // If missing or the empty string, the tool will be automatically + // determined by looking for tools on the PATH environment + // variable. + "environment_type": "virtualenv", + + // the base URL to show a commit for the project. + "show_commit_url": "https://github.com/nipreps/nifreeze/commit/", + + // The Pythons you'd like to test against. If not provided, defaults + // to the current version of Python used to run `asv`. + // "pythons": ["3.12"], + + // The matrix of dependencies to test. Each key is the name of a + // package (in PyPI) and the values are version numbers. An empty + // list indicates to just test against the default (latest) + // version. + "matrix": { + "dipy": [], + "nipype": [], + "nest-asyncio": [], + "nitransforms": [], + "numpy": [], + "scikit_learn": [], + "scipy": [] + }, + + // The directory (relative to the current directory) that benchmarks are + // stored in. If not provided, defaults to "benchmarks" + "benchmark_dir": "benchmarks", + + // The directory (relative to the current directory) to cache the Python + // environments in. If not provided, defaults to "env" + "env_dir": "env", + + + // The directory (relative to the current directory) that raw benchmark + // results are stored in. If not provided, defaults to "results". + "results_dir": "results", + + // The directory (relative to the current directory) that the html tree + // should be written to. If not provided, defaults to "html". + "html_dir": "html", + + // The number of characters to retain in the commit hashes. + // "hash_length": 8, + + // `asv` will cache wheels of the recent builds in each + // environment, making them faster to install next time. This is + // number of builds to keep, per environment. + "build_cache_size": 8, + + // The commits after which the regression search in `asv publish` + // should start looking for regressions. Dictionary whose keys are + // regexps matching to benchmark names, and values corresponding to + // the commit (exclusive) after which to start looking for + // regressions. The default is to start from the first commit + // with results. If the commit is `null`, regression detection is + // skipped for the matching benchmark. + // + // "regressions_first_commits": { + // "some_benchmark": "352cdf", // Consider regressions only after this commit + // "another_benchmark": null, // Skip regression detection altogether + // } + + // Maximum time in seconds that a benchmark is allowed to run before it is terminated. 
+ "default_benchmark_timeout": 240 +} diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/benchmarks/bench_model.py b/benchmarks/benchmarks/bench_model.py new file mode 100644 index 0000000..c650757 --- /dev/null +++ b/benchmarks/benchmarks/bench_model.py @@ -0,0 +1,102 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# +# Copyright The NiPreps Developers +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# We support and encourage derived works from this project, please read +# about our expectations at +# +# https://www.nipreps.org/community/licensing/ +# +"""Benchmarking for nifreeze's models.""" + +from abc import ABC + +import dipy.data as dpd +import nibabel as nb +import numpy as np +from dipy.core.gradients import get_bval_indices +from dipy.io import read_bvals_bvecs +from dipy.segment.mask import median_otsu +from scipy.ndimage import binary_dilation +from skimage.morphology import ball + +from nifreeze.model.gpr import DiffusionGPR, SphericalKriging + + +class DiffusionGPRBenchmark(ABC): + def __init__(self): + self._estimator = None + self._X_train = None + self._y_train = None + self._X_test = None + self._y_test = None + + def setup(self, *args, **kwargs): + beta_a = 1.38 + beta_l = 1 / 2.1 + alpha = 0.1 + disp = True + optimizer = None + self.make_estimator((beta_a, beta_l, alpha, disp, optimizer)) + self.make_data() + + def make_estimator(self, params): + beta_a, beta_l, alpha, disp, optimizer = params + kernel = SphericalKriging(beta_a=beta_a, beta_l=beta_l) + self._estimator = DiffusionGPR( + kernel=kernel, + alpha=alpha, + disp=disp, + optimizer=optimizer, + ) + + def make_data(self): + name = "sherbrooke_3shell" + + dwi_fname, bval_fname, bvec_fname = dpd.get_fnames(name=name) + dwi_data = nb.load(dwi_fname).get_fdata() + bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname) + + _, brain_mask = median_otsu(dwi_data, vol_idx=[0]) + brain_mask = binary_dilation(brain_mask, ball(8)) + + bval = 1000 + indices = get_bval_indices(bvals, bval, tol=20) + + bvecs_shell = bvecs[indices] + shell_data = dwi_data[..., indices] + dwi_vol_idx = len(indices) // 2 + + # Prepare a train/test mask (False for all directions except the left-out where it's true) + train_test_mask = np.zeros(bvecs_shell.shape[0], dtype=bool) + train_test_mask[dwi_vol_idx] = True + + # Generate train/test bvecs + self._X_train = bvecs_shell[~train_test_mask, :] + self._X_test = bvecs_shell[train_test_mask, :] + + # Select voxels within brain mask + y = shell_data[brain_mask] + + # Generate train/test data + self._y_train = y[:, ~train_test_mask] + self._y_test = y[:, train_test_mask] + + def time_fit(self, *args): + self._estimator = self._estimator.fit(self._X_train, self._y_train.T) + + def time_predict(self): + self._estimator.predict(self._X_test) diff --git a/docs/notebooks/bold_realignment.ipynb b/docs/notebooks/bold_realignment.ipynb 
diff --git a/docs/notebooks/bold_realignment.ipynb b/docs/notebooks/bold_realignment.ipynb
index b9d9647..55bf791 100644
--- a/docs/notebooks/bold_realignment.ipynb
+++ b/docs/notebooks/bold_realignment.ipynb
@@ -34,7 +34,7 @@
     "WORKDIR = Path.home() / \"tmp\" / \"nifreezedev\" / \"ismrm25\"\n",
     "WORKDIR.mkdir(parents=True, exist_ok=True)\n",
     "\n",
-    "OUTPUT_DIR = Path(\"/data/derivatives\") / \"nifreeze-ismrm25-exp2\"\n",
+    "OUTPUT_DIR = WORKDIR / \"nifreeze-ismrm25-exp2\"\n",
     "OUTPUT_DIR.mkdir(exist_ok=True, parents=True)"
    ]
   },
diff --git a/pyproject.toml b/pyproject.toml
index d2f2f39..6252ce9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -85,6 +85,14 @@ types = [
     "microsoft-python-type-stubs @ git+https://github.com/microsoft/python-type-stubs.git",
 ]
 
+notebooks = [
+    "jupyter",
+    "nbclient",
+    "nbmake",
+    "mriqc_learn",
+    "nipreps-synthstrip",
+]
+
 antsopt = [
     "ConfigSpace",
     "nipreps",
@@ -92,6 +100,12 @@ antsopt = [
     "smac",
 ]
 
+benchmark = [
+    "asv",
+    "pyperf",
+    "virtualenv",
+]
+
 # Aliases
 docs = ["nifreeze[doc]"]
 tests = ["nifreeze[test]"]
diff --git a/tools/run_notebooks.py b/tools/run_notebooks.py
new file mode 100644
index 0000000..f9506f4
--- /dev/null
+++ b/tools/run_notebooks.py
@@ -0,0 +1,33 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+#
+# Copyright The NiPreps Developers
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# We support and encourage derived works from this project, please read
+# about our expectations at
+#
+#     https://www.nipreps.org/community/licensing/
+#
+
+import glob
+import subprocess
+import sys
+
+notebooks = glob.glob("docs/notebooks/*.ipynb")
+# Exclude bold_realignment.ipynb: it realigns several DataLad datasets and
+# therefore takes a long time to run.
+notebooks.remove("docs/notebooks/bold_realignment.ipynb")
+
+sys.exit(subprocess.call(["pytest", "--nbmake"] + notebooks))
diff --git a/tox.ini b/tox.ini
index 443b772..089d30f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,13 +2,13 @@
 requires =
   tox>=4
 envlist =
-  py312
+  py312, notebooks
 skip_missing_interpreters = true
 
 # Configuration that allows us to split tests across GitHub runners effectively
 [gh-actions]
 python =
-  3.12: py312
+  3.12: py312, notebooks
 
 [testenv]
 description = Pytest with coverage
@@ -39,6 +39,35 @@ commands =
   pytest --doctest-modules --cov nifreeze -n auto --cov-report xml \
       --junitxml=test-results.xml -v src test {posargs}
 
+[testenv:notebooks]
+description = Run notebooks
+labels = notebooks
+pass_env =
+  # getpass.getuser() sources for Windows:
+  LOGNAME
+  USER
+  LNAME
+  USERNAME
+  # Pass user color preferences through
+  PY_COLORS
+  FORCE_COLOR
+  NO_COLOR
+  CLICOLOR
+  CLICOLOR_FORCE
+  CURBRANCH
+  GITHUB_ACTIONS
+  TEST_DATA_HOME
+  TEST_OUTPUT_DIR
+  TEST_WORK_DIR
+  PYTHONHASHSEED
+  ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS
+  PATH
+extras = notebooks
+commands =
+  # pytest --nbmake docs/notebooks/*.ipynb
+  # not working due to https://github.com/tox-dev/tox/issues/1571
+  python {toxinidir}/tools/run_notebooks.py
+
 [testenv:docs]
 description = Build documentation site
 labels = docs
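For reference, the pieces added in this changeset can be exercised locally much as the CI jobs do. A sketch, assuming the commands are run from the repository root and that the extras defined in ``pyproject.toml`` install cleanly::

    pip install .[antsopt,benchmark]
    asv machine --yes --config benchmarks/asv.conf.json
    asv run --config benchmarks/asv.conf.json --show-stderr
    tox -e notebooks    # runs tools/run_notebooks.py inside the new tox environment

``tox -e notebooks`` installs the ``notebooks`` extra into its own environment, so only the benchmark commands need the manual ``pip install`` step.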