diff --git a/components/contents.js b/components/contents.js index f3d7abf..f69a0b9 100644 --- a/components/contents.js +++ b/components/contents.js @@ -1,19 +1,13 @@ export const contents = { Basics: { - Conduct: '/tech/conduct', + Conduct: + 'https://github.com/carbonplan/.github/blob/main/CODE_OF_CONDUCT.md', GitHub: '/tech/github', }, - 'Data Science': { - Overview: '/tech/data-science', - Contributing: '/tech/data-science/contributing', - Environments: '/tech/data-science/environments', - Style: '/tech/data-science/style', - Testing: '/tech/data-science/testing', - }, - 'Front-end': { - Overview: '/tech/front-end', - Articles: '/tech/front-end/articles', - Contributing: '/tech/front-end/contributing', - Deployment: '/tech/front-end/deployment', + Guides: { + Contributing: '/tech/contributing', + Environments: '/tech/environments', + Style: '/tech/style', + Testing: '/tech/testing', }, } diff --git a/pages/index.js b/pages/index.js index f16c199..6ed0dde 100644 --- a/pages/index.js +++ b/pages/index.js @@ -1,6 +1,7 @@ -import { Layout, Row, Column } from '@carbonplan/components' -import Heading from '../components/heading' +import { Column, Layout, Link, Row } from '@carbonplan/components' +import { Box, Text } from 'theme-ui' import Card from '../components/card' +import Heading from '../components/heading' const Index = () => { return ( @@ -12,7 +13,18 @@ const Index = () => { > Docs - + + + + + You can also check out our{' '} + contributor guide + on how to contribute to our projects. + + + + +
{children}
diff --git a/pages/tech/contributing.md b/pages/tech/contributing.md new file mode 100644 index 0000000..e72c0d4 --- /dev/null +++ b/pages/tech/contributing.md @@ -0,0 +1,87 @@ +import Section from '../../components/section' + +# Contribution Guide + +This page serves as a general contribution guide for CarbonPlan data science projects. Some projects within the organization may also provide customized contribution guides. + +## Getting Started + +If you're new to contributing to CarbonPlan projects, here are some resources to help you get started: + +- Familiarize yourself with our [GitHub organization](https://github.com/carbonplan) +- Read our [Code of Conduct](https://github.com/carbonplan/.github/blob/main/CODE_OF_CONDUCT.md) +- Check out our [style guide](./style) to understand our coding standards + +## Reporting Issues + +We use GitHub Issues to track bugs and feature requests. When submitting an issue: + +- Use clear, descriptive titles +- Provide detailed steps to reproduce bugs +- For feature requests, explain why the feature would be useful +- Include relevant information about your environment + +## Development Workflow + +### Setting Up Your Environment + +1. Fork the repository on GitHub +2. Clone your fork locally +3. Set up the development environment (project-specific instructions will be in each repository's README) + +### Branch Strategy + +- Work on a feature branch named appropriately (e.g., `feature/add-new-filter` or `fix/resolve-data-loading-issue`) +- Keep your branch focused on a single issue or feature +- Reference our [GitHub workflow](./github) for more details on branches and pull requests + +### Code Contributions + +When contributing code: + +- Follow our [style guide](./style) for code formatting +- Write [tests](./testing) for new functionality +- Update documentation to reflect your changes +- Use [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) style for Python docstrings + +### Documentation Contributions + +Good documentation is as important as good code: + +- Keep language clear and concise +- Update examples to reflect code changes +- Ensure documentation builds without errors +- Preview changes locally before submitting + +### Submitting Changes + +1. Commit your changes with descriptive messages +2. Push to your fork +3. Submit a pull request from your branch to the main repository +4. Address feedback during code review + +## Code Review Process + +All submissions go through a review process: + +- A maintainer will review your PR +- Automated tests must pass +- Changes may be requested before merging +- Be responsive to questions and feedback + +## Project Versioning + +The choice of versioning scheme depends on the project and its goals. We use two versioning schemes depending on the project type: + +- [Calendar Versioning (CalVer)](https://calver.org/) + - The format is `YYYY.MM.DD` or `YYYY.MM.DD.N` + - Used for: projects with frequent updates (e.g., [offsets-db-data](https://github.com/carbonplan/offsets-db-data)) + - When: release schedule is time-based rather than feature-based +- [Semantic Versioning (SemVer)](https://semver.org/) + - The format is `MAJOR.MINOR.PATCH` (e.g., `1.2.3`) + - Used for: projects with a more stable release cycle (e.g., [cmip6-downscaling](https://github.com/carbonplan/cmip6-downscaling)) + - When: breaking changes need to be communicated clearly and new features are added in a backwards-compatible manner. + +export default ({ children }) => ( + +
{children}
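To make the CalVer/SemVer distinction in the new contribution guide concrete, here is a minimal, hypothetical sketch using the [packaging](https://packaging.pypa.io/) library (a PEP 440 parser used by pip; assumed to be available). The version strings are illustrative, not real releases:

```python
from packaging.version import Version

# CalVer (YYYY.MM.DD): versions sort by release date
assert Version('2024.2.14') > Version('2024.1.30')

# SemVer (MAJOR.MINOR.PATCH): a major bump signals breaking changes
old, new = Version('1.9.3'), Version('2.0.0')
assert new > old
assert new.major > old.major  # communicates a breaking change to users
```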
) diff --git a/pages/tech/data-science/contributing.md b/pages/tech/data-science/contributing.md deleted file mode 100644 index a8d9381..0000000 --- a/pages/tech/data-science/contributing.md +++ /dev/null @@ -1,26 +0,0 @@ -import Section from '../../../components/section' - -# Contribution Guide - -This page serves as a general contribution guide for CarbonPlan data science projects. Some projects within the organization may also provide customized contribution guides. - -- where to start -- bug reports and enhancement requests -- developing - - link to page on github - - creating a development environment - - work on a fork/branch - - contributing to the documentation (leave as a TODO block) - - contributing to the code base - - code standards (link to testing) - - code formatting (link to sytle) - - test driven development - - documenting your code (numpydoc) - - git workflow for committing and pushing your changes, open a pull request - - code review -- project versioning -- more? - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/data-science/environments.md b/pages/tech/data-science/environments.md deleted file mode 100644 index 1ca2185..0000000 --- a/pages/tech/data-science/environments.md +++ /dev/null @@ -1,11 +0,0 @@ -import Section from '../../../components/section' - -# Environments - -- local environments -- docker environments -- testing environments - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/data-science/index.md b/pages/tech/data-science/index.md deleted file mode 100644 index 3b96f6b..0000000 --- a/pages/tech/data-science/index.md +++ /dev/null @@ -1,30 +0,0 @@ -import Section from '../../../components/section' - -# Data Science Overview - -Our data science toolset is built on top of the open source Scientific Python ecosystem. We make extensive use of open source frameworks such as the [Pangeo Project](https://pangeo.io/) and open source cloud infrastructure such as [Kubernetes](https://kubernetes.io/). - -## Core projects - -We maintain a few core projects that help tie together CarbonPlan's data science work. - -1. [`carbonplan-python`](https://github.com/carbonplan/carbonplan-python): A lightweight namespace package for Python utilities and subprojects -1. [`carbonplan-data`](https://github.com/carbonplan/data): Cross-org data catalogs and utilities -1. [`carbonplan-styles`](https://github.com/carbonplan/styles): Plotting styles for Altair and Matplotlib - -All of these projects can be installed from [PyPI](https://pypi.org/search/?q=carbonplan): - -``` -pip install "carbonplan[data,styles]" -``` - -## Guides - -- [Contribution Guid](data-science/contributing) -- [Style Guide](data-science/style) -- [Testing Guide](data-science/testing) -- [Python Environments Guide](data-science/environments) - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/data-science/style.md b/pages/tech/data-science/style.md deleted file mode 100644 index adc0bfb..0000000 --- a/pages/tech/data-science/style.md +++ /dev/null @@ -1,139 +0,0 @@ -import Section from '../../../components/section' - -# Python Style Guide - -Python is the primary language used CarbonPlan for data science projects. This style guide describes the rules and frameworks we use for code formatting. - -## Style - -We mostly follow the standard Python style conventions from [PEP8](https://www.python.org/dev/peps/pep-0008/) for code, [Numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) for docstrings, and [PEP484](https://www.python.org/dev/peps/pep-0484/) for type hints. Rather than list the intricicies of each of these conventions, we instead provide an optinionated set of linter configurations that will help maintain style consistancy across all of our projects. - -## Linters - -We use a series of code linters to maintain consistent formatting across our projects. Most projects will also use `pre-commit` to automate regular execution of the linters. The linters are also regularly run as part of our continuous integration and [testing](testing) suite. - -### Black - -[Black](https://black.readthedocs.io/en/stable/index.html) is an opinionated PEP-compliant code formatter. We use Black's default settings with a few minor adjustments: - -Example `pyproject.toml`: - -```ini -[tool.black] -line-length = 100 -skip-string-normalization = true -``` - -### Flake8 - -[Flake8](https://flake8.pycqa.org/en/latest/) provides additional code formatting sytle checks. - -Example `setup.cfg`: - -```ini -[flake8] -ignore = E203,E266,E501,W503,E722,E402,C901 -max-line-length = 100 -max-complexity = 18 -select = B,C,E,F,W,T4,B9 -``` - -### isort - -[isort](https://pycqa.github.io/isort/) automatically sorts Python imports. - -Example `setup.cfg` - -```ini -[isort] -known_first_party= -known_third_party= -multi_line_output=3 -include_trailing_comma=True -force_grid_wrap=0 -combine_as_imports=True -line_length=100 -``` - -## Pre-commmit - -[Pre-commit](https://pre-commit.com/) is a framework for managing and mainting [pre-commit hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) that run as part of a Git repository. We use pre-commit to execute a set of standard code formatters and sytle linters (described above). - -### Installing the Pre-commit utility - -Before using Pre-commit, a command line utility needs to be added to your development environment. Pre-commit can be [installed](https://pre-commit.com/#installation) using a variety of package managers including PyPI, Homebrew, and Conda. - -``` -pip install pre-commit -# or -conda install -c conda-forge pre-commit -# or -brew install pre-commit -``` - -### Installing the pre-commit hook in a repository - -To enable the pre-commit hook in a Git repository, run: - -``` -pre-commit install -``` - -At this point, future commits to this Git repository will trigger the execution of the pre-commit script. - -### Running the pre-commit script manually - -It is often useful to run the pre-commit script during developemnt, even before you are ready to create a Git commit. - -``` -pre-commit run [--all-files] -``` - -The standard execution will only run pre-commit on modified files. Adding the `--all-files` option will run the pre-commit script on all files within the respository. 
- -### Pre-commit configuration - -The hooks included in the pre-commit script are defined in the `.pre-commit-config.yaml` file in each repository. Below is an example of a standard pre-commit configuration. - -```yaml -repos: - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: check-docstring-first - - id: check-json - - id: check-yaml - - id: pretty-format-json - args: ['--autofix', '--indent=2', '--no-sort-keys'] - - - repo: https://github.com/ambv/black - rev: 21.4b2 - hooks: - - id: black - args: ['--line-length', '100'] - - - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.1 - hooks: - - id: flake8 - - - repo: https://github.com/asottile/seed-isort-config - rev: v2.2.0 - hooks: - - id: seed-isort-config - - repo: https://github.com/pre-commit/mirrors-isort - rev: v5.8.0 - hooks: - - id: isort - - - repo: https://github.com/deathbeds/prenotebook - rev: f5bdb72a400f1a56fe88109936c83aa12cc349fa - hooks: - - id: prenotebook -``` - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/data-science/testing.md b/pages/tech/data-science/testing.md deleted file mode 100644 index ec74861..0000000 --- a/pages/tech/data-science/testing.md +++ /dev/null @@ -1,21 +0,0 @@ -import Section from '../../../components/section' - -# Testing - -## Continuous integration - -Continuous integration (CI) is the practice of regularly merging, deploying, and testing software. We utilize multiple CI services: - -- [GitHub Actions](https://github.com/features/actions) -- [Vercel](https://vercel.com) -- [Pre-commit.ci](https://pre-commit.ci/) - -## Testing Python code - -pytest - -## CI services - -- dependabot (and other services) - -export default ({ children }) =>
{children}
diff --git a/pages/tech/environments.md new file mode 100644 index 0000000..52938c9 --- /dev/null +++ b/pages/tech/environments.md @@ -0,0 +1,109 @@ +import Section from '../../components/section' +import { Table } from '@carbonplan/components' + +# Computational Environments + +## Overview + +At CarbonPlan, we use different types of computational environments to support our data science work: + +- **Local environments**: For day-to-day development +- **Docker containers**: For reproducibility and deployment + +These environments support various activities across our workflow: + + + +## Local environments + +We primarily use two tools for managing local environments, depending on project needs. + +### Using Pixi + +[Pixi](https://pixi.sh/latest/) is our recommended tool for managing local environments, especially for projects with complex geospatial dependencies. + +#### Advantages of Pixi + +- Faster dependency resolution than conda/mamba +- Simplified environment specification and isolation +- Compatibility with conda-forge packages (crucial for `GDAL`, `rasterio`, etc.) +- Deterministic builds with lockfiles + +#### Getting Started with Pixi + +Follow the [installation instructions](https://pixi.sh/latest/#installation) to set up Pixi: + +```bash +# Install Pixi +curl -fsSL https://pixi.sh/install.sh | bash + +# Initialize a new project +pixi init + +# Add dependencies (including conda-forge packages) +pixi add numpy pandas xarray +pixi add -c conda-forge gdal rasterio + +# Run commands within the environment +pixi run python my_script.py +``` + +### Alternative: Conda/Mamba + +For projects that benefit from the broader conda ecosystem, you can use conda or its faster alternative, mamba. + +## Docker environments + +We use [Docker](https://www.docker.com/) to create and manage containerized environments. These environments are used for deployment, testing, and reproducibility. We publish our Docker images to [Quay.io](https://quay.io/organization/carbonplan). + +### When to use Docker + +Docker is particularly valuable in these scenarios: + +- **Reproducible research**: Docker future-proofs your work by encapsulating all dependencies in a container, ensuring that your code will run in the same environment regardless of changes to the underlying system. +- **Dependency isolation**: Sometimes you need to pin a specific version of a library or tool; a container guarantees that exact version is available wherever your code runs. +- **Cross-platform compatibility**: Works around OS-specific issues (Linux vs. macOS) +- **Collaboration**: Our JupyterHub deployment runs user sessions inside containers, and some projects may benefit from publishing a shared Docker image of their own. This allows others to run your code in the same environment, without having to install all of the dependencies on their local machine. + +### Creating Docker images using repo2docker + +We typically use [`repo2docker`](https://repo2docker.readthedocs.io/en/latest/) to create Docker images from GitHub repositories. + +1. Setup: Create environment files in your repository: + - `environment.yml` for conda dependencies + - `requirements.txt` for pip dependencies + - `apt.txt` for system dependencies +2. Building locally: + + ```bash + python -m pip install jupyter-repo2docker + repo2docker --no-run path/to/your/repo + ``` + +3. Automated builds: We use GitHub Actions to automatically build and push images to Quay.io whenever changes are pushed to the repository. An example GitHub Actions workflow for building and pushing a Docker image can be found in the [carbonplan/argo-docker repository](https://github.com/carbonplan/argo-docker). +export default ({ children }) => ( +
{children}
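Whichever environment you use, a quick way to confirm that it is healthy is an import smoke test. Below is a minimal sketch (the package list and the `smoke_test.py` file name are illustrative, not a project convention); you might run it with `pixi run python smoke_test.py`, inside a conda environment, or inside a built Docker image:

```python
# smoke_test.py: check that core dependencies import and print their versions
import importlib

PACKAGES = ['numpy', 'pandas', 'xarray', 'rasterio']  # illustrative list

for name in PACKAGES:
    module = importlib.import_module(name)  # raises ImportError if missing
    print(f'{name}: {getattr(module, "__version__", "unknown")}')
```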
) diff --git a/pages/tech/front-end/articles.md b/pages/tech/front-end/articles.md deleted file mode 100644 index e40877b..0000000 --- a/pages/tech/front-end/articles.md +++ /dev/null @@ -1,8 +0,0 @@ -import Section from '../../../components/section' - -# Articles - -- research articles -- blog articles - -export default ({ children }) =>
{children}
diff --git a/pages/tech/front-end/contributing.md b/pages/tech/front-end/contributing.md deleted file mode 100644 index 09fdd47..0000000 --- a/pages/tech/front-end/contributing.md +++ /dev/null @@ -1,11 +0,0 @@ -import Section from '../../../components/section' - -# Contribution Guide - -- versioning -- linking projects during development -- releasing/publishing - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/front-end/deployment.md b/pages/tech/front-end/deployment.md deleted file mode 100644 index a986e93..0000000 --- a/pages/tech/front-end/deployment.md +++ /dev/null @@ -1,9 +0,0 @@ -import Section from '../../../components/section' - -# Deployment Guide - -- Vercel - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/front-end/index.md b/pages/tech/front-end/index.md deleted file mode 100644 index 4571963..0000000 --- a/pages/tech/front-end/index.md +++ /dev/null @@ -1,9 +0,0 @@ -import Section from '../../../components/section' - -# Front-end Overview - -- toolset -- core projects -- references to guides - -export default ({ children }) =>
{children}
diff --git a/pages/tech/front-end/style.md b/pages/tech/front-end/style.md deleted file mode 100644 index 73765bf..0000000 --- a/pages/tech/front-end/style.md +++ /dev/null @@ -1,11 +0,0 @@ -import Section from '../../../components/section' - -# Style Guide - -- eslint or prettier -- doc strings? -- naming conventions? - -export default ({ children }) => ( -
{children}
-) diff --git a/pages/tech/front-end/testing.md b/pages/tech/front-end/testing.md deleted file mode 100644 index 507c472..0000000 --- a/pages/tech/front-end/testing.md +++ /dev/null @@ -1,8 +0,0 @@ -import Section from '../../../components/section' - -# Testing - -- continuous integration -- \{unit, build, integration, deployment\} testing - -export default ({ children }) =>
{children}
diff --git a/pages/tech/github.md index 4e51def..2e31641 100644 --- a/pages/tech/github.md +++ b/pages/tech/github.md @@ -10,7 +10,7 @@ There are many tutorials (i.e. [here](https://lab.github.com/githubtraining/intr ### Navigating the CarbonPlan Organization -GitHub Organizations are shared accounts that allow organizations, like CarbonPlan, collaborate on multiple projects at once. CarbonPlan's GitHub organization (https://github.com/carbonplan) is the central development location for all of our data-science and front-end projects. Important featurs at the organization level include: +GitHub Organizations are shared accounts that allow organizations, like CarbonPlan, to collaborate on multiple projects at once. CarbonPlan's GitHub organization ([https://github.com/carbonplan](https://github.com/carbonplan)) is the central development location for all of our data-science and front-end projects. Important features at the organization level include: - [Repositories](https://github.com/orgs/carbonplan/repositories): a listing of our projects (aka repositories) - [People](https://github.com/orgs/carbonplan/people): a listing of members of the CarbonPlan GitHub organization and outside collaborators diff --git a/pages/tech/index.md index eccd770..3c755db 100644 --- a/pages/tech/index.md +++ b/pages/tech/index.md @@ -2,8 +2,15 @@ import Section from '../../components/section' # Developer Docs -At CarbonPlan, we build a projects using a wide varity of open source tools and technologies. This site provides documentation aimed at supporting our core software, data, and science development activities. +At CarbonPlan, we build projects using a wide variety of open source tools and technologies. We make extensive use of open source frameworks such as the [Xarray](https://xarray.dev/) package for working with multi-dimensional arrays, and the [Dask](https://dask.org/) package for parallel computing. We also rely on a number of other open source packages for data analysis and visualization. -Our work can be roughly divided into two areas, [front-end](/front-end) and [data-science](/data-science). Because these areas often utilize very different toolsets and development approaches, this site provides individudal guides for each area. +This site provides documentation aimed at supporting our core software, data, and science development activities. + +## Guides + +- [Contribution Guide](/tech/contributing) +- [Style Guide](/tech/style) +- [Testing Guide](/tech/testing) +- [Computational Environments Guide](/tech/environments) export default ({ children }) =>
{children}
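As a concrete taste of the Xarray and Dask workflow mentioned in the developer docs overview above, here is a minimal, self-contained sketch (synthetic data and an illustrative chunk size; assumes `dask` is installed alongside `xarray`):

```python
import numpy as np
import xarray as xr

# A synthetic year of daily global data; chunking turns operations into lazy Dask tasks
da = xr.DataArray(
    np.random.rand(365, 180, 360),
    dims=('time', 'lat', 'lon'),
    name='synthetic_field',
).chunk({'time': 30})

# Nothing has been computed yet; .compute() executes the task graph in parallel
time_mean = da.mean(dim='time').compute()
print(time_mean.shape)  # (180, 360)
```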
diff --git a/pages/tech/style.md new file mode 100644 index 0000000..7d27dd4 --- /dev/null +++ b/pages/tech/style.md @@ -0,0 +1,140 @@ +import Section from '../../components/section' +import Sidenote from '../../components/sidenote' + +# Python Style Guide + +Python is the primary language used at CarbonPlan for data science projects. This style guide describes the rules and frameworks we use for code formatting. + +## Style + +We mostly follow the standard Python style conventions from [PEP8](https://www.python.org/dev/peps/pep-0008/) for code, [Numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) for docstrings, and [PEP484](https://www.python.org/dev/peps/pep-0484/) for type hints. Rather than list the intricacies of each of these conventions, we instead provide an opinionated set of linter configurations that will help maintain style consistency across all of our projects. + +## Linters + +We use a series of code linters to maintain consistent formatting across our projects. Most projects will also use `pre-commit` to automate regular execution of the linters. The linters are also regularly run as part of our continuous integration and [testing](testing) suite. + +### Ruff + +[Ruff](https://docs.astral.sh/ruff) is a Python linter and code formatter which supports a wide range of linting rules, many of which are derived from popular tools like [Flake8](https://flake8.pycqa.org/en/latest/), [isort](https://pycqa.github.io/isort/), and [pyupgrade](https://github.com/asottile/pyupgrade). Ruff also provides a formatter designed to be used as a drop-in replacement for [Black](https://black.readthedocs.io/en/stable/index.html), an opinionated PEP-compliant code formatter. + +We use Ruff's default settings with a few minor adjustments: + +Example `pyproject.toml`: + +```toml +[tool.ruff] + extend-include = ["*.ipynb"] + line-length = 100 + target-version = "py310" + + builtins = ["ellipsis"] + # Exclude a variety of commonly ignored directories. + exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pants.d", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "site-packages", + "venv", + ] +[tool.ruff.lint] + ignore = [ + "E501", # Conflicts with ruff format + "E721", # Comparing types instead of isinstance + "E741", # Ambiguous variable names + ] + per-file-ignores = {} + select = [ + # Pyflakes + "F", + # Pycodestyle + "E", + "W", + # isort + "I", + # Pyupgrade + "UP", + ] + +[tool.ruff.lint.mccabe] + max-complexity = 18 + +[tool.ruff.lint.isort] + combine-as-imports = true + known-first-party = [] + +[tool.ruff.format] + docstring-code-format = true + quote-style = "single" + +[tool.ruff.lint.pydocstyle] + convention = "numpy" + +[tool.ruff.lint.pyupgrade] + # Preserve types, even if a file imports `from __future__ import annotations`. + keep-runtime-typing = true +``` + +## Pre-commit + +[Pre-commit](https://pre-commit.com/) is a framework for managing and maintaining [pre-commit hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) that run as part of a Git repository. We use pre-commit to execute a set of standard code formatters and style linters (described above). + +### Installing the Pre-commit utility + +Before using Pre-commit, a command line utility needs to be added to your development environment.
Pre-commit can be [installed](https://pre-commit.com/#installation) using a variety of package managers including PyPI, Homebrew, and Conda. + +``` +python -m pip install pre-commit +# or +conda install -c conda-forge pre-commit +# or +brew install pre-commit +``` + +### Installing the pre-commit hook in a repository + +To enable the pre-commit hook in a Git repository, run: + +``` +pre-commit install +``` + +At this point, future commits to this Git repository will trigger the execution of the pre-commit script. + +### Running the pre-commit script manually + +It is often useful to run the pre-commit script during development, even before you are ready to create a Git commit. + +``` +pre-commit run +``` + +The standard execution will only run pre-commit on modified files. Adding the `--all-files` option will run the pre-commit script on all files within the repository. + +### Pre-commit configuration + +The hooks included in the pre-commit script are defined in the `.pre-commit-config.yaml` file in each repository. For a standard configuration, see the example [pre-commit-config.yaml](https://github.com/carbonplan/carbonplan-cookiecutter-python/blob/main/%7B%7Bcookiecutter.project_name%7D%7D/.pre-commit-config.yaml) file in our Python cookiecutter template. + +export default ({ children }) => ( +
{children}
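For reference, here is a small, hypothetical module written to pass the configuration above: imports sorted per the isort rules, single quotes per the formatter's quote-style (docstrings keep double quotes), PEP484 type hints, and a numpydoc docstring:

```python
from __future__ import annotations

import numpy as np


def weighted_mean(values: np.ndarray, weights: np.ndarray) -> float:
    """Compute a weighted mean.

    Parameters
    ----------
    values : np.ndarray
        Values to average.
    weights : np.ndarray
        Non-negative weights with the same shape as ``values``.

    Returns
    -------
    float
        The weighted mean of ``values``.
    """
    return float(np.average(values, weights=weights))
```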
) diff --git a/pages/tech/testing.md new file mode 100644 index 0000000..3313d96 --- /dev/null +++ b/pages/tech/testing.md @@ -0,0 +1,43 @@ +import Section from '../../components/section' + +# Testing + +## Continuous integration + +Continuous integration (CI) is the practice of regularly merging, deploying, and testing software. We utilize multiple CI services: + +- [GitHub Actions](https://github.com/features/actions) +- [Vercel](https://vercel.com) +- [Pre-commit.ci](https://pre-commit.ci/) + +Most of our projects are set up to run test suites automatically on every push and pull request to the repository. This helps us catch errors early and ensures that our code is always in a releasable state. A pull request will be considered 'ready to merge' when you have an 'all green' status on all of the CI services that are enabled for the repository. Below is an example of an 'all green' status on a pull request: + +![all green status](/all-green-build.png) + +Note that each time you push new commits to GitHub, the CI services will trigger a new build. If the existing build is still running, it will be canceled and a new build will be started. + +## Test-driven development + +We encourage contributors to embrace [test-driven development (TDD) practices](https://en.wikipedia.org/wiki/Test-driven_development). In this style, a developer first writes a test that fails, then writes the code to make it pass. This helps ensure that the code is correct and meets its requirements. When code must handle several use cases, it is worth writing a separate test for each one, which keeps the test suite comprehensive and maintainable. + +Our test suites are written using the [pytest](https://docs.pytest.org/en/stable/) framework. Pytest is a powerful, flexible framework that makes tests easy to write and run, and it offers features such as fixtures and parametrization for testing more complex scenarios. + +## Writing tests + +All tests should go in the `tests` subdirectory of the project. Test files should be named `test_*.py` and test functions should be named `test_*`; this is a convention recognized by pytest and other testing frameworks. A hypothetical example test file is sketched after this section. + +## Running the test suite + +To run the test suite, you can use the following command from the root of the cloned repository: + +``` +python -m pytest +``` + +Often you will want to run the test suite on a specific file or directory. You can do this by specifying the path to the file or directory as an argument to the `pytest` command: + +``` +python -m pytest path/to/test_file.py +``` + +export default ({ children }) =>
{children}
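To illustrate the conventions above, here is a minimal, hypothetical test file, `tests/test_stats.py`, reusing the `weighted_mean` sketch from the style guide (the `mypackage.stats` import is a placeholder for your own module):

```python
import pytest

from mypackage.stats import weighted_mean  # hypothetical module under test


def test_weighted_mean_equal_weights():
    # In the TDD loop, this test is written first and fails until the code exists
    assert weighted_mean([1.0, 3.0], weights=[1.0, 1.0]) == pytest.approx(2.0)


@pytest.mark.parametrize(
    'values, weights, expected',
    [
        ([2.0, 2.0], [1.0, 3.0], 2.0),
        ([0.0, 4.0], [3.0, 1.0], 1.0),
    ],
)
def test_weighted_mean_cases(values, weights, expected):
    # One parametrized case per use case keeps the suite comprehensive
    assert weighted_mean(values, weights=weights) == pytest.approx(expected)
```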
diff --git a/public/all-green-build.png b/public/all-green-build.png new file mode 100644 index 0000000..72dadbc Binary files /dev/null and b/public/all-green-build.png differ