TorchFT 8 GPU Integration Test #484
name: TorchFT 8 GPU Integration Test

on:
  push:
    branches: [ main ]
    paths:
      - 'torchtitan/components/ft.py'
      - '.github/workflows/integration_test_8gpu_torchft.yaml'
  pull_request:
    paths:
      - 'torchtitan/components/ft.py'
      - '.github/workflows/integration_test_8gpu_torchft.yaml'
  schedule:
    # Runs every 6 hours
    - cron: '0 */6 * * *'

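# Note on the concurrency group below: pushes to main key the group on the run
# number, so main runs never cancel each other; every other ref shares one group
# per ref, so a newer run cancels the in-flight one for that ref.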
concurrency:
  group: unit-test${{ github.workflow }}-${{ github.ref == 'refs/heads/main' && github.run_number || github.ref }}
  cancel-in-progress: true

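# Every run step uses a login bash shell that aborts on the first failing
# command or failing pipeline stage (-e, -o pipefail).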
defaults:
  run:
    shell: bash -l -eo pipefail {0}

jobs:
  build-test:
    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
    with:
      runner: linux.g5.48xlarge.nvidia.gpu
      gpu-arch-type: cuda
      gpu-arch-version: "12.6"
      # This image is faster to clone than the default (1m25s vs 2m37s),
      # but it lacks the CC needed by triton.
      docker-image: torchtitan-ubuntu-20.04-clang12
      repository: pytorch/torchtitan
      upload-artifact: outputs
      script: |
        set -eux

        # The generic Linux job chooses to use the base env, not the one set up by the image
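        # Pick the last environment reported by conda, assumed here to be the one
        # provisioned by the docker image, and activate it.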
        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
        conda activate "${CONDA_ENV}"

        # Log CUDA driver version for debugging.
        DRIVER_VERSION=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader | head -n 1 || true)
        echo "CUDA driver version: ${DRIVER_VERSION}"

        pip config --user set global.progress_bar off

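        # Nightly installs: torchft-nightly supplies the torchft runtime and the
        # torchft_lighthouse binary used below; torch is force-reinstalled from the
        # cu126 nightly index to match the CUDA 12.6 runner; USE_CPP=0 skips torchao's
        # C++ extension build in case pip ends up building it from source.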
        python -m pip install torchft-nightly
        python -m pip install --force-reinstall --pre torch --index-url https://download.pytorch.org/whl/nightly/cu126
        USE_CPP=0 python -m pip install --pre torchao --index-url https://download.pytorch.org/whl/nightly/cu126

        mkdir artifacts-to-be-uploaded

        echo "torchft_lighthouse"
        RUST_BACKTRACE=1 torchft_lighthouse --min_replicas 1 --quorum_tick_ms 100 --join_timeout_ms 10000 > /dev/null 2>&1 &

        echo "ft_integration_test"
        # Currently hitting: Cuda failure 217 'peer access is not supported between these two devices'
        python -m tests.integration_tests.ft artifacts-to-be-uploaded --ngpu 8
        # pkill -9 torchft_lighthouse
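        # The pkill cleanup above is currently disabled; the background lighthouse
        # process is presumably reaped when the job container shuts down.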