From f17cc8d8e5598dc97ec67e8ff441ba81090a8c14 Mon Sep 17 00:00:00 2001
From: Cedar
Date: Wed, 29 Jan 2025 10:14:05 -0800
Subject: [PATCH] run cpu llm tests on cpu server

---
 .github/workflows/pkgci_shark_ai.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/pkgci_shark_ai.yml b/.github/workflows/pkgci_shark_ai.yml
index 6b76ec885..96c2fed5b 100644
--- a/.github/workflows/pkgci_shark_ai.yml
+++ b/.github/workflows/pkgci_shark_ai.yml
@@ -26,7 +26,7 @@ jobs:
       matrix:
         version: [3.11]
       fail-fast: false
-    runs-on: mi300x-3
+    runs-on: azure-cpubuilder-linux-scale
     # runs-on: ubuntu-latest # everything else works but this throws an "out of resources" during model loading
     # TODO: make a copy of this that runs on standard runners with tiny llama instead of a 8b model
     defaults:
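
Below is a minimal sketch of the copy described in the TODO comment: the same job on a standard GitHub-hosted runner with a tiny llama model instead of the 8b one. The job name test_llm_server_cpu_tiny, the pytest marker tiny_llama, and the step layout are assumptions for illustration only; they are not part of this patch or of the existing workflow.

  test_llm_server_cpu_tiny:
    strategy:
      matrix:
        version: [3.11]
      fail-fast: false
    runs-on: ubuntu-latest  # standard runner; assumes a tiny model avoids the "out of resources" failure
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.version }}
      # Hypothetical test selection: the real suite may expose a different way
      # to pick a tiny llama model over the 8b one.
      - run: pytest -m tiny_llama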