# Python: small improvements in test running (#7576)
### Motivation and Context

Adds `pytest-xdist` to parallelize test runs; the worker count defaults to the number of logical cores.
Adds a single `Kernel` test (covering operation cancellation) to validate the change.
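
For illustration, here is a minimal sketch of the equivalent programmatic invocation (assuming `pytest-xdist` is installed with its `psutil` extra, as pinned in `pyproject.toml` below; the test path is illustrative):

```python
# Minimal sketch: run the unit tests in parallel via pytest-xdist.
# "-n logical" spawns one worker per logical CPU core (requires the psutil extra);
# "--dist worksteal" lets idle workers steal queued tests from busier workers.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-n", "logical", "--dist", "worksteal", "tests/unit"]))
```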

### Contribution Checklist


- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution
Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md)
and the [pre-submission formatting
script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts)
raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄
eavanvalkenburg authored Aug 1, 2024
1 parent 7997e79 commit b1690ce
Showing 8 changed files with 155 additions and 33 deletions.
41 changes: 30 additions & 11 deletions .github/workflows/python-integration-tests.yml
@@ -73,21 +73,28 @@ jobs:
- name: Install Ollama
if: matrix.os == 'ubuntu-latest'
run: |
curl -fsSL https://ollama.com/install.sh | sh
ollama serve &
sleep 5
if ${{ vars.OLLAMA_MODEL != '' }}; then
curl -fsSL https://ollama.com/install.sh | sh
ollama serve &
sleep 5
fi
- name: Pull model in Ollama
if: matrix.os == 'ubuntu-latest'
run: |
ollama pull ${{ vars.OLLAMA_MODEL }}
ollama list
if ${{ vars.OLLAMA_MODEL != '' }}; then
ollama pull ${{ vars.OLLAMA_MODEL }}
ollama list
fi
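(Note: the `${{ vars.OLLAMA_MODEL != '' }}` expression is expanded by the Actions runner before bash runs the script, so the shell sees a literal `if true; then` or `if false; then`; both steps become no-ops when the `OLLAMA_MODEL` repository variable is unset.)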
- name: Google auth
uses: google-github-actions/auth@v2
with:
project_id: ${{ vars.VERTEX_AI_PROJECT_ID }}
credentials_json: ${{ secrets.VERTEX_AI_SERVICE_ACCOUNT_KEY }}
- name: Set up gcloud
uses: google-github-actions/setup-gcloud@v2
- name: Setup Redis Stack Server
if: matrix.os == 'ubuntu-latest'
run: docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
- name: Run Integration Tests
id: run_tests
shell: bash
@@ -124,13 +131,25 @@ jobs:
VERTEX_AI_GEMINI_MODEL_ID: ${{ vars.VERTEX_AI_GEMINI_MODEL_ID }}
VERTEX_AI_EMBEDDING_MODEL_ID: ${{ vars.VERTEX_AI_EMBEDDING_MODEL_ID }}
run: |
if ${{ matrix.os == 'ubuntu-latest' }}; then
docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
fi
cd python
poetry run pytest ./tests/integration -v
poetry run pytest ./tests/samples -v
poetry run pytest ./tests/integration ./tests/samples -v --junitxml=pytest.xml
- name: Surface failing tests
if: always()
uses: pmeier/pytest-results-action@main
with:
# A list of JUnit XML files, directories containing the former, and wildcard
# patterns to process.
# See @actions/glob for supported patterns.
path: python/pytest.xml
# (Optional) Add a summary of the results at the top of the report
summary: true
# (Optional) Select which results should be included in the report.
# Follows the same syntax as `pytest -r`
display-options: fEX
# (Optional) Fail the workflow if no JUnit XML was found.
fail-on-empty: true
# (Optional) Title of the test results section in the workflow summary
title: Test results

python-integration-tests:
needs: paths-filter
16 changes: 6 additions & 10 deletions .github/workflows/python-test-coverage.yml
@@ -10,10 +10,6 @@ on:
types:
- in_progress

env:
PYTHON_VERSION: "3.10"
RUN_OS: ubuntu-latest

jobs:
python-tests-coverage:
runs-on: ubuntu-latest
@@ -27,13 +23,13 @@ jobs:
uses: lewagon/wait-on-check-action@…
with:
ref: ${{ github.event.pull_request.head.sha }}
check-name: 'Python Unit Tests (${{ env.PYTHON_VERSION }}, ${{ env.RUN_OS }}, false)'
check-name: 'Python Test Coverage'
repo-token: ${{ secrets.GH_ACTIONS_PR_WRITE }}
wait-interval: 90
allowed-conclusions: success
- uses: actions/checkout@v4
- name: Setup filename variables
run: echo "FILE_ID=${{ github.event.number }}-${{ env.RUN_OS }}-${{ env.PYTHON_VERSION }}" >> $GITHUB_ENV
run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV
- name: Download coverage
uses: dawidd6/action-download-artifact@v3
with:
@@ -57,9 +53,9 @@ jobs:
github-token: ${{ secrets.GH_ACTIONS_PR_WRITE }}
pytest-coverage-path: python-coverage.txt
coverage-path-prefix: "python/"
title: "Python ${{ env.PYTHON_VERSION }} Test Coverage Report"
badge-title: "Py${{ env.PYTHON_VERSION }} Test Coverage"
junitxml-title: "Python ${{ env.PYTHON_VERSION }} Unit Test Overview"
title: "Python Test Coverage Report"
badge-title: "Python Test Coverage"
junitxml-title: "Python Unit Test Overview"
junitxml-path: pytest.xml
default-branch: "main"
unique-id-for-comment: python-${{ env.PYTHON_VERSION }}
unique-id-for-comment: python-test-coverage
49 changes: 45 additions & 4 deletions .github/workflows/python-unit-tests.yml
@@ -18,7 +18,7 @@ jobs:
os: [ubuntu-latest, windows-latest, macos-latest]
experimental: [false]
include:
- python-version: "3.13.0-beta.3"
- python-version: "3.13.0-beta.4"
os: "ubuntu-latest"
experimental: true
permissions:
@@ -28,8 +28,6 @@ jobs:
working-directory: python
steps:
- uses: actions/checkout@v4
- name: Setup filename variables
run: echo "FILE_ID=${{ github.event.number }}-${{ matrix.os }}-${{ matrix.python-version }}" >> $GITHUB_ENV
- name: Install poetry
run: pipx install poetry
- name: Set up Python ${{ matrix.python-version }}
@@ -40,15 +38,58 @@ jobs:
- name: Install dependencies
run: poetry install --with unit-tests
- name: Test with pytest
run: poetry run pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt
run: poetry run pytest --junitxml=pytest.xml ./tests/unit
- name: Surface failing tests
if: always()
uses: pmeier/pytest-results-action@main
with:
# A list of JUnit XML files, directories containing the former, and wildcard
# patterns to process.
# See @actions/glob for supported patterns.
path: python/pytest.xml
# (Optional) Add a summary of the results at the top of the report
summary: true
# (Optional) Select which results should be included in the report.
# Follows the same syntax as `pytest -r`
display-options: fEX
# (Optional) Fail the workflow if no JUnit XML was found.
fail-on-empty: true
# (Optional) Title of the test results section in the workflow summary
title: Test results
python-test-coverage:
name: Python Test Coverage
runs-on: [ubuntu-latest]
continue-on-error: true
permissions:
contents: write
defaults:
run:
working-directory: python
steps:
- uses: actions/checkout@v4
- name: Setup filename variables
run: echo "FILE_ID=${{ github.event.number }}" >> $GITHUB_ENV
- name: Install poetry
run: pipx install poetry
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: "3.10"
cache: "poetry"
- name: Install dependencies
run: poetry install --with unit-tests
- name: Test with pytest
run: poetry run pytest -q --junitxml=pytest.xml --cov=semantic_kernel --cov-report=term-missing:skip-covered ./tests/unit | tee python-coverage.txt
- name: Upload coverage
if: always()
uses: actions/upload-artifact@v4
with:
name: python-coverage-${{ env.FILE_ID }}.txt
path: python/python-coverage.txt
overwrite: true
retention-days: 1
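(The `python-coverage-<PR number>.txt` artifact uploaded here is what the separate `python-test-coverage.yml` workflow above downloads and feeds to the coverage-comment action as `pytest-coverage-path`.)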
- name: Upload pytest.xml
if: always()
uses: actions/upload-artifact@v4
with:
name: pytest-${{ env.FILE_ID }}.xml
18 changes: 18 additions & 0 deletions python/.vscode/tasks.json
@@ -117,6 +117,24 @@
},
"problemMatcher": []
},
{
"label": "Python: Tests - Unit - Failed Only",
"type": "shell",
"command": "poetry",
"args": [
"run",
"pytest",
"tests/unit/",
"--last-failed",
"-v"
],
"group": "test",
"presentation": {
"reveal": "always",
"panel": "shared"
},
"problemMatcher": []
},
{
"label": "Python: Tests - Code Coverage",
"type": "shell",
41 changes: 38 additions & 3 deletions python/poetry.lock

Some generated files are not rendered by default.

6 changes: 5 additions & 1 deletion python/pyproject.toml
@@ -84,9 +84,10 @@ ruff = ">=0.4.5"
ipykernel = "^6.29.4"
nbconvert = "^7.16.4"
pytest = "^8.2.1"
pytest-xdist = { version="^3.6.1", extras=["psutil"]}
pytest-cov = ">=5.0.0"
pytest-asyncio = "^0.23.7"
snoop = "^0.4.3"
pytest-cov = ">=5.0.0"
mypy = ">=1.10.0"
types-PyYAML = "^6.0.12.20240311"

@@ -167,6 +168,9 @@ redis = ["redis"]
usearch = ["usearch", "pyarrow"]
weaviate = ["weaviate-client"]

[tool.pytest.ini_options]
addopts = "-ra -q -r fEX -n logical --dist loadfile --dist worksteal"
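(A note on this `addopts` line: `-n logical` requests one xdist worker per logical core, which relies on the `psutil` extra pinned above, and because `--dist` is passed twice the later value should take precedence, making `worksteal` rather than `loadfile` the effective scheduling mode.)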

[tool.ruff]
line-length = 120
target-version = "py310"
7 changes: 3 additions & 4 deletions python/tests/integration/completions/test_chat_completions.py
@@ -88,7 +88,7 @@ def history() -> ChatHistory:


@pytest.fixture(scope="module")
def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecutionSettings]]]:
def services() -> dict[str, tuple[ChatCompletionClientBase | None, type[PromptExecutionSettings]]]:
azure_openai_settings = AzureOpenAISettings.create()
endpoint = azure_openai_settings.endpoint
deployment_name = azure_openai_settings.chat_deployment_name
@@ -118,7 +118,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution
"azure_custom_client": (azure_custom_client, AzureChatPromptExecutionSettings),
"azure_ai_inference": (azure_ai_inference_client, AzureAIInferenceChatPromptExecutionSettings),
"mistral_ai": (MistralAIChatCompletion() if mistral_ai_setup else None, MistralAIChatPromptExecutionSettings),
"ollama": (OllamaChatCompletion(), OllamaChatPromptExecutionSettings),
"ollama": (OllamaChatCompletion() if ollama_setup else None, OllamaChatPromptExecutionSettings),
"google_ai": (GoogleAIChatCompletion(), GoogleAIChatPromptExecutionSettings),
"vertex_ai": (VertexAIChatCompletion(), VertexAIChatPromptExecutionSettings),
}
@@ -464,8 +464,7 @@ def services() -> dict[str, tuple[ChatCompletionClientBase, type[PromptExecution
ChatMessageContent(role=AuthorRole.USER, items=[TextContent(text="How are you today?")]),
],
["Hello", "well"],
# marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"),
marks=pytest.mark.skip(reason="Flaky test"),
marks=pytest.mark.skipif(not ollama_setup, reason="Need local Ollama setup"),
id="ollama_text_input",
),
pytest.param(
10 changes: 10 additions & 0 deletions python/tests/unit/kernel/test_kernel.py
@@ -166,6 +166,16 @@ async def test_invoke_function_fail(kernel: Kernel, create_mock_function):
pass


@pytest.mark.asyncio
async def test_invoke_function_cancelled(kernel: Kernel, create_mock_function):
mock_function = create_mock_function(name="test_function")
mock_function._invoke_internal = AsyncMock(side_effect=OperationCancelledException("Operation cancelled"))
kernel.add_plugin(KernelPlugin(name="test", functions=[mock_function]))

result = await kernel.invoke(mock_function, arguments=KernelArguments())
assert result is None
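# The assertion above pins down the intended contract: when a function body
# raises OperationCancelledException, Kernel.invoke treats it as a graceful
# cancellation and returns None instead of propagating the exception.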


@pytest.mark.asyncio
async def test_invoke_stream_function(kernel: Kernel, create_mock_function):
mock_function = create_mock_function(name="test_function")
