Divide up integration tests (#224)
* refactor tests, and only run integration tests on `main`

* set test dir in step 1

* add runs-on

* divide out workflows

* format

* tidy

* fix

* use tests dir

* add comment

* change check to repo owner

* tidy

* change to !- pull request

* update comment

* echo

* use github output

* run second job if push

* format

* pytest.mark.integration

* add timeout

* remove timeout from merge

* turn normalised error into percent

* limit metrics to 3sf

* update site used in test_evaluation script to give non nan result

* add pytest.ini file to mark integration tests

* update forecast test with no ts

* update forecast test with no ts spelling

---------

Co-authored-by: Megawattz <[email protected]>
peterdudfield and zakwatts authored Jan 24, 2025
1 parent d00f5a8 commit 6d0e697
Showing 18 changed files with 57 additions and 34 deletions.
17 changes: 16 additions & 1 deletion .github/workflows/pytest.yaml
@@ -8,13 +8,28 @@ on:
- cron: "0 12 * * 1"
pull_request_target:
types: [opened, synchronize, reopened, ready_for_review]

jobs:
call-run-python-tests:
call-run-python-tests-unit:
uses: openclimatefix/.github/.github/workflows/python-test.yml@issue/pip-all
with:
# pytest-cov looks at this folder
pytest_cov_dir: "quartz_solar_forecast"
os_list: '["ubuntu-latest"]'
python-version: "['3.11']"
extra_commands: echo "HF_TOKEN=${{ vars.HF_TOKEN }}" > .env
pytest_numcpus: '1'
test_dir: tests/unit

call-run-python-tests-all:
# only run on push, not external PR
uses: openclimatefix/.github/.github/workflows/python-test.yml@issue/pip-all
if: github.event_name == 'push'
with:
# pytest-cov looks at this folder
pytest_cov_dir: "quartz_solar_forecast"
os_list: '["ubuntu-latest"]'
python-version: "['3.11']"
extra_commands: echo "HF_TOKEN=${{ vars.HF_TOKEN }}" > .env
pytest_numcpus: '1'
test_dir: tests
3 changes: 3 additions & 0 deletions pytest.ini
@@ -0,0 +1,3 @@
[pytest]
markers =
integration: marks tests as integration tests
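
Not part of the diff: a minimal sketch of how a marker registered in pytest.ini is typically applied and selected; the file and test names below are illustrative, not taken from this PR.

# tests/integration/test_example.py (hypothetical file)
import pytest


@pytest.mark.integration
def test_talks_to_external_services():
    # Run only the marked tests with:  pytest -m integration
    # Skip them locally with:          pytest -m "not integration"
    assert True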
4 changes: 2 additions & 2 deletions quartz_solar_forecast/eval/metrics.py
@@ -35,7 +35,7 @@ def metrics(results_df: pd.DataFrame, pv_metadata: pd.DataFrame, include_night:
.mean(),
4,
)
print(f"MAE: {mae} kw, normalized {mae_normalized} %")
print(f"MAE: {mae} kw, normalized {100*mae_normalized} %")

# calculate metrics over the different horizons hours
# find all unique horizon_hours
@@ -74,7 +74,7 @@ def metrics(results_df: pd.DataFrame, pv_metadata: pd.DataFrame, include_night:
)

print(
f"MAE for horizon {horizon_group}: {mae} +- {1.96*sem}. mae_normalized: {100*mae_normalized} %"
f"MAE for horizon {horizon_group}: {mae} +- {1.96*sem:.3g}. mae_normalized: {100*mae_normalized:.3g} %"
)

# TODO add more metrics using ocf_ml_metrics
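
Not part of the diff: a small illustration of the formatting change above. Multiplying the normalised error by 100 turns a fraction into a percentage, and the ":.3g" format spec limits output to three significant figures. The numeric values here are made up.

mae_normalized = 0.0834567   # hypothetical normalised MAE, as a fraction of capacity
sem = 0.0123456              # hypothetical standard error of the mean

print(f"normalized {100 * mae_normalized:.3g} %")   # -> normalized 8.35 %
print(f"+- {1.96 * sem:.3g}")                       # -> +- 0.0242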
tests/eval/test_evaluation.py → tests/integration/eval/test_evaluation.py
@@ -1,8 +1,10 @@
from quartz_solar_forecast.evaluation import run_eval
import tempfile
import pandas as pd
import pytest


@pytest.mark.integration
def test_run_eval():

# create a fake dataframe
@@ -15,7 +17,7 @@ def test_run_eval():
"pv_id",
"timestamp",
],
data=[[8215, "2021-01-26 01:15:00"], [8215, "2021-01-30 16:30:00"]],
data=[[7593, "2021-08-21 12:00:00"], [7593, "2021-10-04 20:00:00"]],
)

testset_filename = tmpdirname + "/test_dataset.csv"
2 changes: 2 additions & 0 deletions tests/eval/test_nwp.py → tests/integration/eval/test_nwp.py
@@ -1,8 +1,10 @@
from quartz_solar_forecast.eval.nwp import get_nwp
import pandas as pd
import pytest


# can take ~ 1 minute to run
@pytest.mark.integration
def test_get_nwp():
# make test dataset file
test_set_df = pd.DataFrame(
4 changes: 3 additions & 1 deletion tests/eval/test_pv.py → tests/integration/eval/test_pv.py
@@ -1,7 +1,8 @@
from quartz_solar_forecast.eval.pv import get_pv_truth, get_pv_metadata
import pandas as pd
import pytest


@pytest.mark.integration
def test_get_pv_metadata():
test_set_df = pd.DataFrame(
[
@@ -16,6 +17,7 @@ def test_get_pv_metadata():
assert "latitude" in metadata_df.columns


@pytest.mark.integration
def test_get_pv():
# make test dataset file
test_set_df = pd.DataFrame(
29 changes: 0 additions & 29 deletions tests/test_forecast_no_ts.py

This file was deleted.

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
28 changes: 28 additions & 0 deletions tests/unit/test_forecast_no_ts.py
@@ -0,0 +1,28 @@
import pandas as pd
from quartz_solar_forecast.forecast import run_forecast
from quartz_solar_forecast.pydantic_models import PVSite


def test_run_forecast_no_ts():
# make input data
site = PVSite(latitude=51.75, longitude=-1.25, capacity_kwp=1.25)

current_ts = pd.Timestamp.now()

# run gradient boosting model with no ts
predictions_df = run_forecast(site=site, model="gb")
# check current ts agrees with dataset
assert predictions_df.index.min() >= current_ts - pd.Timedelta(hours=1)

print(predictions_df)
print(f"Current time: {current_ts}")
print(f"Max: {predictions_df['power_kw'].max()}")

# run xgb model with no ts
predictions_df = run_forecast(site=site, model="xgb")
# check current ts agrees with dataset
assert predictions_df.index.min() >= current_ts - pd.Timedelta(hours=1)

print(predictions_df)
print(f"Current time: {current_ts}")
print(f"Max: {predictions_df['power_kw'].max()}")
File renamed without changes.
File renamed without changes.
