
Commit

Merge remote-tracking branch 'upstream/main' into opt
fnhirwa committed Jan 14, 2025
2 parents e4776e3 + c9375cf commit 9b352c6
Showing 39 changed files with 1,870 additions and 1,133 deletions.
13 changes: 8 additions & 5 deletions docs/source/conf.py
@@ -94,11 +94,14 @@ def get_by_name(string: str):
"""
Import by name and return imported module/function/class
-Args:
-    string (str): module/function/class to import, e.g. 'pandas.read_csv' will return read_csv function as
-        defined by pandas
-Returns:
+Parameters
+----------
+string (str):
+    module/function/class to import, e.g. 'pandas.read_csv'
+    will return read_csv function as defined by pandas
+Returns
+-------
imported object
"""
class_name = string.split(".")[-1]
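The hunk above only converts the get_by_name docstring from Google style to numpydoc style. For orientation, a minimal sketch, not the repository's exact implementation, of what an import-by-name helper of this shape typically does, consistent with the visible class_name = string.split(".")[-1] line:

import importlib


def get_by_name(string: str):
    """Resolve a dotted path such as 'pandas.read_csv' to the object it names (sketch)."""
    class_name = string.split(".")[-1]  # e.g. "read_csv"
    module_name = ".".join(string.split(".")[:-1])  # e.g. "pandas"
    module = importlib.import_module(module_name)  # import the parent module
    return getattr(module, class_name)  # e.g. the pandas.read_csv function


# usage: get_by_name("pandas.read_csv") returns pandas' read_csv function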
2 changes: 1 addition & 1 deletion examples/ar.py
@@ -98,7 +98,7 @@
# deepar.hparams.log_val_interval = -1
# trainer.limit_train_batches = 1.0
# res = Tuner(trainer).lr_find(
-# deepar, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2
+# deepar, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2 # noqa: E501
# )

# print(f"suggested learning rate: {res.suggestion()}")
2 changes: 1 addition & 1 deletion examples/nbeats.py
@@ -87,7 +87,7 @@
# trainer.limit_train_batches = 1.0
# # run learning rate finder
# res = Tuner(trainer).lr_find(
-# net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2
+# net, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2 # noqa: E501
# )
# print(f"suggested learning rate: {res.suggestion()}")
# fig = res.plot(show=True, suggest=True)
4 changes: 2 additions & 2 deletions examples/stallion.py
@@ -34,7 +34,7 @@
data["avg_volume_by_agency"] = data.groupby(
["time_idx", "agency"], observed=True
).volume.transform("mean")
-# data = data[lambda x: (x.sku == data.iloc[0]["sku"]) & (x.agency == data.iloc[0]["agency"])]
+# data = data[lambda x: (x.sku == data.iloc[0]["sku"]) & (x.agency == data.iloc[0]["agency"])] # noqa: E501
special_days = [
"easter_day",
"good_friday",
@@ -151,7 +151,7 @@
# trainer.limit_train_batches = 1.0
# # run learning rate finder
# res = Tuner(trainer).lr_find(
-# tft, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2
+# tft, train_dataloaders=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2 # noqa: E501
# )
# print(f"suggested learning rate: {res.suggestion()}")
# fig = res.plot(show=True, suggest=True)
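In all three example files the only change is appending # noqa: E501, so ruff's line-too-long check (E501) skips these long commented-out lines instead of forcing a reflow. For reference, a hedged sketch of what that commented-out learning-rate-finder block does when enabled, assuming trainer, model, and the dataloaders are built as in the examples and that Lightning 2.x provides the Tuner API:

from lightning.pytorch.tuner import Tuner


def suggest_learning_rate(trainer, model, train_dataloader, val_dataloader):
    # Run Lightning's learning-rate finder over the given dataloaders.
    res = Tuner(trainer).lr_find(
        model,
        train_dataloaders=train_dataloader,
        val_dataloaders=val_dataloader,
        min_lr=1e-5,
        max_lr=1e2,
    )
    print(f"suggested learning rate: {res.suggestion()}")
    fig = res.plot(show=True, suggest=True)  # loss vs. learning-rate curve
    fig.show()
    return res.suggestion()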
10 changes: 10 additions & 0 deletions pyproject.toml
@@ -136,7 +136,9 @@ exclude = [
".venv/",
".git/",
".history/",
"docs/source/tutorials/",
]
target-version = "py39"

[tool.ruff.lint]
select = ["E", "F", "W", "C4", "S"]
@@ -161,6 +163,11 @@ known-first-party = ["pytorch_forecasting"]
combine-as-imports = true
force-sort-within-sections = true

+[tool.ruff.lint.per-file-ignores]
+"pytorch_forecasting/data/timeseries.py" = [
+    "E501", # Line too long being fixed in #1746 To be removed after merging
+]

[tool.black]
line-length = 88
include = '\.pyi?$'
@@ -188,3 +195,6 @@ exclude = '''
[tool.nbqa.mutate]
ruff = 1
black = 1

+[tool.nbqa.exclude]
+ruff = "docs/source/tutorials/" # ToDo: Remove this when fixing notebooks
4 changes: 2 additions & 2 deletions pytorch_forecasting/data/__init__.py
@@ -1,8 +1,8 @@
"""
Datasets, etc. for timeseries data.
-Handling timeseries data is not trivial. It requires special treatment. This sub-package provides the necessary tools
-to abstracts the necessary work.
+Handling timeseries data is not trivial. It requires special treatment.
+This sub-package provides the necessary tools to abstracts the necessary work.
"""

from pytorch_forecasting.data.encoders import (
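The pytorch_forecasting.data docstring above is only re-wrapped to satisfy the new line-length limit. As a hedged illustration of what the sub-package exposes, a minimal sketch using the released TimeSeriesDataSet and GroupNormalizer API; the DataFrame and its column names are hypothetical placeholders:

import pandas as pd

from pytorch_forecasting.data import GroupNormalizer, TimeSeriesDataSet

# toy panel: two groups ("agency" a/b), 30 time steps each
df = pd.DataFrame(
    {
        "time_idx": list(range(30)) * 2,
        "volume": [float(i) for i in range(60)],
        "agency": ["a"] * 30 + ["b"] * 30,
        "sku": ["x"] * 60,
    }
)

dataset = TimeSeriesDataSet(
    df,
    time_idx="time_idx",
    target="volume",
    group_ids=["agency", "sku"],
    max_encoder_length=12,
    max_prediction_length=6,
    time_varying_unknown_reals=["volume"],
    target_normalizer=GroupNormalizer(groups=["agency", "sku"]),
)
dataloader = dataset.to_dataloader(train=True, batch_size=8)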