From bde716991bfe436d717cdacd8045d589056e742f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20Kir=C3=A1ly?= Date: Fri, 30 Aug 2024 09:04:02 +0100 Subject: [PATCH] [MNT] add minimal dependency management utilities (#1628) Adds minimal utilities for dependency management, to determine the packages installed. This will be used later in isolation of dependencies that could be soft dependencies. Instead of dumping the new utils in the current `utils` file, a new folder `utils` is added, in which the current `utils` is moved one level lower, and a `_dependencies` submodule is also added. --- .github/workflows/test.yml | 2 +- .readthedocs.yml | 4 +- docs/requirements.txt | 1 + .../_templates/custom-module-template.rst | 4 +- docs/source/conf.py | 4 ++ pyproject.toml | 1 + pytorch_forecasting/metrics/_mqf2_utils.py | 14 ++++- pytorch_forecasting/utils/__init__.py | 51 +++++++++++++++++++ pytorch_forecasting/utils/_dependencies.py | 40 +++++++++++++++ .../{utils.py => utils/_utils.py} | 0 10 files changed, 114 insertions(+), 7 deletions(-) create mode 100644 pytorch_forecasting/utils/__init__.py create mode 100644 pytorch_forecasting/utils/_dependencies.py rename pytorch_forecasting/{utils.py => utils/_utils.py} (100%) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 3a1380fc..b15e14e0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -103,7 +103,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v1 with: - python-version: 3.8 + python-version: 3.11 - name: Cache pip uses: actions/cache@v2 diff --git a/.readthedocs.yml b/.readthedocs.yml index e14ec7ac..82fc2f91 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -9,7 +9,7 @@ version: 2 # reference: https://docs.readthedocs.io/en/stable/config-file/v2.html#sphinx sphinx: configuration: docs/source/conf.py - fail_on_warning: true + # fail_on_warning: true # Build documentation with MkDocs #mkdocs: @@ -21,6 +21,6 @@ formats: # Optionally set the version of Python and requirements required to build your docs python: - version: 3.8 + version: 3.11 install: - requirements: docs/requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt index c68cd39e..bd746e82 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -18,3 +18,4 @@ nbconvert >=6.3.0 recommonmark >=0.7.1 pytorch-optimizer >=2.5.1 fastapi >0.80 +cpflows diff --git a/docs/source/_templates/custom-module-template.rst b/docs/source/_templates/custom-module-template.rst index 86174798..508b70b1 100644 --- a/docs/source/_templates/custom-module-template.rst +++ b/docs/source/_templates/custom-module-template.rst @@ -53,14 +53,14 @@ {% endblock %} {% block modules %} -{% if modules %} +{% if all_modules %} .. rubric:: Modules .. 
autosummary:: :toctree: :template: custom-module-template.rst :recursive: -{% for item in modules %} +{% for item in all_modules %} {{ item }} {%- endfor %} {% endif %} diff --git a/docs/source/conf.py b/docs/source/conf.py index f6ce9562..60128395 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -176,3 +176,7 @@ def setup(app: Sphinx): intersphinx_mapping = { "sklearn": ("https://scikit-learn.org/stable/", None), } + +suppress_warnings = [ + "autosummary.import_cycle", +] diff --git a/pyproject.toml b/pyproject.toml index 3c5ed511..fe22fd69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -98,6 +98,7 @@ dev = [ "pytest-dotenv>=0.5.2,<1.0.0", "tensorboard>=2.12.1,<3.0.0", "pandoc>=2.3,<3.0.0", + "cpflows", ] github-actions = ["pytest-github-actions-annotate-failures"] diff --git a/pytorch_forecasting/metrics/_mqf2_utils.py b/pytorch_forecasting/metrics/_mqf2_utils.py index 927ee56b..5dffbab4 100644 --- a/pytorch_forecasting/metrics/_mqf2_utils.py +++ b/pytorch_forecasting/metrics/_mqf2_utils.py @@ -12,12 +12,13 @@ class DeepConvexNet(DeepConvexFlow): r""" Class that takes a partially input convex neural network (picnn) as input and equips it with functions of logdet - computation (both estimation and exact computation) + computation (both estimation and exact computation). This class is based on DeepConvexFlow of the CP-Flow repo (https://github.com/CW-Huang/CP-Flow) For details of the logdet estimator, see ``Convex potential flows: Universal probability distributions with optimal transport and convex optimization`` + Parameters ---------- picnn @@ -94,6 +95,7 @@ class SequentialNet(SequentialFlow): layers and provides energy score computation This class is based on SequentialFlow of the CP-Flow repo (https://github.com/CW-Huang/CP-Flow) + Parameters ---------- networks @@ -116,6 +118,7 @@ def es_sample(self, hidden_state: torch.Tensor, dimension: int) -> torch.Tensor: """ Auxiliary function for energy score computation Drawing samples conditioned on the hidden state + Parameters ---------- hidden_state @@ -159,6 +162,7 @@ def energy_score( h_i is the hidden state associated with z_i, and es_num_samples is the number of samples drawn for each of w, w', w'' in energy score approximation + Parameters ---------- z @@ -224,6 +228,7 @@ class MQF2Distribution(Distribution): Distribution class for the model MQF2 proposed in the paper ``Multivariate Quantile Function Forecaster`` by Kan, Aubet, Januschowski, Park, Benidis, Ruthotto, Gasthaus + Parameters ---------- picnn @@ -290,6 +295,7 @@ def stack_sliding_view(self, z: torch.Tensor) -> torch.Tensor: over the observations z Then, reshapes the observations into a 2-dimensional tensor for further computation + Parameters ---------- z @@ -317,6 +323,7 @@ def log_prob(self, z: torch.Tensor) -> torch.Tensor: """ Computes the log likelihood log(g(z)) + logdet(dg(z)/dz), where g is the gradient of the picnn + Parameters ---------- z @@ -346,6 +353,7 @@ def energy_score(self, z: torch.Tensor) -> torch.Tensor: h_i is the hidden state associated with z_i, and es_num_samples is the number of samples drawn for each of w, w', w'' in energy score approximation + Parameters ---------- z @@ -370,6 +378,7 @@ def energy_score(self, z: torch.Tensor) -> torch.Tensor: def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: """ Generates the sample paths + Parameters ---------- sample_shape @@ -377,7 +386,7 @@ def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: Returns ------- sample_paths - Tesnor of shape 
(batch_size, *sample_shape, prediction_length) + Tesnor of shape (batch_size, * sample_shape, prediction_length) """ numel_batch = self.numel_batch @@ -407,6 +416,7 @@ def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: def quantile(self, alpha: torch.Tensor, hidden_state: Optional[torch.Tensor] = None) -> torch.Tensor: """ Generates the predicted paths associated with the quantile levels alpha + Parameters ---------- alpha diff --git a/pytorch_forecasting/utils/__init__.py b/pytorch_forecasting/utils/__init__.py new file mode 100644 index 00000000..16b39215 --- /dev/null +++ b/pytorch_forecasting/utils/__init__.py @@ -0,0 +1,51 @@ +""" +PyTorch Forecasting package for timeseries forecasting with PyTorch. +""" + +from pytorch_forecasting.utils._utils import ( + InitialParameterRepresenterMixIn, + OutputMixIn, + TupleOutputMixIn, + apply_to_list, + autocorrelation, + concat_sequences, + create_mask, + detach, + get_embedding_size, + groupby_apply, + integer_histogram, + masked_op, + move_to_device, + padded_stack, + profile, + redirect_stdout, + repr_class, + to_list, + unpack_sequence, + unsqueeze_like, +) + +__all__ = [ + "InitialParameterRepresenterMixIn", + "OutputMixIn", + "TupleOutputMixIn", + "apply_to_list", + "autocorrelation", + "get_embedding_size", + "concat_sequences", + "create_mask", + "to_list", + "RecurrentNetwork", + "DecoderMLP", + "detach", + "masked_op", + "move_to_device", + "integer_histogram", + "groupby_apply", + "padded_stack", + "profile", + "redirect_stdout", + "repr_class", + "unpack_sequence", + "unsqueeze_like", +] diff --git a/pytorch_forecasting/utils/_dependencies.py b/pytorch_forecasting/utils/_dependencies.py new file mode 100644 index 00000000..3d4f1279 --- /dev/null +++ b/pytorch_forecasting/utils/_dependencies.py @@ -0,0 +1,40 @@ +"""Utilities for managing dependencies. + +Copied from sktime/skbase. +""" + +from functools import lru_cache + + +@lru_cache +def _get_installed_packages_private(): + """Get a dictionary of installed packages and their versions. + + Same as _get_installed_packages, but internal to avoid mutating the lru_cache + by accident. + """ + from importlib.metadata import distributions, version + + dists = distributions() + package_names = {dist.metadata["Name"] for dist in dists} + package_versions = {pkg_name: version(pkg_name) for pkg_name in package_names} + # developer note: + # we cannot just use distributions naively, + # because the same top level package name may appear *twice*, + # e.g., in a situation where a virtual env overrides a base env, + # such as in deployment environments like databricks. + # the "version" contract ensures we always get the version that corresponds + # to the importable distribution, i.e., the top one in the sys.path. + return package_versions + + +def _get_installed_packages(): + """Get a dictionary of installed packages and their versions. + + Returns + ------- + dict : dictionary of installed packages and their versions + keys are PEP 440 compatible package names, values are package versions + MAJOR.MINOR.PATCH version format is used for versions, e.g., "1.2.3" + """ + return _get_installed_packages_private().copy() diff --git a/pytorch_forecasting/utils.py b/pytorch_forecasting/utils/_utils.py similarity index 100% rename from pytorch_forecasting/utils.py rename to pytorch_forecasting/utils/_utils.py
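
Usage sketch (not part of this patch): the commit message says these utilities will later be used to isolate soft dependencies. Below is a minimal, hypothetical example of how `_get_installed_packages` could gate an optional package such as `cpflows` before importing code that needs it; the `_check_soft_dependency` helper is illustrative only and does not exist in the codebase.

```python
"""Illustrative soft-dependency gate built on the new utilities (hypothetical)."""

from pytorch_forecasting.utils._dependencies import _get_installed_packages


def _check_soft_dependency(pkg_name: str) -> None:
    """Raise an informative error if an optional package is not installed.

    Hypothetical helper, shown for illustration; not part of this patch.
    """
    if pkg_name not in _get_installed_packages():
        raise ImportError(
            f"{pkg_name!r} is required for this feature but is not installed; "
            f"install it, for example via `pip install {pkg_name}`."
        )


# e.g., guard code paths that rely on the optional cpflows package
_check_soft_dependency("cpflows")
```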
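
A second short check (again illustrative, not part of the patch) of the design choice documented in `_dependencies.py`: the `lru_cache` sits on the private function and the public `_get_installed_packages` hands out a copy, so a caller mutating the returned dictionary cannot corrupt the cached result.

```python
from pytorch_forecasting.utils._dependencies import _get_installed_packages

pkgs = _get_installed_packages()
pkgs["not-a-real-package"] = "0.0.0"  # mutate only the copy handed to the caller

# the cached dict inside the private, lru_cache-wrapped function is untouched
assert "not-a-real-package" not in _get_installed_packages()
```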