diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index ab9ac7f32..9754a2888 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -53,7 +53,7 @@ jobs:
     - name: Running all tests
       run: |
        if [ "${{ matrix.python-version }}" == "3.8" ]; then
-          make run_all_tests WANDB_API_KEY=${{ secrets.WANDB_API_KEY }} NEPTUNE_API_TOKEN=${{ secrets.NEPTUNE_API_TOKEN }} DOWNLOAD_ZOO_IN_TESTS=yes
+          make run_all_tests WANDB_API_KEY=${{ secrets.WANDB_API_KEY }} NEPTUNE_API_TOKEN=${{ secrets.NEPTUNE_API_TOKEN }} DOWNLOAD_ZOO_IN_TESTS=yes TEST_CLOUD_LOGGERS=yes
        else
          make run_all_tests WANDB_API_KEY=${{ secrets.WANDB_API_KEY }} NEPTUNE_API_TOKEN=${{ secrets.NEPTUNE_API_TOKEN }}
        fi
diff --git a/README.md b/README.md
index b2a3bc338..2ca0bf479 100644
--- a/README.md
+++ b/README.md
@@ -392,7 +392,7 @@ from oml.miners.inbatch_all_tri import AllTripletsMiner
 from oml.models import ViTExtractor
 from oml.samplers.balance import BalanceSampler
 from oml.utils.download_mock_dataset import download_mock_dataset
-from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
+from oml.lightning.pipelines.logging import NeptunePipelineLogger, TensorBoardPipelineLogger, WandBPipelineLogger
 
 dataset_root = "mock_dataset/"
 df_train, df_val = download_mock_dataset(dataset_root)
@@ -413,15 +413,15 @@ val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=4)
 metric_callback = MetricValCallback(metric=EmbeddingMetrics(extra_keys=[train_dataset.paths_key,]), log_images=True)
 
 # 1) Logging with Tensorboard
-logger = TensorBoardLogger(".")
+logger = TensorBoardPipelineLogger(".")
 
 # 2) Logging with Neptune
-# logger = NeptuneLogger(api_key="", project="", log_model_checkpoints=False)
+# logger = NeptunePipelineLogger(api_key="", project="", log_model_checkpoints=False)
 
 # 3) Logging with Weights and Biases
 # import os
 # os.environ["WANDB_API_KEY"] = ""
-# logger = WandbLogger(project="test_project", log_model=False)
+# logger = WandBPipelineLogger(project="test_project", log_model=False)
 
 # run
 pl_model = ExtractorModule(extractor, criterion, optimizer)
diff --git a/ci/requirements_optional.txt b/ci/requirements_optional.txt
index e2bcfb70d..d45c108f2 100644
--- a/ci/requirements_optional.txt
+++ b/ci/requirements_optional.txt
@@ -1,4 +1,4 @@
 grad-cam==1.4.6
 jupyter>=1.0.0
-neptune-client>=0.14.2, <1.0.0
+neptune>=1.0.0, <1.10.1
 wandb>=0.15.4
diff --git a/docs/readme/examples_source/extractor/train_val_pl.md b/docs/readme/examples_source/extractor/train_val_pl.md
index e9cef85e3..b4d379442 100644
--- a/docs/readme/examples_source/extractor/train_val_pl.md
+++ b/docs/readme/examples_source/extractor/train_val_pl.md
@@ -16,7 +16,7 @@ from oml.miners.inbatch_all_tri import AllTripletsMiner
 from oml.models import ViTExtractor
 from oml.samplers.balance import BalanceSampler
 from oml.utils.download_mock_dataset import download_mock_dataset
-from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
+from oml.lightning.pipelines.logging import NeptunePipelineLogger, TensorBoardPipelineLogger, WandBPipelineLogger
 
 dataset_root = "mock_dataset/"
 df_train, df_val = download_mock_dataset(dataset_root)
@@ -37,15 +37,15 @@ val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=4)
 metric_callback = MetricValCallback(metric=EmbeddingMetrics(extra_keys=[train_dataset.paths_key,]), log_images=True)
 
 # 1) Logging with Tensorboard
-logger = TensorBoardLogger(".")
+logger = TensorBoardPipelineLogger(".")
 
 # 2) Logging with Neptune
-# logger = NeptuneLogger(api_key="", project="", log_model_checkpoints=False)
+# logger = NeptunePipelineLogger(api_key="", project="", log_model_checkpoints=False)
 
 # 3) Logging with Weights and Biases
 # import os
 # os.environ["WANDB_API_KEY"] = ""
-# logger = WandbLogger(project="test_project", log_model=False)
+# logger = WandBPipelineLogger(project="test_project", log_model=False)
 
 # run
 pl_model = ExtractorModule(extractor, criterion, optimizer)
diff --git a/oml/interfaces/loggers.py b/oml/interfaces/loggers.py
new file mode 100644
index 000000000..d3f9d8d3e
--- /dev/null
+++ b/oml/interfaces/loggers.py
@@ -0,0 +1,18 @@
+from abc import abstractmethod
+
+from matplotlib import pyplot as plt
+from pytorch_lightning.loggers import Logger as LightningLogger
+
+from oml.const import TCfg
+
+
+class IFigureLogger:
+    @abstractmethod
+    def log_figure(self, fig: plt.Figure, title: str, idx: int) -> None:
+        raise NotImplementedError()
+
+
+class IPipelineLogger(LightningLogger, IFigureLogger):
+    @abstractmethod
+    def log_pipeline_info(self, cfg: TCfg) -> None:
+        raise NotImplementedError()
diff --git a/oml/lightning/callbacks/metric.py b/oml/lightning/callbacks/metric.py
index 882a19d36..40aad10b0 100644
--- a/oml/lightning/callbacks/metric.py
+++ b/oml/lightning/callbacks/metric.py
@@ -1,19 +1,18 @@
+import warnings
 from math import ceil
 from typing import Any, Optional
 
 import matplotlib.pyplot as plt
-import numpy as np
 import pytorch_lightning as pl
 from pytorch_lightning import Callback
-from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
 from pytorch_lightning.utilities.types import STEP_OUTPUT
 from torch.utils.data import DataLoader
 
 from oml.const import LOG_IMAGE_FOLDER
 from oml.ddp.patching import check_loaders_is_patched, patch_dataloader_to_ddp
+from oml.interfaces.loggers import IFigureLogger
 from oml.interfaces.metrics import IBasicMetric, IMetricDDP, IMetricVisualisable
 from oml.lightning.modules.ddp import ModuleDDP
-from oml.utils.images.images import figure_to_nparray
 from oml.utils.misc import flatten_dict
 
 
@@ -102,23 +101,16 @@ def _log_images(self, pl_module: pl.LightningModule) -> None:
         if not isinstance(self.metric, IMetricVisualisable):
             return
 
+        if not isinstance(pl_module.logger, IFigureLogger):
+            warnings.warn(
+                f"Unexpected logger {pl_module.logger}. Figures have not been saved. "
+                f"Please use a subclass of {IFigureLogger}."
+            )
+            return
+
         for fig, metric_log_str in zip(*self.metric.visualize()):
             log_str = f"{LOG_IMAGE_FOLDER}/{metric_log_str}"
-            if isinstance(pl_module.logger, NeptuneLogger):
-                from neptune.new.types import File  # this is the optional dependency
-
-                pl_module.logger.experiment[log_str].log(File.as_image(fig))
-            elif isinstance(pl_module.logger, WandbLogger):
-                fig_img = figure_to_nparray(fig)
-                pl_module.logger.log_image(images=[fig_img], key=metric_log_str)
-            elif isinstance(pl_module.logger, TensorBoardLogger):
-                fig_img = figure_to_nparray(fig)
-                pl_module.logger.experiment.add_image(
-                    log_str, np.transpose(fig_img, (2, 0, 1)), pl_module.current_epoch
-                )
-            else:
-                raise ValueError(f"Logging with {type(pl_module.logger)} is not supported yet.")
-
+            pl_module.logger.log_figure(fig=fig, title=log_str, idx=pl_module.current_epoch)
             plt.close(fig=fig)
 
     def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
diff --git a/oml/lightning/pipelines/logging.py b/oml/lightning/pipelines/logging.py
new file mode 100644
index 000000000..b73f447ba
--- /dev/null
+++ b/oml/lightning/pipelines/logging.py
@@ -0,0 +1,101 @@
+import warnings
+from pathlib import Path
+from typing import Any, Dict, List
+
+import matplotlib.pyplot as plt
+import numpy as np
+from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
+
+from oml.const import OML_PATH, TCfg
+from oml.interfaces.loggers import IPipelineLogger
+from oml.registry.transforms import save_transforms_as_files
+from oml.utils.images.images import figure_to_nparray
+from oml.utils.misc import dictconfig_to_dict, flatten_dict
+
+
+def prepare_config_to_logging(cfg: TCfg) -> Dict[str, Any]:
+    cwd = Path.cwd().name
+    flattened_dict = flatten_dict({**dictconfig_to_dict(cfg), **{"dir": cwd}}, sep="|")
+    return flattened_dict
+
+
+def prepare_tags(cfg: TCfg) -> List[str]:
+    cwd = Path.cwd().name
+    tags = list(cfg.get("tags", [])) + [cfg.get("postfix", "")] + [cwd]
+    tags = list(filter(lambda x: len(x) > 0, tags))
+    return tags
+
+
+class NeptunePipelineLogger(NeptuneLogger, IPipelineLogger):
+    def log_pipeline_info(self, cfg: TCfg) -> None:
+        warnings.warn(
+            "Unfortunately, when using Neptune, you may find that long experiments get "
+            "stuck and stop responding. It's not an issue on OML's side, so we cannot fix it."
+        )
+        self.log_hyperparams(prepare_config_to_logging(cfg))
+
+        tags = prepare_tags(cfg)
+        self.run["sys/tags"].add(tags)
+
+        # log transforms as files
+        for key, transforms_file in save_transforms_as_files(cfg):
+            self.run[key].upload(transforms_file)
+
+        # log source code
+        source_files = list(map(lambda x: str(x), OML_PATH.glob("**/*.py"))) + list(
+            map(lambda x: str(x), OML_PATH.glob("**/*.yaml"))
+        )
+        self.run["code"].upload_files(source_files)
+
+        # log dataframe
+        self.run["dataset"].upload(str(Path(cfg["dataset_root"]) / cfg["dataframe_name"]))
+
+    def log_figure(self, fig: plt.Figure, title: str, idx: int) -> None:
+        from neptune.types import File  # this is the optional dependency
+
+        self.experiment[title].log(File.as_image(fig))
+
+
+class WandBPipelineLogger(WandbLogger, IPipelineLogger):
+    def log_pipeline_info(self, cfg: TCfg) -> None:
+        # this is the optional dependency
+        import wandb
+
+        self.log_hyperparams(prepare_config_to_logging(cfg))
+
+        tags = prepare_tags(cfg)
+        self.experiment.tags = tags
+
+        # log transforms as files
+        keys_files = save_transforms_as_files(cfg)
+        if keys_files:
+            transforms = wandb.Artifact("transforms", type="transforms")
+            for _, transforms_file in keys_files:
+                transforms.add_file(transforms_file)
+            self.experiment.log_artifact(transforms)
+
+        # log source code
+        code = wandb.Artifact("source_code", type="code")
+        code.add_dir(OML_PATH, name="oml")
+        self.experiment.log_artifact(code)
+
+        # log dataset
+        dataset = wandb.Artifact("dataset", type="dataset")
+        dataset.add_file(str(Path(cfg["dataset_root"]) / cfg["dataframe_name"]))
+        self.experiment.log_artifact(dataset)
+
+    def log_figure(self, fig: plt.Figure, title: str, idx: int) -> None:
+        fig_img = figure_to_nparray(fig)
+        self.log_image(images=[fig_img], key=title)
+
+
+class TensorBoardPipelineLogger(TensorBoardLogger, IPipelineLogger):
+    def log_pipeline_info(self, cfg: TCfg) -> None:
+        pass
+
+    def log_figure(self, fig: plt.Figure, title: str, idx: int) -> None:
+        fig_img = figure_to_nparray(fig)
+        self.experiment.add_image(title, np.transpose(fig_img, (2, 0, 1)), idx)
+
+
+__all__ = ["IPipelineLogger", "TensorBoardPipelineLogger", "WandBPipelineLogger", "NeptunePipelineLogger"]
diff --git a/oml/lightning/pipelines/parser.py b/oml/lightning/pipelines/parser.py
index 2a8516961..213e98a95 100644
--- a/oml/lightning/pipelines/parser.py
+++ b/oml/lightning/pipelines/parser.py
@@ -1,21 +1,19 @@
-import warnings
 from pathlib import Path
 from typing import Any, Dict, Optional
 
 import torch
 from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
-from pytorch_lightning.loggers.logger import Logger
 from pytorch_lightning.strategies import DDPStrategy
 
-from oml.const import OML_PATH, TCfg
+from oml.const import TCfg
 from oml.datasets.base import DatasetWithLabels
+from oml.interfaces.loggers import IPipelineLogger
 from oml.interfaces.samplers import IBatchSampler
+from oml.lightning.pipelines.logging import TensorBoardPipelineLogger
 from oml.registry.loggers import get_logger_by_cfg
 from oml.registry.samplers import SAMPLERS_CATEGORIES_BASED, get_sampler_by_cfg
 from oml.registry.schedulers import get_scheduler_by_cfg
-from oml.registry.transforms import save_transforms_as_files
-from oml.utils.misc import dictconfig_to_dict, flatten_dict
+from oml.utils.misc import dictconfig_to_dict
 
 
 def parse_engine_params_from_config(cfg: TCfg) -> Dict[str, Any]:
@@ -59,82 +57,9 @@ def check_is_config_for_ddp(cfg: TCfg) -> bool:
     return bool(cfg["strategy"])
 
 
-def parse_logger_from_config(cfg: TCfg) -> Logger:
-    logger = TensorBoardLogger(".") if cfg.get("logger", None) is None else get_logger_by_cfg(cfg["logger"])
-    return logger
-
-
-def initialize_logging(cfg: TCfg) -> Logger:
-    logger = parse_logger_from_config(cfg)
-    cwd = Path.cwd().name
-
-    dict_to_log = flatten_dict({**dictconfig_to_dict(cfg), **{"dir": cwd}}, sep="|")
-
-    tags = list(cfg.get("tags", [])) + [cfg.get("postfix", "")] + [cwd]
-
-    if isinstance(logger, NeptuneLogger):
-        warnings.warn(
-            "Unfortunately, in the case of using Neptune, you may experience that long experiments are"
-            "stacked and not responding. It's not an issue on OML's side, so, we cannot fix it."
-        )
-        logger.log_hyperparams(dict_to_log)
-        upload_files_to_neptune_cloud(logger, cfg)
-        logger.run["sys/tags"].add(tags)
-
-    elif isinstance(logger, WandbLogger):
-        logger.log_hyperparams(dict_to_log)
-        upload_files_to_wandb_cloud(logger, cfg)
-        logger.experiment.tags = list(filter(lambda x: len(x) > 0, tags))  # it fails in the case of empty tag
-
-    elif isinstance(logger, TensorBoardLogger):
-        pass
-
-    else:
-        raise ValueError(f"Unexpected logger {type(logger)}")
-
-    return logger
-
-
-def upload_files_to_neptune_cloud(logger: NeptuneLogger, cfg: TCfg) -> None:
-    assert isinstance(logger, NeptuneLogger)
-
-    # log transforms as files
-    for key, transforms_file in save_transforms_as_files(cfg):
-        logger.run[key].upload(transforms_file)
-
-    # log source code
-    source_files = list(map(lambda x: str(x), OML_PATH.glob("**/*.py"))) + list(
-        map(lambda x: str(x), OML_PATH.glob("**/*.yaml"))
-    )
-    logger.run["code"].upload_files(source_files)
-
-    # log dataset
-    logger.run["dataset"].upload(str(Path(cfg["dataset_root"]) / cfg["dataframe_name"]))
-
-
-def upload_files_to_wandb_cloud(logger: WandbLogger, cfg: TCfg) -> None:
-    # this is the optional dependency
-    import wandb
-
-    assert isinstance(logger, WandbLogger)
-
-    # log transforms as files
-    keys_files = save_transforms_as_files(cfg)
-    if keys_files:
-        transforms = wandb.Artifact("transforms", type="transforms")
-        for _, transforms_file in keys_files:
-            transforms.add_file(transforms_file)
-        logger.experiment.log_artifact(transforms)
-
-    # log source code
-    code = wandb.Artifact("source_code", type="code")
-    code.add_dir(OML_PATH, name="oml")
-    logger.experiment.log_artifact(code)
-
-    # log dataset
-    dataset = wandb.Artifact("dataset", type="dataset")
-    dataset.add_file(str(Path(cfg["dataset_root"]) / cfg["dataframe_name"]))
-    logger.experiment.log_artifact(dataset)
+def parse_logger_from_config(cfg: TCfg) -> IPipelineLogger:
+    logger = TensorBoardPipelineLogger(".") if cfg.get("logger", None) is None else get_logger_by_cfg(cfg["logger"])
+    return logger  # type: ignore
 
 
 def parse_scheduler_from_config(cfg: TCfg, optimizer: torch.optim.Optimizer) -> Dict[str, Any]:
@@ -182,9 +107,6 @@ def parse_ckpt_callback_from_config(cfg: TCfg) -> ModelCheckpoint:
 __all__ = [
     "parse_engine_params_from_config",
     "check_is_config_for_ddp",
-    "initialize_logging",
-    "upload_files_to_neptune_cloud",
-    "upload_files_to_wandb_cloud",
     "parse_scheduler_from_config",
     "parse_sampler_from_config",
     "parse_ckpt_callback_from_config",
diff --git a/oml/lightning/pipelines/train.py b/oml/lightning/pipelines/train.py
index edf802171..b59d8878b 100644
--- a/oml/lightning/pipelines/train.py
+++ b/oml/lightning/pipelines/train.py
@@ -11,9 +11,9 @@
 from oml.lightning.modules.extractor import ExtractorModule, ExtractorModuleDDP
 from oml.lightning.pipelines.parser import (
     check_is_config_for_ddp,
-    initialize_logging,
     parse_ckpt_callback_from_config,
     parse_engine_params_from_config,
+    parse_logger_from_config,
     parse_sampler_from_config,
     parse_scheduler_from_config,
 )
@@ -70,7 +70,9 @@ def extractor_training_pipeline(cfg: TCfg) -> None:
     cfg = dictconfig_to_dict(cfg)
     pprint(cfg)
 
-    logger = initialize_logging(cfg)
+
+    logger = parse_logger_from_config(cfg)
+    logger.log_pipeline_info(cfg)
 
     trainer_engine_params = parse_engine_params_from_config(cfg)
     is_ddp = check_is_config_for_ddp(trainer_engine_params)
diff --git a/oml/lightning/pipelines/train_postprocessor.py b/oml/lightning/pipelines/train_postprocessor.py
index 8d5e50511..97485328e 100644
--- a/oml/lightning/pipelines/train_postprocessor.py
+++ b/oml/lightning/pipelines/train_postprocessor.py
@@ -21,9 +21,9 @@
 )
 from oml.lightning.pipelines.parser import (
     check_is_config_for_ddp,
-    initialize_logging,
     parse_ckpt_callback_from_config,
     parse_engine_params_from_config,
+    parse_logger_from_config,
     parse_sampler_from_config,
     parse_scheduler_from_config,
 )
@@ -118,7 +118,9 @@ def postprocessor_training_pipeline(cfg: DictConfig) -> None:
     cfg = dictconfig_to_dict(cfg)
     pprint(cfg)
 
-    logger = initialize_logging(cfg)
+
+    logger = parse_logger_from_config(cfg)
+    logger.log_pipeline_info(cfg)
 
     trainer_engine_params = parse_engine_params_from_config(cfg)
     is_ddp = check_is_config_for_ddp(trainer_engine_params)
diff --git a/oml/registry/loggers.py b/oml/registry/loggers.py
index ca4f1e545..b5758c2b7 100644
--- a/oml/registry/loggers.py
+++ b/oml/registry/loggers.py
@@ -1,13 +1,20 @@
 import os
-from typing import Any, Dict, Union
-
-from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
-from pytorch_lightning.loggers.logger import Logger
+from typing import Any, Dict
 
 from oml.const import TCfg
+from oml.interfaces.loggers import IPipelineLogger
+from oml.lightning.pipelines.logging import (
+    NeptunePipelineLogger,
+    TensorBoardPipelineLogger,
+    WandBPipelineLogger,
+)
 from oml.utils.misc import dictconfig_to_dict
 
-LOGGERS_REGISTRY = {"wandb": WandbLogger, "neptune": NeptuneLogger, "tensorboard": TensorBoardLogger}
+LOGGERS_REGISTRY = {
+    "wandb": WandBPipelineLogger,
+    "neptune": NeptunePipelineLogger,
+    "tensorboard": TensorBoardPipelineLogger,
+}
 
 CLOUD_TOKEN_NAMES = {"wandb": "WANDB_API_KEY", "neptune": "NEPTUNE_API_TOKEN"}
 TOKEN_ERROR_MESSAGE = (
@@ -17,15 +24,15 @@
 )
 
 
-def get_logger(name: str, **kwargs: Dict[str, Any]) -> Logger:
+def get_logger(name: str, **kwargs: Dict[str, Any]) -> IPipelineLogger:
     if (name in CLOUD_TOKEN_NAMES) and (CLOUD_TOKEN_NAMES[name] not in os.environ):
         token_name = CLOUD_TOKEN_NAMES[name]
         raise ValueError(TOKEN_ERROR_MESSAGE.format(name.upper(), token_name, token_name))
 
-    return LOGGERS_REGISTRY[name](**kwargs)
+    return LOGGERS_REGISTRY[name](**kwargs)  # type: ignore
 
 
-def get_logger_by_cfg(cfg: TCfg) -> Union[bool, Logger]:
+def get_logger_by_cfg(cfg: TCfg) -> IPipelineLogger:
     cfg = dictconfig_to_dict(cfg)
     logger = get_logger(cfg["name"], **cfg["args"])
     return logger
diff --git a/oml/utils/misc.py b/oml/utils/misc.py
index 3842b7981..70e1d75be 100644
--- a/oml/utils/misc.py
+++ b/oml/utils/misc.py
@@ -7,7 +7,7 @@
 import torch
 from omegaconf import DictConfig, OmegaConf
 
-from oml.const import DOTENV_PATH, TCfg
+from oml.const import TCfg
 
 
 def find_value_ids(it: Iterable[Any], value: Any) -> List[int]:
@@ -68,12 +68,6 @@
     return dict(items)
 
 
-def load_dotenv() -> None:
-    import dotenv  # this is the optional dependency
-
-    dotenv.load_dotenv(DOTENV_PATH)
-
-
 def dictconfig_to_dict(cfg: TCfg) -> Dict[str, Any]:
     if isinstance(cfg, DictConfig):
         cfg = OmegaConf.to_container(cfg, resolve=True)
@@ -181,7 +175,6 @@ def find_first_occurrences(x: List[Hashable]) -> List[int]:
     "set_global_seed",
     "one_hot",
     "flatten_dict",
-    "load_dotenv",
     "dictconfig_to_dict",
     "smart_sample",
     "clip_max",
diff --git a/tests/test_oml/test_registry/test_registry.py b/tests/test_oml/test_registry/test_registry.py
index 1b8d11a78..6654d5722 100644
--- a/tests/test_oml/test_registry/test_registry.py
+++ b/tests/test_oml/test_registry/test_registry.py
@@ -2,13 +2,14 @@
 from pathlib import Path
 from typing import Any, Dict
 
+import dotenv
 import pytest
 import yaml
 from omegaconf import OmegaConf
 from torch import nn
 from torch.optim import Optimizer
 
-from oml.const import CONFIGS_PATH, TCfg
+from oml.const import CONFIGS_PATH, DOTENV_PATH, TCfg
 from oml.registry.loggers import LOGGERS_REGISTRY, get_logger
 from oml.registry.losses import LOSSES_REGISTRY, get_criterion
 from oml.registry.miners import MINERS_REGISTRY, get_miner
@@ -33,7 +34,7 @@
     get_transforms_for_pretrained,
     save_transforms_as_files,
 )
-from oml.utils.misc import dictconfig_to_dict, load_dotenv
+from oml.utils.misc import dictconfig_to_dict
 
 
 def get_sampler_kwargs_runtime() -> Any:
@@ -64,7 +65,7 @@ def get_opt() -> Optimizer:
     ],
 )
 def test_registry(folder_name, registry, factory_fun, runtime_args) -> None:
-    load_dotenv()  # we need to load tokens for cloud loggers (Neptune, W & B)
+    dotenv.load_dotenv(DOTENV_PATH)  # we need to load tokens for cloud loggers (Neptune, W & B)
 
     for obj_name in registry.keys():
         cfg = dictconfig_to_dict(OmegaConf.load(CONFIGS_PATH / folder_name / f"{obj_name}.yaml"))
diff --git a/tests/test_runs/test_pipelines/configs/train_with_bboxes.yaml b/tests/test_runs/test_pipelines/configs/train_with_bboxes.yaml
index 6b149766c..8833ac2b7 100644
--- a/tests/test_runs/test_pipelines/configs/train_with_bboxes.yaml
+++ b/tests/test_runs/test_pipelines/configs/train_with_bboxes.yaml
@@ -17,7 +17,7 @@ transforms_train:
     im_size: 32
 
 transforms_val:
-  name: norm_resize_torch
+  name: norm_resize_albu
   args:
     im_size: 48
 
@@ -68,6 +68,11 @@ metric_args:
   log_images: True
 
+logger:
+  name: wandb
+  args:
+    project: "test_project"
+
 metric_for_checkpointing: OVERALL/cmc/5
 
 max_epochs: 2
 
diff --git a/tests/test_runs/test_pipelines/configs/train_with_categories.yaml b/tests/test_runs/test_pipelines/configs/train_with_categories.yaml
index 0fad02382..5342e47f8 100644
--- a/tests/test_runs/test_pipelines/configs/train_with_categories.yaml
+++ b/tests/test_runs/test_pipelines/configs/train_with_categories.yaml
@@ -12,7 +12,7 @@ num_workers: 0
 cache_size: 0
 
 transforms_train:
-  name: augs_albu
+  name: augs_torch
   args:
     im_size: 64
 
diff --git a/tests/test_runs/test_pipelines/configs/train_with_sequence.yaml b/tests/test_runs/test_pipelines/configs/train_with_sequence.yaml
index e34be89a4..12a60c03f 100644
--- a/tests/test_runs/test_pipelines/configs/train_with_sequence.yaml
+++ b/tests/test_runs/test_pipelines/configs/train_with_sequence.yaml
@@ -12,7 +12,7 @@ num_workers: 0
 cache_size: 10
 
 transforms_train:
-  name: augs_torch
+  name: augs_albu
   args:
     im_size: 64
 
@@ -76,9 +76,9 @@ max_epochs: 2
 valid_period: 1
 
 logger:
-  name: tensorboard
+  name: neptune
   args:
-    save_dir: "."
+    project: "oml-team/test"
 
 
 lightning_trainer_extra_args:
diff --git a/tests/test_runs/test_pipelines/test_pipelines.py b/tests/test_runs/test_pipelines/test_pipelines.py
index f3fe76a1b..5093564e7 100644
--- a/tests/test_runs/test_pipelines/test_pipelines.py
+++ b/tests/test_runs/test_pipelines/test_pipelines.py
@@ -1,19 +1,23 @@
+import os
 import shutil
 import subprocess
 import warnings
 from pathlib import Path
 from typing import List, Tuple
 
+import dotenv
 import pytest
 import torch
 import yaml  # type: ignore
 
-from oml.const import PROJECT_ROOT
+from oml.const import DOTENV_PATH, PROJECT_ROOT
 
 warnings.filterwarnings("ignore")
 
 SCRIPTS_PATH = PROJECT_ROOT / "tests/test_runs/test_pipelines/"
 
+dotenv.load_dotenv(DOTENV_PATH)  # we need to load tokens for cloud loggers (Neptune, W & B)
+
 
 def accelerator_devices_pairs() -> List[Tuple[str, int]]:
     pairs = [("cpu", 1), ("cpu", 2)]
@@ -53,12 +57,16 @@ def test_train_and_validate(accelerator: str, devices: int) -> None:
 
 
 @pytest.mark.long
+@pytest.mark.needs_optional_dependency
+@pytest.mark.skipif(os.getenv("TEST_CLOUD_LOGGERS") != "yes", reason="Cloud loggers are tested only when explicitly requested.")
 @pytest.mark.parametrize("accelerator, devices", accelerator_devices_pairs())
 def test_train_with_bboxes(accelerator: str, devices: int) -> None:
     run("train_with_bboxes.py", accelerator, devices)
 
 
 @pytest.mark.long
+@pytest.mark.needs_optional_dependency
+@pytest.mark.skipif(os.getenv("TEST_CLOUD_LOGGERS") != "yes", reason="Cloud loggers are tested only when explicitly requested.")
 @pytest.mark.parametrize("accelerator, devices", accelerator_devices_pairs())
 def test_train_with_sequence(accelerator: str, devices: int) -> None:
     run("train_with_sequence.py", accelerator, devices)