@@ -392,7 +392,7 @@ from oml.miners.inbatch_all_tri import AllTripletsMiner
from oml.models import ViTExtractor
from oml.samplers.balance import BalanceSampler
from oml.utils.download_mock_dataset import download_mock_dataset
- from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger, WandbLogger
+ from oml.lightning.pipelines.logging import NeptunePipelineLogger, TensorBoardPipelineLogger, WandBPipelineLogger

dataset_root = "mock_dataset/"
df_train, df_val = download_mock_dataset(dataset_root)
@@ -413,15 +413,15 @@ val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=4)
metric_callback = MetricValCallback(metric=EmbeddingMetrics(extra_keys=[train_dataset.paths_key,]), log_images=True)

# 1) Logging with Tensorboard
- logger = TensorBoardLogger(".")
+ logger = TensorBoardPipelineLogger(".")

# 2) Logging with Neptune
- # logger = NeptuneLogger(api_key="", project="", log_model_checkpoints=False)
+ # logger = NeptunePipelineLogger(api_key="", project="", log_model_checkpoints=False)

# 3) Logging with Weights and Biases
# import os
# os.environ["WANDB_API_KEY"] = ""
- # logger = WandbLogger(project="test_project", log_model=False)
+ # logger = WandBPipelineLogger(project="test_project", log_model=False)

# run
pl_model = ExtractorModule(extractor, criterion, optimizer)
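
Not part of the diff above, just a hedged sketch of how the swapped-in logger gets used: the hunk stops right after `pl_model` is created, and the chosen `*PipelineLogger` is then handed to the Lightning trainer in the usual way. The `Trainer` arguments and the `train_loader` name below are assumptions based on the surrounding example, not on this change:

# Sketch only; assumed continuation of the example, not introduced by this commit.
import pytorch_lightning as pl

trainer = pl.Trainer(max_epochs=3, callbacks=[metric_callback], num_sanity_val_steps=0, logger=logger)
trainer.fit(pl_model, train_dataloaders=train_loader, val_dataloaders=val_loader)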