Skip to content

Commit 38f30c4

Browse files
Update versionadded/versionchanged in master according to the latest releases (#2164)
* Update versionadded/versionchanged in master according to the latest releases
* Revert master version
* Update missing file

Co-authored-by: Taras Savchyn <[email protected]>
1 parent 3eec6df commit 38f30c4

Showing 23 changed files with 48 additions and 46 deletions.

ignite/contrib/metrics/regression/fractional_absolute_error.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class FractionalAbsoluteError(_BaseRegression):
3434
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3535
non-blocking. By default, CPU.
3636
37-
.. versionchanged:: 0.5.0
37+
.. versionchanged:: 0.4.5
3838
- Works with DDP.
3939
"""
4040

ignite/contrib/metrics/regression/fractional_bias.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class FractionalBias(_BaseRegression):
3434
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3535
non-blocking. By default, CPU.
3636
37-
.. versionchanged:: 0.5.0
37+
.. versionchanged:: 0.4.5
3838
- Works with DDP.
3939
"""
4040

ignite/contrib/metrics/regression/geometric_mean_absolute_error.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class GeometricMeanAbsoluteError(_BaseRegression):
3434
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3535
non-blocking. By default, CPU.
3636
37-
.. versionchanged:: 0.5.0
37+
.. versionchanged:: 0.4.5
3838
- Works with DDP.
3939
"""
4040

ignite/contrib/metrics/regression/maximum_absolute_error.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class MaximumAbsoluteError(_BaseRegression):
3434
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3535
non-blocking. By default, CPU.
3636
37-
.. versionchanged:: 0.5.0
37+
.. versionchanged:: 0.4.5
3838
- Works with DDP.
3939
"""
4040

ignite/contrib/metrics/regression/mean_absolute_relative_error.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class MeanAbsoluteRelativeError(_BaseRegression):
3434
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3535
non-blocking. By default, CPU.
3636
37-
.. versionchanged:: 0.5.0
37+
.. versionchanged:: 0.4.5
3838
- Works with DDP.
3939
"""
4040

ignite/contrib/metrics/regression/mean_normalized_bias.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class MeanNormalizedBias(_BaseRegression):
3434
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3535
non-blocking. By default, CPU.
3636
37-
.. versionchanged:: 0.5.0
37+
.. versionchanged:: 0.4.5
3838
- Works with DDP.
3939
"""
4040

ignite/contrib/metrics/regression/wave_hedges_distance.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ class WaveHedgesDistance(_BaseRegression):
3333
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
3434
non-blocking. By default, CPU.
3535
36-
.. versionchanged:: 0.5.0
36+
.. versionchanged:: 0.4.5
3737
- Works with DDP.
3838
"""
3939

ignite/distributed/comp_models/native.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -437,7 +437,7 @@ def _expand_hostlist(nodelist: str) -> List[str]:
437437
Args:
438438
nodelist: Compressed hostlist string
439439
440-
.. versionadded:: 0.5.1
440+
.. versionadded:: 0.4.6
441441
"""
442442
node_list = nodelist.split(", ")
443443

ignite/distributed/launcher.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,7 @@ def training(local_rank, config, **kwargs):
208208
.. versionchanged:: 0.4.2
209209
``backend`` now accepts `horovod` distributed framework.
210210
211-
.. versionchanged:: 0.5.0
211+
.. versionchanged:: 0.4.5
212212
``init_method`` added.
213213
214214
"""

ignite/distributed/utils.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -414,7 +414,7 @@ def broadcast(
414414
415415
.. versionadded:: 0.4.2
416416
417-
.. versionchanged:: 0.5.0
417+
.. versionchanged:: 0.4.5
418418
added ``safe_mode``
419419
"""
420420
if _need_to_sync and isinstance(_model, _SerialModel):
@@ -523,7 +523,7 @@ def train_fn(local_rank, a, b, c):
523523
.. versionchanged:: 0.4.2
524524
``backend`` now accepts `horovod` distributed framework.
525525
526-
.. versionchanged:: 0.5.0
526+
.. versionchanged:: 0.4.5
527527
``kwargs`` now accepts ``init_method``, ``rank``, ``world_size`` for PyTorch native distributed backend.
528528
"""
529529
if not (has_xla_support or has_native_dist_support or has_hvd_support):

ignite/engine/__init__.py

+8-8
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ def supervised_training_step(
8282
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
8383
trainer = Engine(update_fn)
8484
85-
.. versionadded:: 0.5.0
85+
.. versionadded:: 0.4.5
8686
"""
8787

8888
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
@@ -140,7 +140,7 @@ def supervised_training_step_amp(
140140
update_fn = supervised_training_step_amp(model, optimizer, loss_fn, 'cuda', scaler=scaler)
141141
trainer = Engine(update_fn)
142142
143-
.. versionadded:: 0.5.0
143+
.. versionadded:: 0.4.5
144144
"""
145145

146146
try:
@@ -206,7 +206,7 @@ def supervised_training_step_apex(
206206
update_fn = supervised_training_step_apex(model, optimizer, loss_fn, 'cuda')
207207
trainer = Engine(update_fn)
208208
209-
.. versionadded:: 0.5.0
209+
.. versionadded:: 0.4.5
210210
"""
211211

212212
try:
@@ -267,7 +267,7 @@ def supervised_training_step_tpu(
267267
update_fn = supervised_training_step_tpu(model, optimizer, loss_fn, 'xla')
268268
trainer = Engine(update_fn)
269269
270-
.. versionadded:: 0.5.0
270+
.. versionadded:: 0.4.5
271271
"""
272272
try:
273273
import torch_xla.core.xla_model as xm
@@ -380,7 +380,7 @@ def create_supervised_trainer(
380380
381381
See more: https://nvidia.github.io/apex/amp.html#module-apex.amp
382382
383-
.. versionchanged:: 0.5.0
383+
.. versionchanged:: 0.4.5
384384
385385
- Added ``amp_mode`` argument for automatic mixed precision.
386386
- Added ``scaler`` argument for gradient scaling.
@@ -449,7 +449,7 @@ def supervised_evaluation_step(
449449
`device` will now *only* be used to move the input data to the correct device.
450450
The `model` should be moved by the user before creating an optimizer.
451451
452-
.. versionadded:: 0.5.0
452+
.. versionadded:: 0.4.5
453453
"""
454454

455455
def evaluate_step(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
@@ -497,7 +497,7 @@ def supervised_evaluation_step_amp(
497497
`device` will now *only* be used to move the input data to the correct device.
498498
The `model` should be moved by the user before creating an optimizer.
499499
500-
.. versionadded:: 0.5.0
500+
.. versionadded:: 0.4.5
501501
"""
502502
try:
503503
from torch.cuda.amp import autocast
@@ -561,7 +561,7 @@ def create_supervised_evaluator(
561561
562562
- `PyTorch's Explanation <https://github.com/pytorch/pytorch/issues/7844#issuecomment-503713840>`_
563563
564-
.. versionchanged:: 0.5.0
564+
.. versionchanged:: 0.4.5
565565
- Added ``amp_mode`` argument for automatic mixed precision.
566566
"""
567567
device_type = device.type if isinstance(device, torch.device) else device

ignite/handlers/ema_handler.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ def print_ema_momentum(engine):
114114
115115
engine.run(...)
116116
117-
.. versionadded:: 0.5.0
117+
.. versionadded:: 0.4.6
118118
119119
"""
120120

ignite/handlers/lr_finder.py

+2
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,8 @@ class FastaiLRFinder:
7070
https://arxiv.org/abs/1506.01186
7171
7272
fastai/lr_find: https://github.com/fastai/fastai
73+
74+
.. versionadded:: 0.4.6
7375
"""
7476

7577
def __init__(self) -> None:

ignite/handlers/param_scheduler.py

+9-9
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ class ParamScheduler(metaclass=ABCMeta):
3232
More precisely, whatever the state of the optimizer (newly created or used by another scheduler) the scheduler
3333
sets defined absolute values.
3434
35-
.. versionadded:: 0.5.1
35+
.. versionadded:: 0.4.5
3636
"""
3737

3838
def __init__(
@@ -251,7 +251,7 @@ class CyclicalScheduler(ParamScheduler):
251251
If the scheduler is bound to an 'ITERATION_*' event, 'cycle_size' should
252252
usually be the number of batches in an epoch.
253253
254-
.. versionadded:: 0.5.1
254+
.. versionadded:: 0.4.5
255255
"""
256256

257257
def __init__(
@@ -340,7 +340,7 @@ class LinearCyclicalScheduler(CyclicalScheduler):
340340
# over the course of 1 epoch
341341
#
342342
343-
.. versionadded:: 0.5.1
343+
.. versionadded:: 0.4.5
344344
"""
345345

346346
def get_param(self) -> float:
@@ -408,7 +408,7 @@ class CosineAnnealingScheduler(CyclicalScheduler):
408408
.. [Smith17] Smith, Leslie N. "Cyclical learning rates for training neural networks."
409409
Applications of Computer Vision (WACV), 2017 IEEE Winter Conference on. IEEE, 2017
410410
411-
.. versionadded:: 0.5.1
411+
.. versionadded:: 0.4.5
412412
"""
413413

414414
def get_param(self) -> float:
@@ -449,7 +449,7 @@ class ConcatScheduler(ParamScheduler):
449449
# The annealing cycles are repeated indefinitely.
450450
#
451451
452-
.. versionadded:: 0.5.1
452+
.. versionadded:: 0.4.5
453453
"""
454454

455455
def __init__(self, schedulers: List[ParamScheduler], durations: List[int], save_history: bool = False):
@@ -675,7 +675,7 @@ class LRScheduler(ParamScheduler):
675675
# the first lr value from the optimizer, otherwise it is will be skipped:
676676
trainer.add_event_handler(Events.ITERATION_COMPLETED, scheduler)
677677
678-
.. versionadded:: 0.5.1
678+
.. versionadded:: 0.4.5
679679
"""
680680

681681
def __init__(self, lr_scheduler: _LRScheduler, save_history: bool = False):
@@ -806,7 +806,7 @@ def create_lr_scheduler_with_warmup(
806806
# Attach to the trainer
807807
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
808808
809-
.. versionadded:: 0.5.1
809+
.. versionadded:: 0.4.5
810810
"""
811811
if not isinstance(lr_scheduler, (ParamScheduler, _LRScheduler)):
812812
raise TypeError(
@@ -905,7 +905,7 @@ class PiecewiseLinear(ParamScheduler):
905905
# from 0.3 to 0.1 between 21st and 30th iterations and remains 0.1 until the end of the iterations.
906906
#
907907
908-
.. versionadded:: 0.5.1
908+
.. versionadded:: 0.4.5
909909
"""
910910

911911
def __init__(
@@ -995,7 +995,7 @@ class ParamGroupScheduler:
995995
# Attach single scheduler to the trainer
996996
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
997997
998-
.. versionadded:: 0.5.1
998+
.. versionadded:: 0.4.5
999999
"""
10001000

10011001
def __init__(self, schedulers: List[ParamScheduler], names: Optional[List[str]] = None, save_history: bool = False):

ignite/handlers/stores.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,8 @@ def log_training_results(engine):
3434
# output = [(y_pred0, y0), (y_pred1, y1), ...]
3535
# do something with output, e.g., plotting
3636
37-
.. versionadded:: 0.5.0
38-
.. versionchanged:: 0.5.0
37+
.. versionadded:: 0.4.5
38+
.. versionchanged:: 0.4.5
3939
`attach` now accepts an optional argument `name`
4040
"""
4141

ignite/metrics/gan/fid.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ class FID(_BaseInceptionMetric):
106106
m.update((y_pred, y))
107107
print(m.compute())
108108
109-
.. versionadded:: 0.5.0
109+
.. versionadded:: 0.4.6
110110
"""
111111

112112
def __init__(

ignite/metrics/gan/inception_score.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ class InceptionScore(_BaseInceptionMetric):
6060
m.update(images)
6161
print(m.compute())
6262
63-
.. versionadded:: 0.5.0
63+
.. versionadded:: 0.4.6
6464
"""
6565

6666
def __init__(

ignite/metrics/metric.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -556,7 +556,7 @@ def sync_all_reduce(*attrs: Any) -> Callable:
556556
Args:
557557
attrs: attribute names of decorated class
558558
559-
.. versionchanged:: 0.5.0
559+
.. versionchanged:: 0.4.5
560560
- Ability to handle different reduction operations (SUM, MAX, MIN, PRODUCT).
561561
"""
562562

ignite/metrics/multilabel_confusion_matrix.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ class MultiLabelConfusionMatrix(Metric):
3838
default, CPU.
3939
normalized: whether to normalize confusion matrix by its sum or not.
4040
41-
.. versionadded:: 0.5.0
41+
.. versionadded:: 0.4.5
4242
4343
"""
4444

ignite/metrics/nlp/bleu.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,7 @@ class Bleu(Metric):
109109
110110
print(m.compute())
111111
112-
.. versionadded:: 0.5.0
112+
.. versionadded:: 0.4.5
113113
"""
114114

115115
def __init__(

ignite/metrics/nlp/rouge.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ def compute_ngram_scores(candidate: Sequence[Any], reference: Sequence[Any], n:
4444
Returns:
4545
The score containing the number of ngram co-occurences
4646
47-
.. versionadded:: 0.5.0
47+
.. versionadded:: 0.4.5
4848
"""
4949

5050
# ngrams of the candidate
@@ -73,7 +73,7 @@ def compute_lcs_scores(candidate: Sequence[Any], reference: Sequence[Any]) -> Sc
7373
Returns:
7474
The score containing the length of longest common subsequence
7575
76-
.. versionadded:: 0.5.0
76+
.. versionadded:: 0.4.5
7777
"""
7878

7979
# lcs of candidate and reference
@@ -228,7 +228,7 @@ class RougeN(_BaseRouge):
228228
m.compute()
229229
>>> {'Rouge-2-P': 0.5, 'Rouge-2-R': 0.4, 'Rouge-2-F': 0.4}
230230
231-
.. versionadded:: 0.5.0
231+
.. versionadded:: 0.4.5
232232
"""
233233

234234
def __init__(
@@ -295,7 +295,7 @@ class RougeL(_BaseRouge):
295295
m.compute()
296296
>>> {'Rouge-L-P': 0.6, 'Rouge-L-R': 0.5, 'Rouge-L-F': 0.5}
297297
298-
.. versionadded:: 0.5.0
298+
.. versionadded:: 0.4.5
299299
"""
300300

301301
def __init__(
@@ -357,7 +357,7 @@ class Rouge(Metric):
357357
m.compute()
358358
>>> {'Rouge-L-P': 0.6, 'Rouge-L-R': 0.5, 'Rouge-L-F': 0.5, 'Rouge-2-P': 0.5, 'Rouge-2-R': 0.4, 'Rouge-2-F': 0.4}
359359
360-
.. versionadded:: 0.5.0
360+
.. versionadded:: 0.4.5
361361
"""
362362

363363
def __init__(

ignite/metrics/nlp/utils.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@ def ngrams(sequence: Sequence[Any], n: int) -> Counter:
1515
Returns:
1616
A counter of ngram objects
1717
18-
.. versionadded:: 0.5.0
18+
.. versionadded:: 0.4.5
1919
"""
2020
return Counter([tuple(sequence[i : i + n]) for i in range(len(sequence) - n + 1)])
2121

@@ -32,7 +32,7 @@ def lcs(seq_a: Sequence[Any], seq_b: Sequence[Any]) -> int:
3232
Returns:
3333
The length of the longest common subsequence
3434
35-
.. versionadded:: 0.5.0
35+
.. versionadded:: 0.4.5
3636
"""
3737
m = len(seq_a)
3838
n = len(seq_b)
@@ -73,7 +73,7 @@ def modified_precision(references: Sequence[Sequence[Any]], candidate: Any, n: i
7373
Returns:
7474
The length of the longest common subsequence
7575
76-
.. versionadded:: 0.5.0
76+
.. versionadded:: 0.4.5
7777
"""
7878
# ngrams of the candidate
7979
counts = ngrams(candidate, n)

0 commit comments

Comments (0)