   <div class="version">
-    <a href='http://pytorch.org/docs/versions.html'>1.7.0a0+8a83851 ▼</a>
+    <a href='http://pytorch.org/docs/versions.html'>1.7.0a0+be34aa1 ▼</a>
   </div>
@@ -642,10 +642,67 @@
     _C._set_default_dtype(d)

 def set_deterministic(d):
-    r"""Sets a global flag to force all operations to use a deterministic
-    implementation if available. If an operation that does not have a
-    deterministic implementation is called while this setting is True, the
-    operation will throw a RuntimeError.
+    r"""Sets whether native PyTorch operations must use deterministic
+    algorithms. When True, operations without deterministic algorithms
+    will throw a :class:`RuntimeError` when called.
+
+    .. warning::
+        This feature is in beta, so it does not affect every
+        nondeterministic operation yet. Only the following operations
+        are affected by this flag.
+
+    The following normally-nondeterministic operations will act
+    deterministically when `d=True`:
+
+        * :class:`torch.nn.Conv1d` when called on a CUDA tensor
+        * :class:`torch.nn.Conv2d` when called on a CUDA tensor
+        * :class:`torch.nn.Conv3d` when called on a CUDA tensor
+        * :class:`torch.nn.ConvTranspose1d` when called on a CUDA tensor
+        * :class:`torch.nn.ConvTranspose2d` when called on a CUDA tensor
+        * :class:`torch.nn.ConvTranspose3d` when called on a CUDA tensor
+        * :func:`torch.bmm` when called on sparse-dense CUDA tensors
+
+    The following normally-nondeterministic operations will throw a
+    :class:`RuntimeError` when `d=True`:
+
+        * :class:`torch.nn.AvgPool3d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.AdaptiveAvgPool2d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.AdaptiveAvgPool3d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.MaxPool3d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.AdaptiveMaxPool2d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.FractionalMaxPool2d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.FractionalMaxPool3d` when called on a CUDA tensor that requires grad
+        * :func:`torch.nn.functional.interpolate` when called on a CUDA tensor that requires grad
+          and one of the following modes is used:
+
+            - `linear`
+            - `bilinear`
+            - `bicubic`
+            - `trilinear`
+        * :class:`torch.nn.ReflectionPad1d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.ReflectionPad2d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.ReplicationPad1d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.ReplicationPad2d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.ReplicationPad3d` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.NLLLoss` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.CTCLoss` when called on a CUDA tensor that requires grad
+        * :class:`torch.nn.EmbeddingBag` when called on a CUDA tensor that requires grad
+        * :func:`torch.scatter_add_` when called on a CUDA tensor
+        * :func:`torch.index_add_` when called on a CUDA tensor
+        * :func:`torch.index_select` when called on a CUDA tensor that requires grad
+        * :func:`torch.repeat_interleave` when called on a CUDA tensor that requires grad
+        * :func:`torch.histc` when called on a CUDA tensor
+        * :func:`torch.bincount` when called on a CUDA tensor
+
+    A handful of CUDA operations are nondeterministic if the CUDA version is
+    10.2 or greater, unless the environment variable `CUBLAS_WORKSPACE_CONFIG=:4096:8`
+    or `CUBLAS_WORKSPACE_CONFIG=:16:8` is set. See the CUDA documentation for more
+    details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_.
+    If neither of these environment variable configurations is set, a :class:`RuntimeError`
+    will be raised from these operations when called with CUDA tensors:
+
+        * :func:`torch.mm`
+        * :func:`torch.mv`
+        * :func:`torch.bmm`

     Note that deterministic operations tend to have worse performance than
     non-deterministic operations.
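A minimal sketch of the behavior the new docstring describes. This is an illustration rather than part of the commit: it assumes a CUDA build and an available CUDA device, and uses `torch.bincount`, one of the operations listed above as raising under the flag:

    import torch

    x = torch.randint(0, 10, (1000,), device='cuda')

    # Flag off: the nondeterministic CUDA kernel is allowed to run.
    torch.set_deterministic(False)
    torch.bincount(x)

    # Flag on: the same call raises a RuntimeError instead of silently
    # producing run-to-run differences.
    torch.set_deterministic(True)
    try:
        torch.bincount(x)
    except RuntimeError as err:
        print(err)  # message notes the op lacks a deterministic implementation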
@@ -656,11 +713,11 @@
     """
     _C._set_deterministic(d)

-def is_deterministic():
-    r"""Returns True if the global deterministic flag is turned on and
-    operations are being forced to use a deterministic implementation.
+<div class="viewcode-block" id="is_deterministic"><a class="viewcode-back" href="../generated/torch.is_deterministic.html#torch.is_deterministic">[docs]</a>def is_deterministic():
+    r"""Returns True if the global deterministic flag is turned on. Refer to
+    :func:`torch.set_deterministic` documentation for more details.
     """
-    return _C._get_deterministic()
+    return _C._get_deterministic()</div>

 ################################################################################
 # Define Storage and Tensor classes
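To illustrate the cuBLAS caveat together with the `is_deterministic` getter, a minimal sketch (again an assumption-laden illustration, not part of the commit: it assumes CUDA 10.2+ and an available CUDA device; the environment variable must be set before the first cuBLAS call in the process):

    import os

    # Must be exported before cuBLAS initializes; ':16:8' is the other
    # configuration accepted by the check described in the docstring above.
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'

    import torch

    torch.set_deterministic(True)
    assert torch.is_deterministic()  # reads back the flag set above

    a = torch.randn(128, 64, device='cuda')
    b = torch.randn(64, 32, device='cuda')
    c = torch.mm(a, b)  # would raise a RuntimeError without the workspace config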