Commit b1a3590

Author: Le Horizon

remove unused config

(cherry picked from commit a9fcdef)

1 parent 76302fa · commit b1a3590

File tree: 1 file changed, +0 −4 lines

alf/algorithms/config.py

Lines changed: 0 additions & 4 deletions
```diff
@@ -26,7 +26,6 @@ def __init__(self,
                  algorithm_ctor=None,
                  data_transformer_ctor=None,
                  random_seed=None,
-                 skip_torch_deterministic=False,
                  num_iterations=1000,
                  num_env_steps=0,
                  unroll_length=8,
@@ -99,8 +98,6 @@ def __init__(self,
                 will not be normalized. Data will be in mismatch, causing training to
                 suffer and potentially fail.
             random_seed (None|int): random seed, a random seed is used if None
-            skip_torch_deterministic (bool): if True, turns of
-                ``torch.use_deterministic_algorithms`` even when a random_seed is set.
             num_iterations (int): For RL trainer, indicates number of update
                 iterations (ignored if 0). Note that for off-policy algorithms, if
                 ``initial_collect_steps>0``, then the first
@@ -271,7 +268,6 @@ def __init__(self,
         self.data_transformer_ctor = data_transformer_ctor
         self.data_transformer = None  # to be set by Trainer
         self.random_seed = random_seed
-        self.skip_torch_deterministic = skip_torch_deterministic
         self.num_iterations = num_iterations
         self.num_env_steps = num_env_steps
         self.unroll_length = unroll_length
```
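
For context: the removed ``skip_torch_deterministic`` flag let users turn off ``torch.use_deterministic_algorithms`` even when ``random_seed`` was set, so after this commit deterministic mode presumably just follows the seed. The sketch below is a minimal, hypothetical illustration of how a random seed is commonly paired with deterministic algorithms in PyTorch; ``seed_everything`` and ``config_random_seed`` are assumed names for illustration only, not ALF's actual trainer code.

```python
import os
import random

import numpy as np
import torch


def seed_everything(seed):
    """Hypothetical helper: seed all RNGs and request deterministic kernels.

    Mirrors the behavior implied by removing the flag: once a random seed
    is given, deterministic algorithms are always enabled.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)  # seeds CPU and all CUDA devices
    # Some CUDA ops additionally need this env var to run deterministically.
    os.environ.setdefault("CUBLAS_WORKSPACE_CONFIG", ":4096:8")
    torch.use_deterministic_algorithms(True)


# Usage: only seed when a seed is actually configured, matching the
# ``random_seed (None|int)`` convention in the docstring above.
config_random_seed = 1234  # assumed value for illustration
if config_random_seed is not None:
    seed_everything(config_random_seed)
```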
