Skip to content

Commit c7448aa

Browse files
jspark1105 authored and facebook-github-bot committed
remove unused parameters in optimizer tests (pytorch#18084)
Summary: Pull Request resolved: pytorch#18084 data_strategy parameter was not used in some of unit tests for optimizers Reviewed By: hyuen Differential Revision: D14487830 fbshipit-source-id: d757cd06aa2965f4c0570a4a18ba090b98820ef4
1 parent be364ac commit c7448aa

File tree

3 files changed

+5
-12
lines changed

3 files changed

+5
-12
lines changed

caffe2/python/operator_test/adadelta_test.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -144,10 +144,8 @@ def ref_sparse(param, moment, moment_delta, indices, grad, lr, decay,
144144
allow_nan=False, allow_infinity=False),
145145
decay=st.floats(min_value=0.01, max_value=0.99,
146146
allow_nan=False, allow_infinity=False),
147-
data_strategy=st.data(),
148147
**hu.gcs)
149-
def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay,
150-
data_strategy, gc, dc):
148+
def test_sparse_adadelta_empty(self, inputs, lr, epsilon, decay, gc, dc):
151149
param, moment, moment_delta = inputs
152150
moment = np.abs(moment)
153151
lr = np.array([lr], dtype=np.float32)

caffe2/python/operator_test/adagrad_test.py

Lines changed: 3 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -134,10 +134,9 @@ def test_sparse_adagrad(self, inputs, lr, epsilon, gc, dc):
134134
epsilon=st.floats(
135135
min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
136136
),
137-
data_strategy=st.data(),
138137
**hu.gcs
139138
)
140-
def test_sparse_adagrad_empty(self, inputs, lr, epsilon, data_strategy, gc, dc):
139+
def test_sparse_adagrad_empty(self, inputs, lr, epsilon, gc, dc):
141140
param, momentum = inputs
142141
grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)
143142

@@ -176,10 +175,9 @@ def test_sparse_adagrad_empty(self, inputs, lr, epsilon, data_strategy, gc, dc):
176175
epsilon=st.floats(
177176
min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
178177
),
179-
data_strategy=st.data(),
180178
**hu.gcs
181179
)
182-
def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, data_strategy, gc, dc):
180+
def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, gc, dc):
183181
adagrad_sparse_test_helper(
184182
self,
185183
inputs,
@@ -200,11 +198,10 @@ def test_row_wise_sparse_adagrad(self, inputs, lr, epsilon, data_strategy, gc, d
200198
epsilon=st.floats(
201199
min_value=0.01, max_value=0.99, allow_nan=False, allow_infinity=False
202200
),
203-
data_strategy=st.data(),
204201
**hu.gcs
205202
)
206203
def test_row_wise_sparse_adagrad_empty(
207-
self, inputs, lr, epsilon, data_strategy, gc, dc
204+
self, inputs, lr, epsilon, gc, dc
208205
):
209206
param, momentum = inputs
210207
grad = np.empty(shape=(0,) + param.shape[1:], dtype=np.float32)

caffe2/python/operator_test/wngrad_test.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -182,10 +182,8 @@ def test_sparse_wngrad(self, inputs, seq_b, lr, epsilon, gc, dc):
182182
allow_nan=False, allow_infinity=False),
183183
epsilon=st.floats(min_value=0.01, max_value=0.99,
184184
allow_nan=False, allow_infinity=False),
185-
data_strategy=st.data(),
186185
**hu.gcs_cpu_only)
187-
def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon,
188-
data_strategy, gc, dc):
186+
def test_sparse_wngrad_empty(self, inputs, seq_b, lr, epsilon, gc, dc):
189187
param = inputs[0]
190188
seq_b = np.array([seq_b, ], dtype=np.float32)
191189
lr = np.array([lr], dtype=np.float32)

0 commit comments

Comments (0)