Commit 086dec6

Author: Thomas Mulc
Commit message: new standard is 6 spaces for new line continuation
Parent: 9fa300e

File tree: 1 file changed (+3, -3 lines)


ADAG/ADAG.py

Lines changed: 3 additions & 3 deletions
@@ -51,7 +51,7 @@ def main():
       name='local_step',collections=['local_non_trainable'])

   with tf.device(tf.train.replica_device_setter(ps_tasks=n_pss,
-        worker_device="/job:%s/task:%d" % (FLAGS.job_name,FLAGS.task_index))):
+      worker_device="/job:%s/task:%d" % (FLAGS.job_name,FLAGS.task_index))):
     global_step = tf.Variable(0,dtype=tf.int32,trainable=False,name='global_step')
     target = tf.constant(100.,shape=[2],dtype=tf.float32)
     loss = tf.reduce_mean(tf.square(c-target))
@@ -66,7 +66,7 @@ def main():
     local_to_global, global_to_local = create_global_variables()

     # ADAG (simplest case since all batches are the same)
-    update_window = 3 # T: update/communication window, a.k.a number of gradients to use before sending to ps
+    update_window = 3 # T: update/communication window
     grad_list = [] # the array to store the gradients through the communication window
     for t in range(update_window):
       if t != 0:
@@ -75,7 +75,7 @@ def main():
       var_list=tf.local_variables()))
       else:
         grads, varss = zip(*loptimizer.compute_gradients(loss,
-          var_list=tf.local_variables()))
+      var_list=tf.local_variables()))
       grad_list.append(grads) #add gradients to the list
       opt_local = loptimizer.apply_gradients(zip(grads,varss),
       global_step=local_step) #update local parameters
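
The hunks above sit in ADAG's gradient-accumulation loop: each worker computes gradients for update_window (T) consecutive local steps, appends them to grad_list, and applies each set locally before the accumulated window is sent to the parameter servers. Below is a minimal single-process sketch of that loop, assuming TensorFlow 1.x. It reuses the names from the diff (loss, loptimizer, local_step, update_window, grad_list); the toy variable w, the target, and the plain gradient-descent optimizer are placeholder assumptions, and the if t != 0: branch, whose body lies outside the hunks shown, is collapsed into a single code path.

import tensorflow as tf  # TensorFlow 1.x graph-mode API, as in ADAG.py

# Placeholder worker-local state (assumptions; ADAG.py builds these elsewhere).
w = tf.Variable(tf.zeros([2]), collections=[tf.GraphKeys.LOCAL_VARIABLES], name='w')
target = tf.constant(100., shape=[2], dtype=tf.float32)
loss = tf.reduce_mean(tf.square(w - target))

# Local step counter kept out of tf.local_variables() via a custom collection,
# mirroring the 'local_non_trainable' collection in the diff's context lines.
local_step = tf.Variable(0, dtype=tf.int32, trainable=False,
      name='local_step', collections=['local_non_trainable'])
loptimizer = tf.train.GradientDescentOptimizer(0.1)  # placeholder local optimizer

update_window = 3  # T: update/communication window
grad_list = []     # gradients gathered across the communication window

for t in range(update_window):
    # Recompute gradients w.r.t. the worker-local variables each inner step
    # (continuation lines use the 6-space style this commit standardizes).
    grads, varss = zip(*loptimizer.compute_gradients(loss,
      var_list=tf.local_variables()))
    grad_list.append(grads)  # add gradients to the list
    opt_local = loptimizer.apply_gradients(zip(grads, varss),
      global_step=local_step)  # update local parameters only

with tf.Session() as sess:
    sess.run([tf.local_variables_initializer(),
      tf.variables_initializer(tf.get_collection('local_non_trainable'))])
    before = sess.run(loss)
    sess.run(opt_local)  # run one local update (the last step built above)
    print(before, '->', sess.run(loss), '| grads stored:', len(grad_list))

In the full script the window's steps are chained together (the if t != 0: branch) and the stored gradients are only communicated to the parameter servers once the window completes; the sketch stops at the local loop.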
