@@ -51,7 +51,7 @@ def main():
                              name = 'local_step', collections = ['local_non_trainable'])
 
     with tf.device(tf.train.replica_device_setter(ps_tasks = n_pss,
-            worker_device = "/job:%s/task:%d" % (FLAGS.job_name, FLAGS.task_index))):
+            worker_device = "/job:%s/task:%d" % (FLAGS.job_name, FLAGS.task_index))):
         global_step = tf.Variable(0, dtype = tf.int32, trainable = False, name = 'global_step')
         target = tf.constant(100., shape = [2], dtype = tf.float32)
         loss = tf.reduce_mean(tf.square(c - target))
@@ -66,7 +66,7 @@ def main():
         local_to_global, global_to_local = create_global_variables()
 
         # ADAG (simplest case since all batches are the same)
-        update_window = 3  # T: update/communication window, a.k.a. number of gradients to use before sending to ps
+        update_window = 3  # T: update/communication window
         grad_list = []  # the array to store the gradients through the communication window
         for t in range(update_window):
             if t != 0:
@@ -75,7 +75,7 @@ def main():
                                                                  var_list = tf.local_variables()))
             else:
                 grads, varss = zip(*loptimizer.compute_gradients(loss,
-                                                                 var_list = tf.local_variables()))
+                                                                 var_list = tf.local_variables()))
             grad_list.append(grads)  # add gradients to the list
             opt_local = loptimizer.apply_gradients(zip(grads, varss),
                                                    global_step = local_step)  # update local parameters
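
To make the communication-window idea in the hunks above concrete outside the distributed setup, here is a minimal single-machine sketch. It assumes a TensorFlow 1.x graph API; w_local, w_global, the plain gradient-descent optimizer, and the summed push are illustrative stand-ins rather than this file's actual variables or update rule. T local steps are chained with control dependencies, each gradient is kept, and their sum is applied once to a separate "global" copy of the parameters.

import tensorflow as tf  # assumes a TensorFlow 1.x installation

T = 3                                                 # update/communication window
w_local = tf.Variable([0.0, 0.0], name='w_local')     # worker's local copy
w_global = tf.Variable([0.0, 0.0], name='w_global')   # stand-in for the ps-hosted copy
target = tf.constant([100.0, 100.0])
opt = tf.train.GradientDescentOptimizer(0.1)

grad_list, deps = [], []
for t in range(T):
    with tf.control_dependencies(deps):               # each step sees the previous local update
        loss = tf.reduce_mean(tf.square(w_local - target))
        (g, v), = opt.compute_gradients(loss, var_list=[w_local])
    step = opt.apply_gradients([(g, v)])              # local parameter update
    grad_list.append(g)                               # remember this window's gradient
    deps = [step]

with tf.control_dependencies(deps):                   # ensure the last local step also runs
    push = opt.apply_gradients([(tf.add_n(grad_list), w_global)])  # one send per window

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(push)                                    # runs T local steps, then the global update
    print(sess.run([w_local, w_global]))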