@@ -27,32 +27,32 @@ def main():
 
     if FLAGS.job_name == 'ps':  # checks if parameter server
         server = tf.train.Server(cluster,
-                job_name="ps",
-                task_index=FLAGS.task_index,
-                config=config)
+                                 job_name="ps",
+                                 task_index=FLAGS.task_index,
+                                 config=config)
         server.join()
     else:  # it must be a worker server
         is_chief = (FLAGS.task_index == 0)  # checks if this is the chief node
         server = tf.train.Server(cluster,
-                job_name="worker",
-                task_index=FLAGS.task_index,
-                config=config)
+                                 job_name="worker",
+                                 task_index=FLAGS.task_index,
+                                 config=config)
 
         # Graph
         # We must not use tf.train.replica_device_setter for normal operations
         # Local operations
         with tf.device("/job:worker/replica:0/task:%d" % FLAGS.task_index):
             a = tf.Variable(tf.constant(0., shape=[2]), dtype=tf.float32,
-                collections=[tf.GraphKeys.LOCAL_VARIABLES])
+                            collections=[tf.GraphKeys.LOCAL_VARIABLES])
             b = tf.Variable(tf.constant(0., shape=[2]), dtype=tf.float32,
-                collections=[tf.GraphKeys.LOCAL_VARIABLES])
+                            collections=[tf.GraphKeys.LOCAL_VARIABLES])
             c = a + b
             local_step = tf.Variable(0, dtype=tf.int32, trainable=False,
-                name='local_step', collections=['local_non_trainable'])
+                                     name='local_step', collections=['local_non_trainable'])
 
         with tf.device(tf.train.replica_device_setter(
-                ps_tasks=n_pss, \
-                worker_device="/job:%s/task:%d" % (FLAGS.job_name, FLAGS.task_index))):
+                ps_tasks=n_pss,
+                worker_device="/job:%s/task:%d" % (FLAGS.job_name, FLAGS.task_index))):
             global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
             target = tf.constant(100., shape=[2], dtype=tf.float32)
             loss = tf.reduce_mean(tf.square(c - target))
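Note: the hunk above references cluster, config, and n_pss, which are defined earlier in main() and do not appear in this diff. A minimal sketch of how they might be set up in TF 1.x follows; the flag defaults, host names, and ports are illustrative assumptions, not the repository's actual values:

import tensorflow as tf

# Hypothetical flag definitions; the real ones live above this hunk.
tf.app.flags.DEFINE_string('job_name', '', "either 'ps' or 'worker'")
tf.app.flags.DEFINE_integer('task_index', 0, 'index of the task within its job')
FLAGS = tf.app.flags.FLAGS

n_pss = 1  # assumed number of parameter server tasks

# Illustrative cluster: one parameter server and two workers on local ports.
cluster = tf.train.ClusterSpec({
    'ps': ['localhost:2222'],
    'worker': ['localhost:2223', 'localhost:2224'],
})
config = tf.ConfigProto(allow_soft_placement=True)

With this layout every task runs the same script; the job_name and task_index flags decide whether the process blocks as a parameter server (server.join()) or builds the worker graph shown in the hunk.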
@@ -79,7 +79,7 @@ def main():
                     var_list=tf.local_variables()))
                 grad_list.append(grads)  # add gradients to the list
                 opt_local = loptimizer.apply_gradients(zip(grads, varss),
-                    global_step=local_step)  # update local parameters
+                                                       global_step=local_step)  # update local parameters
             grads = tf.reduce_mean(grad_list, axis=0)
             grads = tuple([grads[i] for i in range(len(varss))])
             opt = optimizer.apply_gradients(
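The second hunk appears to sit inside a loop that takes several local optimizer steps, appends each step's gradients to grad_list, averages the list, and applies the average through the global optimizer. A minimal sketch of how the finished graph might then be driven, assuming a MonitoredTrainingSession and an illustrative stop criterion, since the rest of main() is not shown in this diff:

# Hypothetical driver for the ops built above; the hook and step count are assumptions.
hooks = [tf.train.StopAtStepHook(last_step=1000)]  # illustrative stop criterion
with tf.train.MonitoredTrainingSession(master=server.target,
                                       is_chief=is_chief,
                                       hooks=hooks,
                                       config=config) as sess:
    while not sess.should_stop():
        _, step = sess.run([opt, global_step])

MonitoredTrainingSession handles variable initialization and coordinates the chief (is_chief) with the other workers, matching the roles established in the first hunk.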