|
| 1 | +#!/usr/bin/env python |
| 2 | +from __future__ import absolute_import |
| 3 | +from __future__ import division |
| 4 | +from __future__ import print_function |
| 5 | + |
| 6 | +import os |
| 7 | +import sys |
| 8 | +import time |
| 9 | + |
| 10 | +import tensorflow as tf |
| 11 | +import coref_model as cm |
| 12 | +import util |
| 13 | + |
if __name__ == "__main__":
  # Distributed training worker: joins the TF cluster described in the config,
  # builds the coref model on its assigned device, and runs the train loop.
  config = util.initialize_from_env()
  # Index of this worker within the cluster; set by the process launcher.
  task_index = int(os.environ["TASK"])

  report_frequency = config["report_frequency"]
  cluster_config = config["cluster"]

  # Pin this worker to its assigned GPU(s) before any TF graph construction.
  util.set_gpus(cluster_config["gpus"][task_index])

  cluster = tf.train.ClusterSpec(cluster_config["addresses"])
  server = tf.train.Server(cluster,
                           job_name="worker",
                           task_index=task_index)

  # Assigns ops to the local worker by default; variables are placed by the
  # replica_device_setter onto the parameter devices of the cluster.
  with tf.device(tf.train.replica_device_setter(worker_device="/job:worker/task:%d" % task_index, cluster=cluster)):
    model = cm.CorefModel(config)
    saver = tf.train.Saver()
    init_op = tf.global_variables_initializer()

  log_dir = config["log_dir"]
  # Per-worker event directory so summaries from different workers don't collide.
  writer = tf.summary.FileWriter(os.path.join(log_dir, "w{}".format(task_index)), flush_secs=20)

  # Only task 0 initializes variables and writes checkpoints.
  is_chief = (task_index == 0)

  # Create a "supervisor", which oversees the training process.
  sv = tf.train.Supervisor(is_chief=is_chief,
                           logdir=log_dir,
                           init_op=init_op,
                           saver=saver,
                           global_step=model.global_step,
                           save_model_secs=120)

  # The supervisor takes care of session initialization, restoring from
  # a checkpoint, and closing when done or an error occurs.
  with sv.managed_session(server.target) as session:
    model.start_enqueue_thread(session)
    accumulated_loss = 0.0
    initial_time = time.time()
    while not sv.should_stop():
      tf_loss, tf_global_step, _ = session.run([model.loss, model.global_step, model.train_op])
      accumulated_loss += tf_loss

      if tf_global_step % report_frequency == 0:
        # NOTE(review): steps/s is averaged from process start; after a restart
        # from a checkpoint, tf_global_step is restored but initial_time is not,
        # so this over-reports throughput until it amortizes.
        total_time = time.time() - initial_time
        steps_per_second = tf_global_step / total_time

        average_loss = accumulated_loss / report_frequency
        # Report the window-averaged loss (previously this printed the
        # single-step tf_loss, which disagreed with the summary below).
        print("[{}] loss={:.2f}, steps/s={:.2f}".format(tf_global_step, average_loss, steps_per_second))
        accumulated_loss = 0.0
        # Pass global_step so TensorBoard plots these scalars against
        # training progress instead of defaulting to no step.
        writer.add_summary(util.make_summary({
          "Train Loss": average_loss,
          "Steps per second": steps_per_second
        }), tf_global_step)

  # Ask for all the services to stop.
  sv.stop()
0 commit comments