 11 |  11 |   "%matplotlib inline\n",
 12 |  12 |   "\n",
 13 |  13 |   "import gym\n",
    |  14 | + "from gym.wrappers import Monitor\n",
 14 |  15 |   "import itertools\n",
 15 |  16 |   "import numpy as np\n",
 16 |  17 |   "import os\n",

 67 |  68 |   " self.output = tf.image.rgb_to_grayscale(self.input_state)\n",
 68 |  69 |   " self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, 160, 160)\n",
 69 |  70 |   " self.output = tf.image.resize_images(\n",
 70 |     | - " self.output, 84, 84, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n",
    |  71 | + " self.output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n",
 71 |  72 |   " self.output = tf.squeeze(self.output)\n",
 72 |  73 |   "\n",
 73 |  74 |   " def process(self, sess, state):\n",
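The hunk above switches `tf.image.resize_images` to the newer signature that takes the target size as a single `[height, width]` list instead of two separate arguments. A minimal standalone sketch of the same preprocessing graph, assuming TensorFlow 1.x; the `[210, 160, 3]` placeholder shape is an assumption for illustration, not taken from the diff:

```python
import tensorflow as tf

# Raw RGB Atari-style frame; the shape is assumed for this sketch.
input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8)

output = tf.image.rgb_to_grayscale(input_state)
output = tf.image.crop_to_bounding_box(output, 34, 0, 160, 160)
# resize_images now expects the target size as one [height, width] argument.
output = tf.image.resize_images(
    output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
output = tf.squeeze(output)  # drop the trailing channel dimension -> [84, 84]
```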
107 | 108 |   " summary_dir = os.path.join(summaries_dir, \"summaries_{}\".format(scope))\n",
108 | 109 |   " if not os.path.exists(summary_dir):\n",
109 | 110 |   " os.makedirs(summary_dir)\n",
110 |     | - " self.summary_writer = tf.train.SummaryWriter(summary_dir)\n",
    | 111 | + " self.summary_writer = tf.summary.FileWriter(summary_dir)\n",
111 | 112 |   "\n",
112 | 113 |   " def _build_model(self):\n",
113 | 114 |   " \"\"\"\n",

151 | 152 |   " self.train_op = self.optimizer.minimize(self.loss, global_step=tf.contrib.framework.get_global_step())\n",
152 | 153 |   "\n",
153 | 154 |   " # Summaries for Tensorboard\n",
154 |     | - " self.summaries = tf.merge_summary([\n",
155 |     | - " tf.scalar_summary(\"loss\", self.loss),\n",
156 |     | - " tf.histogram_summary(\"loss_hist\", self.losses),\n",
157 |     | - " tf.histogram_summary(\"q_values_hist\", self.predictions),\n",
158 |     | - " tf.scalar_summary(\"max_q_value\", tf.reduce_max(self.predictions))\n",
    | 155 | + " self.summaries = tf.summary.merge([\n",
    | 156 | + " tf.summary.scalar(\"loss\", self.loss),\n",
    | 157 | + " tf.summary.histogram(\"loss_hist\", self.losses),\n",
    | 158 | + " tf.summary.histogram(\"q_values_hist\", self.predictions),\n",
    | 159 | + " tf.summary.scalar(\"max_q_value\", tf.reduce_max(self.predictions))\n",
159 | 160 |   " ])\n",
160 | 161 |   "\n",
161 | 162 |   "\n",
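These two hunks are the TF 1.x summary API rename: `tf.train.SummaryWriter` becomes `tf.summary.FileWriter`, and `tf.merge_summary` / `tf.scalar_summary` / `tf.histogram_summary` become `tf.summary.merge` / `tf.summary.scalar` / `tf.summary.histogram`. A small self-contained sketch of the renamed calls, assuming TensorFlow 1.x; the placeholder tensors and the log directory are stand-ins, not part of the notebook:

```python
import tensorflow as tf

# Stand-in tensors so the summaries have something to record.
loss = tf.placeholder(tf.float32, shape=[], name="loss")
predictions = tf.placeholder(tf.float32, shape=[None, 4], name="q_values")

summaries = tf.summary.merge([
    tf.summary.scalar("loss", loss),
    tf.summary.histogram("q_values_hist", predictions),
    tf.summary.scalar("max_q_value", tf.reduce_max(predictions)),
])
summary_writer = tf.summary.FileWriter("/tmp/summaries_example")  # illustrative path

with tf.Session() as sess:
    summary_str = sess.run(summaries, {loss: 0.5,
                                       predictions: [[0.1, 0.2, 0.3, 0.4]]})
    summary_writer.add_summary(summary_str, global_step=0)
    summary_writer.flush()
```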
212 | 213 |   "sp = StateProcessor()\n",
213 | 214 |   "\n",
214 | 215 |   "with tf.Session() as sess:\n",
215 |     | - " sess.run(tf.initialize_all_variables())\n",
    | 216 | + " sess.run(tf.global_variables_initializer())\n",
216 | 217 |   " 
217 | 218 |   " # Example observation batch\n",
218 | 219 |   " observation = env.reset()\n",
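`tf.initialize_all_variables()` was deprecated in favor of `tf.global_variables_initializer()`; the call pattern is otherwise unchanged. A tiny sketch, assuming TensorFlow 1.x (the variable is only for illustration):

```python
import tensorflow as tf

w = tf.Variable(tf.zeros([4]), name="w")  # illustrative variable

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # replaces tf.initialize_all_variables()
    print(sess.run(w))
```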
391 | 392 |   " pass\n",
392 | 393 |   "\n",
393 | 394 |   " # Record videos\n",
394 |     | - " env.monitor.start(monitor_path,\n",
395 |     | - " resume=True,\n",
396 |     | - " video_callable=lambda count: count % record_video_every == 0)\n",
    | 395 | + " env = Monitor(env,\n",
    | 396 | + " directory=monitor_path,\n",
    | 397 | + " resume=True,\n",
    | 398 | + " video_callable=lambda count: count % record_video_every == 0)\n",
397 | 399 |   "\n",
398 | 400 |   " for i_episode in range(num_episodes):\n",
399 | 401 |   "\n",
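Together with the `from gym.wrappers import Monitor` import added at the top of the notebook, this hunk replaces the removed `env.monitor.start(...)` API with the `Monitor` wrapper. A minimal usage sketch, assuming an older gym release that still ships `gym.wrappers.Monitor` and has the Atari environments installed; the environment id, output directory, and recording interval are illustrative:

```python
import gym
from gym.wrappers import Monitor

env = gym.make("Breakout-v0")  # illustrative Atari environment
env = Monitor(env,
              directory="/tmp/breakout-monitor",  # illustrative path
              resume=True,
              video_callable=lambda count: count % 50 == 0)  # record every 50th episode

observation = env.reset()
for _ in range(1000):
    observation, reward, done, _ = env.step(env.action_space.sample())
    if done:
        observation = env.reset()
env.close()  # finalize the monitor/video files
```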
526 | 528 |   "name": "python",
527 | 529 |   "nbconvert_exporter": "python",
528 | 530 |   "pygments_lexer": "ipython3",
529 |     | - "version": "3.5.1"
    | 531 | + "version": "3.6.0"
530 | 532 |   }
531 | 533 |   },
532 | 534 |   "nbformat": 4,