
Commit f4dee6f

josdantmarakis authored and committed

fixing the names SimpleRNNLearner and AutoencoderLearner (#1125)

* fixing the names SimpleRNNLearner and AutoencoderLearner
* remove the warning messages
1 parent e2b8a42 commit f4dee6f

File tree

1 file changed: +45 −24 lines changed


Diff for: notebooks/chapter19/RNN.ipynb

@@ -58,6 +58,8 @@
 }
 ],
 "source": [
+"import warnings\n",
+"warnings.filterwarnings(\"ignore\", category=FutureWarning)\n",
 "import os, sys\n",
 "sys.path = [os.path.abspath(\"../../\")] + sys.path\n",
 "from deep_learning4e import *\n",
@@ -158,13 +160,14 @@
 "<body>\n",
 "<h2></h2>\n",
 "\n",
-"<div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">simple_rnn_learner</span><span class=\"p\">(</span><span class=\"n\">train_data</span><span class=\"p\">,</span> <span class=\"n\">val_data</span><span class=\"p\">,</span> <span class=\"n\">epochs</span><span class=\"o\">=</span><span class=\"mi\">2</span><span class=\"p\">):</span>\n",
+"<div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">SimpleRNNLearner</span><span class=\"p\">(</span><span class=\"n\">train_data</span><span class=\"p\">,</span> <span class=\"n\">val_data</span><span class=\"p\">,</span> <span class=\"n\">epochs</span><span class=\"o\">=</span><span class=\"mi\">2</span><span class=\"p\">):</span>\n",
 "    <span class=\"sd\">&quot;&quot;&quot;</span>\n",
-"<span class=\"sd\">    rnn example for text sentimental analysis</span>\n",
+"<span class=\"sd\">    RNN example for text sentimental analysis.</span>\n",
 "<span class=\"sd\">    :param train_data: a tuple of (training data, targets)</span>\n",
 "<span class=\"sd\">        Training data: ndarray taking training examples, while each example is coded by embedding</span>\n",
-"<span class=\"sd\">        Targets: ndarry taking targets of each example. Each target is mapped to an integer.</span>\n",
+"<span class=\"sd\">        Targets: ndarray taking targets of each example. Each target is mapped to an integer.</span>\n",
 "<span class=\"sd\">    :param val_data: a tuple of (validation data, targets)</span>\n",
+"<span class=\"sd\">    :param epochs: number of epochs</span>\n",
 "<span class=\"sd\">    :return: a keras model</span>\n",
 "<span class=\"sd\">    &quot;&quot;&quot;</span>\n",
 "\n",
@@ -199,7 +202,7 @@
 }
 ],
 "source": [
-"psource(simple_rnn_learner)"
+"psource(SimpleRNNLearner)"
 ]
 },
 {
@@ -220,7 +223,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 3,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -238,39 +241,51 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 4,
 "metadata": {},
 "outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"WARNING: Logging before flag parsing goes to stderr.\n",
+"W1018 22:51:23.614058 140557804885824 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_impl.py:180: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
+"Instructions for updating:\n",
+"Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
+"W1018 22:51:24.267649 140557804885824 deprecation_wrapper.py:119] From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n",
+"\n"
+]
+},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "Train on 24990 samples, validate on 25000 samples\n",
 "Epoch 1/10\n",
-" - 45s - loss: 0.6877 - acc: 0.5406 - val_loss: 0.6731 - val_acc: 0.6045\n",
+" - 59s - loss: 0.6540 - accuracy: 0.5959 - val_loss: 0.6234 - val_accuracy: 0.6488\n",
 "Epoch 2/10\n",
-" - 52s - loss: 0.6441 - acc: 0.6241 - val_loss: 0.6258 - val_acc: 0.6300\n",
+" - 61s - loss: 0.5977 - accuracy: 0.6766 - val_loss: 0.6202 - val_accuracy: 0.6326\n",
 "Epoch 3/10\n",
-" - 50s - loss: 0.5275 - acc: 0.7393 - val_loss: 0.5547 - val_acc: 0.7229\n",
+" - 61s - loss: 0.5269 - accuracy: 0.7356 - val_loss: 0.4803 - val_accuracy: 0.7789\n",
 "Epoch 4/10\n",
-" - 50s - loss: 0.4703 - acc: 0.7908 - val_loss: 0.4851 - val_acc: 0.7740\n",
+" - 61s - loss: 0.4159 - accuracy: 0.8130 - val_loss: 0.5640 - val_accuracy: 0.7046\n",
 "Epoch 5/10\n",
-" - 48s - loss: 0.4021 - acc: 0.8279 - val_loss: 0.4517 - val_acc: 0.8121\n",
+" - 61s - loss: 0.3931 - accuracy: 0.8294 - val_loss: 0.4707 - val_accuracy: 0.8090\n",
 "Epoch 6/10\n",
-" - 55s - loss: 0.4043 - acc: 0.8269 - val_loss: 0.4532 - val_acc: 0.8042\n",
+" - 61s - loss: 0.3357 - accuracy: 0.8637 - val_loss: 0.4177 - val_accuracy: 0.8122\n",
 "Epoch 7/10\n",
-" - 51s - loss: 0.4242 - acc: 0.8315 - val_loss: 0.5257 - val_acc: 0.7785\n",
+" - 61s - loss: 0.3552 - accuracy: 0.8594 - val_loss: 0.4652 - val_accuracy: 0.7889\n",
 "Epoch 8/10\n",
-" - 58s - loss: 0.4534 - acc: 0.7964 - val_loss: 0.5347 - val_acc: 0.7323\n",
+" - 61s - loss: 0.3286 - accuracy: 0.8686 - val_loss: 0.4708 - val_accuracy: 0.7785\n",
 "Epoch 9/10\n",
-" - 51s - loss: 0.3821 - acc: 0.8354 - val_loss: 0.4671 - val_acc: 0.8054\n",
+" - 61s - loss: 0.3428 - accuracy: 0.8635 - val_loss: 0.4332 - val_accuracy: 0.8137\n",
 "Epoch 10/10\n",
-" - 56s - loss: 0.3283 - acc: 0.8691 - val_loss: 0.4523 - val_acc: 0.8067\n"
+" - 61s - loss: 0.3650 - accuracy: 0.8471 - val_loss: 0.4673 - val_accuracy: 0.7914\n"
 ]
 }
 ],
 "source": [
-"model = simple_rnn_learner(train, val, epochs=10)"
+"model = SimpleRNNLearner(train, val, epochs=10)"
 ]
 },
 {
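
Note: with the rename applied, the cell above re-runs training through SimpleRNNLearner and gets back an ordinary Keras model, so the usual evaluate/predict calls apply. A short hedged example, assuming val is the (data, targets) tuple prepared earlier in the notebook:

# model is what SimpleRNNLearner(train, val, epochs=10) returned above.
val_x, val_y = val
loss, accuracy = model.evaluate(val_x, val_y, verbose=0)    # standard Keras evaluation
print("validation loss %.3f, accuracy %.3f" % (loss, accuracy))
scores = model.predict(val_x[:5])                           # per-example sentiment scores
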
@@ -306,7 +321,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 19,
+"execution_count": 5,
 "metadata": {},
 "outputs": [
 {
@@ -398,18 +413,24 @@
 "<body>\n",
 "<h2></h2>\n",
 "\n",
-"<div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">auto_encoder_learner</span><span class=\"p\">(</span><span class=\"n\">inputs</span><span class=\"p\">,</span> <span class=\"n\">encoding_size</span><span class=\"p\">,</span> <span class=\"n\">epochs</span><span class=\"o\">=</span><span class=\"mi\">200</span><span class=\"p\">):</span>\n",
-"    <span class=\"sd\">&quot;&quot;&quot;simple example of linear auto encoder learning producing the input itself.</span>\n",
+"<div class=\"highlight\"><pre><span></span><span class=\"k\">def</span> <span class=\"nf\">AutoencoderLearner</span><span class=\"p\">(</span><span class=\"n\">inputs</span><span class=\"p\">,</span> <span class=\"n\">encoding_size</span><span class=\"p\">,</span> <span class=\"n\">epochs</span><span class=\"o\">=</span><span class=\"mi\">200</span><span class=\"p\">):</span>\n",
+"    <span class=\"sd\">&quot;&quot;&quot;</span>\n",
+"<span class=\"sd\">    Simple example of linear auto encoder learning producing the input itself.</span>\n",
 "<span class=\"sd\">    :param inputs: a batch of input data in np.ndarray type</span>\n",
-"<span class=\"sd\">    :param encoding_size: int, the size of encoding layer&quot;&quot;&quot;</span>\n",
+"<span class=\"sd\">    :param encoding_size: int, the size of encoding layer</span>\n",
+"<span class=\"sd\">    :param epochs: number of epochs</span>\n",
+"<span class=\"sd\">    :return: a keras model</span>\n",
+"<span class=\"sd\">    &quot;&quot;&quot;</span>\n",
 "\n",
 "    <span class=\"c1\"># init data</span>\n",
 "    <span class=\"n\">input_size</span> <span class=\"o\">=</span> <span class=\"nb\">len</span><span class=\"p\">(</span><span class=\"n\">inputs</span><span class=\"p\">[</span><span class=\"mi\">0</span><span class=\"p\">])</span>\n",
 "\n",
 "    <span class=\"c1\"># init model</span>\n",
 "    <span class=\"n\">model</span> <span class=\"o\">=</span> <span class=\"n\">Sequential</span><span class=\"p\">()</span>\n",
-"    <span class=\"n\">model</span><span class=\"o\">.</span><span class=\"n\">add</span><span class=\"p\">(</span><span class=\"n\">Dense</span><span class=\"p\">(</span><span class=\"n\">encoding_size</span><span class=\"p\">,</span> <span class=\"n\">input_dim</span><span class=\"o\">=</span><span class=\"n\">input_size</span><span class=\"p\">,</span> <span class=\"n\">activation</span><span class=\"o\">=</span><span class=\"s1\">&#39;relu&#39;</span><span class=\"p\">,</span> <span class=\"n\">kernel_initializer</span><span class=\"o\">=</span><span class=\"s1\">&#39;random_uniform&#39;</span><span class=\"p\">,</span><span class=\"n\">bias_initializer</span><span class=\"o\">=</span><span class=\"s1\">&#39;ones&#39;</span><span class=\"p\">))</span>\n",
+"    <span class=\"n\">model</span><span class=\"o\">.</span><span class=\"n\">add</span><span class=\"p\">(</span><span class=\"n\">Dense</span><span class=\"p\">(</span><span class=\"n\">encoding_size</span><span class=\"p\">,</span> <span class=\"n\">input_dim</span><span class=\"o\">=</span><span class=\"n\">input_size</span><span class=\"p\">,</span> <span class=\"n\">activation</span><span class=\"o\">=</span><span class=\"s1\">&#39;relu&#39;</span><span class=\"p\">,</span> <span class=\"n\">kernel_initializer</span><span class=\"o\">=</span><span class=\"s1\">&#39;random_uniform&#39;</span><span class=\"p\">,</span>\n",
+"              <span class=\"n\">bias_initializer</span><span class=\"o\">=</span><span class=\"s1\">&#39;ones&#39;</span><span class=\"p\">))</span>\n",
 "    <span class=\"n\">model</span><span class=\"o\">.</span><span class=\"n\">add</span><span class=\"p\">(</span><span class=\"n\">Dense</span><span class=\"p\">(</span><span class=\"n\">input_size</span><span class=\"p\">,</span> <span class=\"n\">activation</span><span class=\"o\">=</span><span class=\"s1\">&#39;relu&#39;</span><span class=\"p\">,</span> <span class=\"n\">kernel_initializer</span><span class=\"o\">=</span><span class=\"s1\">&#39;random_uniform&#39;</span><span class=\"p\">,</span> <span class=\"n\">bias_initializer</span><span class=\"o\">=</span><span class=\"s1\">&#39;ones&#39;</span><span class=\"p\">))</span>\n",
+"\n",
 "    <span class=\"c1\"># update model with sgd</span>\n",
 "    <span class=\"n\">sgd</span> <span class=\"o\">=</span> <span class=\"n\">optimizers</span><span class=\"o\">.</span><span class=\"n\">SGD</span><span class=\"p\">(</span><span class=\"n\">lr</span><span class=\"o\">=</span><span class=\"mf\">0.01</span><span class=\"p\">)</span>\n",
 "    <span class=\"n\">model</span><span class=\"o\">.</span><span class=\"n\">compile</span><span class=\"p\">(</span><span class=\"n\">loss</span><span class=\"o\">=</span><span class=\"s1\">&#39;mean_squared_error&#39;</span><span class=\"p\">,</span> <span class=\"n\">optimizer</span><span class=\"o\">=</span><span class=\"n\">sgd</span><span class=\"p\">,</span> <span class=\"n\">metrics</span><span class=\"o\">=</span><span class=\"p\">[</span><span class=\"s1\">&#39;accuracy&#39;</span><span class=\"p\">])</span>\n",
@@ -431,7 +452,7 @@
 }
 ],
 "source": [
-"psource(auto_encoder_learner)"
+"psource(AutoencoderLearner)"
 ]
 },
 {
@@ -458,7 +479,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.7.2"
+"version": "3.6.8"
 }
 },
 "nbformat": 4,

0 commit comments
