Commit 6510aec

Merge branch 'master' into Jingqing-patch
2 parents 968aff9 + 42516c5

File tree: 6 files changed (+10 / -68 lines)

docs/user/get_start_model.rst

Lines changed: 4 additions & 4 deletions
@@ -23,7 +23,7 @@ Static model
     nn = Dropout(keep=0.8)(nn)
     nn = Dense(n_units=800, act=tf.nn.relu)(nn)
     nn = Dropout(keep=0.8)(nn)
-    nn = Dense(n_units=10, act=tf.nn.relu)(nn)
+    nn = Dense(n_units=10, act=None)(nn)
     M = Model(inputs=ni, outputs=nn, name="mlp")  # "name" is optional
     return M

@@ -49,7 +49,7 @@ In this case, you need to manually input the output shape of the previous layer
         self.dropout2 = Dropout(keep=0.8)
         self.dense2 = Dense(n_units=800, act=tf.nn.relu, in_channels=800)
         self.dropout3 = Dropout(keep=0.8)
-        self.dense3 = Dense(n_units=10, act=tf.nn.relu, in_channels=800)
+        self.dense3 = Dense(n_units=10, act=None, in_channels=800)

     def forward(self, x, foo=False):
         z = self.dropout1(x)

@@ -156,7 +156,7 @@ Print model information
 # (dropout_1): Dropout(keep=0.8, name='dropout_1')
 # (dense_1): Dense(n_units=800, relu, in_channels='800', name='dense_1')
 # (dropout_2): Dropout(keep=0.8, name='dropout_2')
-# (dense_2): Dense(n_units=10, relu, in_channels='800', name='dense_2')
+# (dense_2): Dense(n_units=10, None, in_channels='800', name='dense_2')
 # )

 import pprint

@@ -195,7 +195,7 @@ Print model information
 #            'name': 'dropout_3'},
 #  'class': 'Dropout',
 #  'prev_layer': ['dense_2_node_0']},
-# {'args': {'act': 'relu',
+# {'args': {'act': None,
 #           'layer_type': 'normal',
 #           'n_units': 10,
 #           'name': 'dense_3'},
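Note: the substantive change in this file is switching the output layer from act=tf.nn.relu to act=None, so the model returns raw logits. tl.cost.cross_entropy (used by the tutorials below) computes softmax cross-entropy from logits, so no output activation is needed, and a relu there would zero out negative logits. A minimal sketch of the corrected static MLP, assuming the TensorLayer 2.x imports used elsewhere in this commit:

    import tensorflow as tf
    import tensorlayer as tl
    from tensorlayer.layers import Dense, Dropout, Input
    from tensorlayer.models import Model

    def get_model(inputs_shape):
        ni = Input(inputs_shape)
        nn = Dropout(keep=0.8)(ni)
        nn = Dense(n_units=800, act=tf.nn.relu)(nn)
        nn = Dropout(keep=0.8)(nn)
        nn = Dense(n_units=800, act=tf.nn.relu)(nn)
        nn = Dropout(keep=0.8)(nn)
        nn = Dense(n_units=10, act=None)(nn)   # raw logits: no activation on the output layer
        return Model(inputs=ni, outputs=nn, name="mlp")

    # the loss is then computed directly from the logits, e.g.
    # _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')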

examples/basic_tutorials/tutorial_cifar10_cnn_static.py

Lines changed: 3 additions & 16 deletions
@@ -1,11 +1,9 @@
 #!/usr/bin/env python3
 # -*- coding: utf-8 -*-

-import multiprocessing
 import time
-
 import numpy as np
-
+import multiprocessing
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, LocalResponseNorm, MaxPool2d)

@@ -80,14 +78,11 @@ def get_model_batchnorm(inputs_shape):
 print_freq = 5
 n_step_epoch = int(len(y_train) / batch_size)
 n_step = n_epoch * n_step_epoch
-shuffle_buffer_size = 128  # 100
-# init_learning_rate = 0.1
-# learning_rate_decay_factor = 0.1
-# num_epoch_decay = 350
+shuffle_buffer_size = 128

 train_weights = net.trainable_weights
-# learning_rate = tf.Variable(init_learning_rate)
 optimizer = tf.optimizers.Adam(learning_rate)
+# looking for decay learning rate? see https://github.com/tensorlayer/srgan/blob/master/train.py


 def generator_train():

@@ -182,14 +177,10 @@ def _map_fn_test(img, target):

     # use training and evaluation sets to evaluate the model every print_freq epoch
     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
         print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-
         print(" train loss: {}".format(train_loss / n_iter))
         print(" train acc: {}".format(train_acc / n_iter))
-
         net.eval()
-
         val_loss, val_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in test_ds:
             _logits = net(X_batch)  # is_train=False, disable dropout

@@ -199,10 +190,6 @@ def _map_fn_test(img, target):
         print(" val loss: {}".format(val_loss / n_iter))
         print(" val acc: {}".format(val_acc / n_iter))

-        # FIXME : how to apply lr decay in eager mode?
-        # learning_rate.assign(tf.train.exponential_decay(init_learning_rate, epoch, num_epoch_decay,
-        #                                                 learning_rate_decay_factor))
-
 # use testing data to evaluate the model
 net.eval()
 test_loss, test_acc, n_iter = 0, 0, 0
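Note: the FIXME removed above asked how to apply learning-rate decay in eager mode; the commit replaces it with a pointer to the srgan training script. One standard TF2 option (a sketch of an alternative, not what this commit does) is to hand the optimizer a schedule object instead of a plain float, reusing the init_learning_rate / learning_rate_decay_factor / num_epoch_decay values from the deleted comments; n_step_epoch below stands in for int(len(y_train) / batch_size):

    import tensorflow as tf

    init_learning_rate = 0.1
    learning_rate_decay_factor = 0.1
    num_epoch_decay = 350
    n_step_epoch = 390  # illustrative value; the tutorial derives it from the dataset size

    # decay the learning rate by learning_rate_decay_factor once every num_epoch_decay epochs
    lr_schedule = tf.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=init_learning_rate,
        decay_steps=num_epoch_decay * n_step_epoch,
        decay_rate=learning_rate_decay_factor,
        staircase=True,
    )
    optimizer = tf.optimizers.Adam(learning_rate=lr_schedule)  # the schedule is re-evaluated at every optimizer step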

examples/basic_tutorials/tutorial_mnist_mlp_dynamic.py

Lines changed: 0 additions & 12 deletions
@@ -1,7 +1,5 @@
 import time
-
 import numpy as np
-
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import Dense, Dropout, Input

@@ -19,7 +17,6 @@ class CustomModel(Model):

     def __init__(self):
         super(CustomModel, self).__init__()
-
         self.dropout1 = Dropout(keep=0.8)  #(self.innet)
         self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)  #(self.dropout1)
         self.dropout2 = Dropout(keep=0.8)  #(self.dense1)

@@ -52,27 +49,20 @@ def forward(self, x, foo=None):
 for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
     start_time = time.time()
     ## iterate over the entire training set once (shuffle the data via training)
-
     for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-
         MLP.train()  # enable dropout
-
         with tf.GradientTape() as tape:
             ## compute outputs
             _logits = MLP(X_batch, foo=1)
             ## compute loss and update model
             _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
-
         grad = tape.gradient(_loss, train_weights)
         optimizer.apply_gradients(zip(grad, train_weights))

     ## use training and evaluation sets to evaluate the model every print_freq epoch
     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
         MLP.eval()  # disable dropout
-
         print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-
         train_loss, train_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
             _logits = MLP(X_batch, foo=1)

@@ -81,7 +71,6 @@ def forward(self, x, foo=None):
             n_iter += 1
         print(" train foo=1 loss: {}".format(train_loss / n_iter))
         print(" train foo=1 acc: {}".format(train_acc / n_iter))
-
         val_loss, val_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
             _logits = MLP(X_batch, foo=1)  # is_train=False, disable dropout

@@ -90,7 +79,6 @@ def forward(self, x, foo=None):
             n_iter += 1
         print(" val foo=1 loss: {}".format(val_loss / n_iter))
         print(" val foo=1 acc: {}".format(val_acc / n_iter))
-
         val_loss, val_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
             _logits = MLP(X_batch)  # is_train=False, disable dropout
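Note: beyond the whitespace cleanup, this tutorial's point is that a dynamic Model's forward() can accept extra keyword arguments (foo here), which are passed straight through the call, e.g. MLP(X_batch, foo=1). A compact sketch of such a model, assuming TensorLayer 2.x; the branch on foo is illustrative, not necessarily the tutorial's exact body:

    import tensorflow as tf
    from tensorlayer.layers import Dense, Dropout
    from tensorlayer.models import Model

    class CustomModel(Model):

        def __init__(self):
            super(CustomModel, self).__init__()
            self.dropout1 = Dropout(keep=0.8)
            self.dense1 = Dense(n_units=800, act=tf.nn.relu, in_channels=784)
            self.dropout2 = Dropout(keep=0.8)
            self.dense2 = Dense(n_units=800, act=tf.nn.relu, in_channels=800)
            self.dropout3 = Dropout(keep=0.8)
            self.dense3 = Dense(n_units=10, act=None, in_channels=800)

        def forward(self, x, foo=None):
            z = self.dropout1(x)
            z = self.dense1(z)
            z = self.dropout2(z)
            z = self.dense2(z)
            z = self.dropout3(z)
            out = self.dense3(z)
            if foo is not None:      # illustrative: any extra keyword reaches forward() unchanged
                out = tf.nn.relu(out)
            return out

    MLP = CustomModel()
    MLP.train()                       # enable dropout
    # _logits = MLP(X_batch, foo=1)   # X_batch is a (batch, 784) float tensor from the MNIST tutorial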

examples/basic_tutorials/tutorial_mnist_mlp_dynamic_2.py

Lines changed: 0 additions & 13 deletions
@@ -1,7 +1,5 @@
 import time
-
 import numpy as np
-
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import Dense, Dropout, Input, LayerList

@@ -19,17 +17,14 @@ class CustomModelHidden(Model):

     def __init__(self):
         super(CustomModelHidden, self).__init__()
-
         self.dropout1 = Dropout(keep=0.8)  #(self.innet)
-
         self.seq = LayerList(
             [
                 Dense(n_units=800, act=tf.nn.relu, in_channels=784),
                 Dropout(keep=0.8),
                 Dense(n_units=800, act=tf.nn.relu, in_channels=800),
             ]
         )
-
         self.dropout3 = Dropout(keep=0.8)  #(self.seq)

     def forward(self, x):

@@ -43,7 +38,6 @@ class CustomModelOut(Model):

     def __init__(self):
         super(CustomModelOut, self).__init__()
-
         self.dense3 = Dense(n_units=10, act=tf.nn.relu, in_channels=800)

     def forward(self, x, foo=None):

@@ -74,30 +68,23 @@ def forward(self, x, foo=None):
 for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
     start_time = time.time()
     ## iterate over the entire training set once (shuffle the data via training)
-
     for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-
         MLP1.train()  # enable dropout
         MLP2.train()
-
         with tf.GradientTape() as tape:
             ## compute outputs
             _hidden = MLP1(X_batch)
             _logits = MLP2(_hidden, foo=1)
             ## compute loss and update model
             _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
-
         grad = tape.gradient(_loss, train_weights)
         optimizer.apply_gradients(zip(grad, train_weights))

     ## use training and evaluation sets to evaluate the model every print_freq epoch
     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
         MLP1.eval()  # disable dropout
         MLP2.eval()
-
         print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-
         train_loss, train_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
             _hidden = MLP1(X_batch)
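Note: this tutorial splits the MLP into two Model objects that are trained jointly: both are put into training mode, the forward passes are chained inside one GradientTape, and a single optimizer step updates both. A sketch of that joint update, assuming train_weights is simply the concatenation of both models' trainable_weights (the tutorial defines it outside the hunks shown here) and that the forward bodies follow the layer definitions above:

    import tensorflow as tf
    import tensorlayer as tl
    from tensorlayer.layers import Dense, Dropout, LayerList
    from tensorlayer.models import Model

    class CustomModelHidden(Model):

        def __init__(self):
            super(CustomModelHidden, self).__init__()
            self.dropout1 = Dropout(keep=0.8)
            self.seq = LayerList([
                Dense(n_units=800, act=tf.nn.relu, in_channels=784),
                Dropout(keep=0.8),
                Dense(n_units=800, act=tf.nn.relu, in_channels=800),
            ])
            self.dropout3 = Dropout(keep=0.8)

        def forward(self, x):
            # reconstructed from the layer definitions; the exact body is not part of this diff
            z = self.dropout1(x)
            z = self.seq(z)
            return self.dropout3(z)

    class CustomModelOut(Model):

        def __init__(self):
            super(CustomModelOut, self).__init__()
            self.dense3 = Dense(n_units=10, act=tf.nn.relu, in_channels=800)

        def forward(self, x, foo=None):
            # foo mirrors the tutorial's extra keyword argument; its effect is omitted here
            return self.dense3(x)

    MLP1 = CustomModelHidden()
    MLP2 = CustomModelOut()
    # assumption: both sub-models are updated through one concatenated weight list
    train_weights = MLP1.trainable_weights + MLP2.trainable_weights
    optimizer = tf.optimizers.Adam(learning_rate=0.0001)  # learning-rate value is illustrative

    def train_step(X_batch, y_batch):
        MLP1.train()   # enable dropout in both sub-models
        MLP2.train()
        with tf.GradientTape() as tape:
            _hidden = MLP1(X_batch)            # hidden features from the first model
            _logits = MLP2(_hidden, foo=1)     # logits from the second model
            _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
        grad = tape.gradient(_loss, train_weights)
        optimizer.apply_gradients(zip(grad, train_weights))
        return _loss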

examples/basic_tutorials/tutorial_mnist_mlp_static.py

Lines changed: 3 additions & 13 deletions
@@ -1,7 +1,5 @@
 import time
-
 import numpy as np
-
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import Dense, Dropout, Input

@@ -21,7 +19,7 @@
 def get_model(inputs_shape):
     ni = Input(inputs_shape)
     nn = Dropout(keep=0.8)(ni)
-    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
+    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
     nn = Dropout(keep=0.8)(nn)
     nn = Dense(n_units=800, act=tf.nn.relu)(nn)
     nn = Dropout(keep=0.8)(nn)

@@ -45,31 +43,23 @@ def get_model(inputs_shape):
 for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
     start_time = time.time()
     ## iterate over the entire training set once (shuffle the data via training)
-
     for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-
         MLP.train()  # enable dropout
-
         with tf.GradientTape() as tape:
             ## compute outputs
-            _logits = MLP(X_batch)  # alternatively, you can use MLP(x, is_train=True) and remove MLP.train()
+            _logits = MLP(X_batch)
             ## compute loss and update model
             _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
-
         grad = tape.gradient(_loss, train_weights)
         optimizer.apply_gradients(zip(grad, train_weights))

     ## use training and evaluation sets to evaluate the model every print_freq epoch
     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
         MLP.eval()  # disable dropout
-
         print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-
         train_loss, train_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):
-
-            _logits = MLP(X_batch)  # alternatively, you can use MLP(x, is_train=False) and remove MLP.eval()
+            _logits = MLP(X_batch)
             train_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
             train_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
             n_iter += 1
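Note: the comments removed in this file documented an equivalent calling convention: instead of switching the whole model with MLP.train() / MLP.eval(), the mode can be passed per call. A short sketch of both forms, based on those removed comments:

    # form 1: toggle the model, then call it
    MLP.train()                              # enable dropout
    _logits = MLP(X_batch)
    MLP.eval()                               # disable dropout
    _logits = MLP(X_batch)

    # form 2: pass the mode per call, no global toggle needed
    _logits = MLP(X_batch, is_train=True)    # training pass
    _logits = MLP(X_batch, is_train=False)   # evaluation pass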

examples/basic_tutorials/tutorial_mnist_mlp_static_2.py

Lines changed: 0 additions & 10 deletions
@@ -1,7 +1,5 @@
 import time
-
 import numpy as np
-
 import tensorflow as tf
 import tensorlayer as tl
 from tensorlayer.layers import Dense, Dropout, Input

@@ -55,27 +53,20 @@ def get_model(inputs_shape, hmodel):
 for epoch in range(n_epoch):  ## iterate the dataset n_epoch times
     start_time = time.time()
     ## iterate over the entire training set once (shuffle the data via training)
-
     for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
-
         MLP.train()  # enable dropout
-
         with tf.GradientTape() as tape:
             ## compute outputs
             _logits = MLP(X_batch)  # alternatively, you can use MLP(x, is_train=True) and remove MLP.train()
             ## compute loss and update model
             _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
-
         grad = tape.gradient(_loss, train_weights)
         optimizer.apply_gradients(zip(grad, train_weights))

     ## use training and evaluation sets to evaluate the model every print_freq epoch
     if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
-
         MLP.eval()  # disable dropout
-
         print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
-
         train_loss, train_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=False):


@@ -85,7 +76,6 @@ def get_model(inputs_shape, hmodel):
             n_iter += 1
         print(" train loss: {}".format(train_loss / n_iter))
         print(" train acc: {}".format(train_acc / n_iter))
-
         val_loss, val_acc, n_iter = 0, 0, 0
         for X_batch, y_batch in tl.iterate.minibatches(X_val, y_val, batch_size, shuffle=False):
             _logits = MLP(X_batch)  # is_train=False, disable dropout
