
Commit 9f93712
Merge pull request aymericdamien#85 from normanheckscher/master

begin refactor for TF1.0

2 parents: 2f0c1ba + ab15e28
22 files changed, +695 −692 lines

README.md (+1 −1)

@@ -98,7 +98,7 @@ The following examples are coming from [TFLearn](https://github.com/tflearn/tfle

 ## Dependencies
 ```
-tensorflow
+tensorflow 1.0alpha
 numpy
 matplotlib
 cuda

examples/2_BasicModels/linear_regression.py (+1 −1)

@@ -41,7 +41,7 @@
 optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:
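
Note: `tf.initialize_all_variables()` is the TF 0.x spelling; TF 1.0 renames it `tf.global_variables_initializer()` (same semantics: an op that initializes every global variable, run once before training). A minimal sketch of the new call; the variable `w` is illustrative, not from this repo:

```python
import tensorflow as tf

# One trainable variable, purely for illustration.
w = tf.Variable(tf.zeros([1]), name="w")

# TF 1.0 spelling; tf.initialize_all_variables() is the deprecated 0.x alias.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)       # must run before any op that reads a variable
    print(sess.run(w))   # -> [ 0.]
```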

examples/2_BasicModels/logistic_regression.py (+1 −1)

@@ -38,7 +38,7 @@
 optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:

examples/2_BasicModels/nearest_neighbor.py (+2 −2)

@@ -26,14 +26,14 @@

 # Nearest Neighbor calculation using L1 Distance
 # Calculate L1 Distance
-distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.neg(xte))), reduction_indices=1)
+distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
 # Prediction: Get min distance index (Nearest neighbor)
 pred = tf.arg_min(distance, 0)

 accuracy = 0.

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:
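
Note: `tf.neg` is one of several math ops renamed in TF 1.0 (alongside `tf.sub` → `tf.subtract` and `tf.mul` → `tf.multiply`). A small self-contained sketch of the same L1-distance pattern with the new name; the toy arrays are made up for illustration:

```python
import numpy as np
import tensorflow as tf

xtr = tf.placeholder(tf.float32, [None, 3])   # a few "training" points
xte = tf.placeholder(tf.float32, [3])         # one "test" point

# tf.negative replaces tf.neg; broadcasting subtracts xte from every row.
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), reduction_indices=1)
pred = tf.arg_min(distance, 0)                # index of the nearest neighbor

with tf.Session() as sess:
    nearest = sess.run(pred, feed_dict={
        xtr: np.array([[0., 0., 0.], [1., 1., 1.]], np.float32),
        xte: np.array([0.9, 1.1, 1.0], np.float32)})
    print(nearest)  # -> 1
```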

examples/3_NeuralNetworks/autoencoder.py (+2 −2)

@@ -17,7 +17,7 @@

 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

 # Parameters
 learning_rate = 0.01

@@ -83,7 +83,7 @@ def decoder(x):
 optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:

examples/3_NeuralNetworks/bidirectional_rnn.py (+8 −8)

@@ -10,7 +10,7 @@
 from __future__ import print_function

 import tensorflow as tf
-from tensorflow.python.ops import rnn, rnn_cell
+from tensorflow.contrib import rnn
 import numpy as np

 # Import MNIST data

@@ -60,20 +60,20 @@ def BiRNN(x, weights, biases):
     # Reshape to (n_steps*batch_size, n_input)
     x = tf.reshape(x, [-1, n_input])
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, n_steps, x)
+    x = tf.split(x, n_steps, 0)

     # Define lstm cells with tensorflow
     # Forward direction cell
-    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
     # Backward direction cell
-    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

     # Get lstm cell output
     try:
-        outputs, _, _ = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+        outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                               dtype=tf.float32)
     except Exception:  # Old TensorFlow version only returns outputs not states
-        outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
+        outputs = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x,
                                         dtype=tf.float32)

     # Linear activation, using rnn inner loop last output

@@ -82,15 +82,15 @@ def BiRNN(x, weights, biases):
 pred = BiRNN(x, weights, biases)

 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # Evaluate model
 correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:
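
Note: two TF 1.0 changes meet in this file. `tf.split` swaps its argument order from `(axis, num_splits, value)` to `(value, num_splits, axis)`, and the graph-unrolled RNN helpers move from `tensorflow.python.ops.rnn`/`rnn_cell` into `tf.contrib.rnn` under a `static_` prefix. A minimal sketch of the migrated pattern; the shape constants are illustrative, not from this repo:

```python
import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden = 5, 8, 16
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# Same preprocessing as BiRNN(): a list of n_steps (batch_size, n_input) tensors.
inputs = tf.transpose(x, [1, 0, 2])          # (n_steps, batch_size, n_input)
inputs = tf.reshape(inputs, [-1, n_input])   # (n_steps*batch_size, n_input)
inputs = tf.split(inputs, n_steps, 0)        # TF 1.0 order: (value, num, axis)

# Cells now live in tf.contrib.rnn rather than tensorflow.python.ops.rnn_cell.
lstm_fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
lstm_bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

# The graph-unrolled helper gained a "static_" prefix in TF 1.0.
outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell,
                                             inputs, dtype=tf.float32)
# outputs[-1]: (batch_size, 2*n_hidden), forward and backward outputs concatenated.
```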

examples/3_NeuralNetworks/convolutional_network.py (+2 −2)

@@ -96,15 +96,15 @@ def conv_net(x, weights, biases, dropout):
 pred = conv_net(x, weights, biases, keep_prob)

 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # Evaluate model
 correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:
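
Note: TF 1.0 stops accepting positional arguments to `tf.nn.softmax_cross_entropy_with_logits`, because silently swapping `logits` and `labels` was a common bug. A short sketch with the now-required keywords; the shapes and zero-initialized weights are illustrative only:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])   # one-hot targets
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
pred = tf.matmul(x, W) + b                   # unscaled logits

# TF 1.0 rejects the old positional form softmax_cross_entropy_with_logits(pred, y).
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
```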

examples/3_NeuralNetworks/multilayer_perceptron.py (+2 −2)

@@ -60,11 +60,11 @@ def multilayer_perceptron(x, weights, biases):
 pred = multilayer_perceptron(x, weights, biases)

 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:

examples/3_NeuralNetworks/recurrent_network.py (+6 −6)

@@ -10,7 +10,7 @@
 from __future__ import print_function

 import tensorflow as tf
-from tensorflow.python.ops import rnn, rnn_cell
+from tensorflow.contrib import rnn

 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data

@@ -58,29 +58,29 @@ def RNN(x, weights, biases):
     # Reshaping to (n_steps*batch_size, n_input)
     x = tf.reshape(x, [-1, n_input])
     # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
-    x = tf.split(0, n_steps, x)
+    x = tf.split(x, n_steps, 0)

     # Define a lstm cell with tensorflow
-    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
+    lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

     # Get lstm cell output
-    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
+    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

     # Linear activation, using rnn inner loop last output
     return tf.matmul(outputs[-1], weights['out']) + biases['out']

 pred = RNN(x, weights, biases)

 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # Evaluate model
 correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
 accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # Launch the graph
 with tf.Session() as sess:
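
Note: the unidirectional helper gets the same rename, `rnn.rnn(...)` → `tf.contrib.rnn.static_rnn(...)`. A compact sketch of the migrated call; `tf.unstack` (the TF 1.0 name for `tf.unpack`) is used here as an equivalent alternative to the reshape-and-split dance above, and the shape constants are illustrative:

```python
import tensorflow as tf
from tensorflow.contrib import rnn

n_steps, n_input, n_hidden = 28, 28, 128
x = tf.placeholder(tf.float32, [None, n_steps, n_input])

# tf.unstack yields the same list of n_steps (batch_size, n_input) tensors
# that reshape + tf.split produces in RNN() above.
inputs = tf.unstack(x, n_steps, 1)

lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)

# rnn.rnn(...) from tensorflow.python.ops became rnn.static_rnn(...) in TF 1.0.
outputs, states = rnn.static_rnn(lstm_cell, inputs, dtype=tf.float32)
# outputs[-1] has shape (batch_size, n_hidden): the last time step's output.
```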

examples/4_Utils/save_restore_model.py (+3 −3)

@@ -11,7 +11,7 @@

 # Import MNIST data
 from tensorflow.examples.tutorials.mnist import input_data
-mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

 import tensorflow as tf

@@ -60,11 +60,11 @@ def multilayer_perceptron(x, weights, biases):
 pred = multilayer_perceptron(x, weights, biases)

 # Define loss and optimizer
-cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
+cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
 optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

 # Initializing the variables
-init = tf.initialize_all_variables()
+init = tf.global_variables_initializer()

 # 'Saver' op to save and restore all the variables
 saver = tf.train.Saver()

examples/4_Utils/tensorboard_advanced.py (+8 −8)

@@ -41,12 +41,12 @@ def multilayer_perceptron(x, weights, biases):
     layer_1 = tf.add(tf.matmul(x, weights['w1']), biases['b1'])
     layer_1 = tf.nn.relu(layer_1)
     # Create a summary to visualize the first layer ReLU activation
-    tf.histogram_summary("relu1", layer_1)
+    tf.summary.histogram("relu1", layer_1)
     # Hidden layer with RELU activation
     layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
     layer_2 = tf.nn.relu(layer_2)
     # Create another summary to visualize the second layer ReLU activation
-    tf.histogram_summary("relu2", layer_2)
+    tf.summary.histogram("relu2", layer_2)
     # Output layer
     out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
     return out_layer

@@ -91,24 +91,24 @@ def multilayer_perceptron(x, weights, biases):
 init = tf.initialize_all_variables()

 # Create a summary to monitor cost tensor
-tf.scalar_summary("loss", loss)
+tf.summary.scalar("loss", loss)
 # Create a summary to monitor accuracy tensor
-tf.scalar_summary("accuracy", acc)
+tf.summary.scalar("accuracy", acc)
 # Create summaries to visualize weights
 for var in tf.trainable_variables():
-    tf.histogram_summary(var.name, var)
+    tf.summary.histogram(var.name, var)
 # Summarize all gradients
 for grad, var in grads:
-    tf.histogram_summary(var.name + '/gradient', grad)
+    tf.summary.histogram(var.name + '/gradient', grad)
 # Merge all summaries into a single op
-merged_summary_op = tf.merge_all_summaries()
+merged_summary_op = tf.summary.merge_all()

 # Launch the graph
 with tf.Session() as sess:
     sess.run(init)

     # op to write logs to Tensorboard
-    summary_writer = tf.train.SummaryWriter(logs_path,
+    summary_writer = tf.summary.FileWriter(logs_path,
                                             graph=tf.get_default_graph())

     # Training cycle
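
Note: the summary ops are consolidated under `tf.summary` in TF 1.0: `tf.scalar_summary` → `tf.summary.scalar`, `tf.histogram_summary` → `tf.summary.histogram`, `tf.merge_all_summaries` → `tf.summary.merge_all`, and `tf.train.SummaryWriter` → `tf.summary.FileWriter`. A minimal end-to-end sketch of the migrated logging loop; the toy loss and log directory are illustrative, not from this repo:

```python
import tensorflow as tf

# Toy one-variable "model" so the summaries have something to track.
w = tf.Variable(0.0, name="w")
loss = tf.square(w - 3.0)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

tf.summary.scalar("loss", loss)              # was tf.scalar_summary
tf.summary.histogram("w", w)                 # was tf.histogram_summary
merged_summary_op = tf.summary.merge_all()   # was tf.merge_all_summaries

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # was tf.train.SummaryWriter; "/tmp/tf_logs" is an illustrative path
    writer = tf.summary.FileWriter("/tmp/tf_logs",
                                   graph=tf.get_default_graph())
    for step in range(100):
        _, summary = sess.run([train_op, merged_summary_op])
        writer.add_summary(summary, step)
```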

examples/4_Utils/tensorboard_basic.py (+4 −4)

@@ -52,18 +52,18 @@
 init = tf.initialize_all_variables()

 # Create a summary to monitor cost tensor
-tf.scalar_summary("loss", cost)
+tf.summary.scalar("loss", cost)
 # Create a summary to monitor accuracy tensor
-tf.scalar_summary("accuracy", acc)
+tf.summary.scalar("accuracy", acc)
 # Merge all summaries into a single op
-merged_summary_op = tf.merge_all_summaries()
+merged_summary_op = tf.summary.merge_all()

 # Launch the graph
 with tf.Session() as sess:
     sess.run(init)

     # op to write logs to Tensorboard
-    summary_writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
+    summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

     # Training cycle
     for epoch in range(training_epochs):
