Commit 1271fad

kkweon authored and hunkim committed
refactor: add a FC layer after RNN (#74)

* refactor: add a FC layer
  1. Add an FC layer - #73
  2. Add numpy-style docstrings
  3. Explicitly define the activation function in LSTMCell (the default is tanh)
  4. Change to AdamOptimizer
  5. Use a `with` statement, else the session must be closed explicitly
  6. No line breaks just for the 79-char limit
* refactor: nn.tanh -> tanh
1 parent d20edbd commit 1271fad
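
In short: before this commit, the LSTM cell itself was sized to the output (num_units=output_dim, a single recurrent unit), so the network had almost no capacity and the prediction was the raw, tanh-bounded cell output. The commit widens the cell to hidden_dim = 10 and puts a linear fully-connected head on the last time step. A minimal before/after sketch using the same TF 1.x contrib APIs as the diff below (shapes in comments follow from seq_length = 7):

# Before: recurrent state forced to width output_dim (= 1); prediction is the raw cell output.
cell = tf.contrib.rnn.BasicLSTMCell(num_units=output_dim, state_is_tuple=True)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)  # [batch, 7, 1]
Y_pred = outputs[:, -1]                                          # [batch, 1]

# After: a wider cell plus a linear (no activation) FC head on the last time step.
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)  # [batch, 7, 10]
Y_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=None)  # [batch, 1]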

File tree

1 file changed: +54 −25 lines changed

lab-12-5-rnn_stock_prediction.py (+54 −25)
@@ -1,19 +1,45 @@
+'''
+This script shows how to predict stock prices using a basic RNN
+'''
 import tensorflow as tf
 import numpy as np
 import matplotlib.pyplot as plt
 tf.set_random_seed(777)  # reproducibility


 def MinMaxScaler(data):
+    ''' Min Max Normalization
+
+    Parameters
+    ----------
+    data : numpy.ndarray
+        input data to be normalized
+        shape: [Batch size, dimension]
+
+    Returns
+    ----------
+    data : numpy.ndarray
+        normalized data
+        shape: [Batch size, dimension]
+
+    References
+    ----------
+    .. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
+
+    '''
     numerator = data - np.min(data, 0)
     denominator = np.max(data, 0) - np.min(data, 0)
     # noise term prevents the zero division
     return numerator / (denominator + 1e-7)


+# train parameters
 timesteps = seq_length = 7
 data_dim = 5
+hidden_dim = 10
 output_dim = 1
+learning_rate = 0.01
+iterations = 500

 # Open, High, Low, Volume, Close
 xy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')
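
A quick, hypothetical check of the new docstring's contract (not part of the commit): each column is scaled to [0, 1] independently, and the 1e-7 noise term keeps a constant column from dividing by zero.

import numpy as np

# Hypothetical data: 3 samples, 2 features.
data = np.array([[10., 200.],
                 [20., 400.],
                 [30., 600.]])
scaled = (data - np.min(data, 0)) / (np.max(data, 0) - np.min(data, 0) + 1e-7)
# scaled is approximately [[0., 0.], [0.5, 0.5], [1., 1.]]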
@@ -22,6 +48,7 @@ def MinMaxScaler(data):
 x = xy
 y = xy[:, [-1]]  # Close as label

+# build a dataset
 dataX = []
 dataY = []
 for i in range(0, len(y) - seq_length):
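
The loop body falls between the hunks shown, so it does not appear in this diff; from the loop bounds and the appends that follow, it is a standard sliding-window split. A sketch of that pattern (the `_x`/`_y` bodies here are assumed, not shown by the diff):

for i in range(0, len(y) - seq_length):
    _x = x[i:i + seq_length]  # assumed: window of seq_length rows, all features
    _y = y[i + seq_length]    # assumed: the Close value right after the window
    dataX.append(_x)
    dataY.append(_y)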
@@ -31,47 +58,49 @@ def MinMaxScaler(data):
     dataX.append(_x)
     dataY.append(_y)

-# split to train and testing
+# train/test split
 train_size = int(len(dataY) * 0.7)
 test_size = len(dataY) - train_size
-trainX, testX = np.array(dataX[0:train_size]), np.array(
-    dataX[train_size:len(dataX)])
-trainY, testY = np.array(dataY[0:train_size]), np.array(
-    dataY[train_size:len(dataY)])
+trainX, testX = np.array(dataX[0:train_size]), np.array(dataX[train_size:len(dataX)])
+trainY, testY = np.array(dataY[0:train_size]), np.array(dataY[train_size:len(dataY)])

 # input place holders
 X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
 Y = tf.placeholder(tf.float32, [None, 1])

-cell = tf.contrib.rnn.BasicLSTMCell(num_units=output_dim, state_is_tuple=True)
+# build an LSTM network
+cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
 outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
-Y_pred = outputs[:, -1]  # We use the last cell's output
-
-print(outputs[:, -1])
+Y_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=None)  # We use the last cell's output

 # cost/loss
 loss = tf.reduce_sum(tf.square(Y_pred - Y))  # sum of the squares
 # optimizer
-optimizer = tf.train.GradientDescentOptimizer(0.01)
+optimizer = tf.train.AdamOptimizer(learning_rate)
 train = optimizer.minimize(loss)

 # RMSE
 targets = tf.placeholder(tf.float32, [None, 1])
 predictions = tf.placeholder(tf.float32, [None, 1])
 rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))

-sess = tf.Session()
-sess.run(tf.global_variables_initializer())
-
-for i in range(500):
-    _, step_loss = sess.run([train, loss], feed_dict={X: trainX, Y: trainY})
-    print(i, step_loss)
-
-testPredict = sess.run(Y_pred, feed_dict={X: testX})
-print("RMSE", sess.run(rmse, feed_dict={
-    targets: testY, predictions: testPredict}))
-plt.plot(testY)
-plt.plot(testPredict)
-plt.xlabel("Time Period")
-plt.ylabel("Stock Price")
-plt.show()
+with tf.Session() as sess:
+    init = tf.global_variables_initializer()
+    sess.run(init)
+
+    # Training step
+    for i in range(iterations):
+        _, step_loss = sess.run([train, loss], feed_dict={X: trainX, Y: trainY})
+        print("[step: {}] loss: {}".format(i, step_loss))
+
+    # Test step
+    test_predict = sess.run(Y_pred, feed_dict={X: testX})
+    rmse = sess.run(rmse, feed_dict={targets: testY, predictions: test_predict})
+    print("RMSE: {}".format(rmse))
+
+    # Plot predictions
+    plt.plot(testY)
+    plt.plot(test_predict)
+    plt.xlabel("Time Period")
+    plt.ylabel("Stock Price")
+    plt.show()
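
For reference, the RMSE node evaluated in the test step is plain sqrt(mean((targets - predictions)^2)). A numpy restatement with hypothetical values (note the script rebinds the Python name `rmse` from the tensor to its evaluated value, which is fine for a single pass):

import numpy as np

targets = np.array([[1.0], [2.0], [3.0]])      # hypothetical test labels
predictions = np.array([[1.1], [1.9], [3.2]])  # hypothetical predictions
rmse = np.sqrt(np.mean(np.square(targets - predictions)))
print(rmse)  # ~0.1414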
