+import numpy as np
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from types import SimpleNamespace
+
+N = 100
+
+def gen():
+    # Create dataset: N noisy samples of y = exp(x) on [0, 3],
+    # with uniform noise in [0, 2)
+    data = SimpleNamespace()
+    data.x = np.linspace(0, 3, N)
+    data.y = np.exp(data.x) + 2 * np.random.rand(N)
+    # Reshape to (N, 1) column vectors, one example per row
+    data.x = np.reshape(data.x, (-1, 1))
+    data.y = np.reshape(data.y, (-1, 1))
+    return data
+
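+# Note: gen() draws fresh noise on every call, so each training step
+# below fits against a newly sampled batch rather than a fixed dataset.
+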
+# Training parameters
+learning_rate = 0.01
+num_steps = 1000
+
+# Placeholders for the network input x and target y
+X = tf.placeholder(tf.float32, [None, 1])
+Y = tf.placeholder(tf.float32, [None, 1])
+
+# Define network: a 1 -> 100 -> 100 -> 1 MLP with ReLU hidden layers
+fc1 = tf.layers.dense(X, 100, activation=tf.nn.relu, name='fc1')
+fc2 = tf.layers.dense(fc1, 100, activation=tf.nn.relu, name='fc2')
+Y_pred = tf.layers.dense(fc2, 1, name='out')
+
+# Define loss (mean squared error) and optimizer
+loss = tf.reduce_mean(tf.square(Y_pred - Y))
+optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
+
+# Initialize variables and start the session
+init = tf.global_variables_initializer()
+sess = tf.Session()
+sess.run(init)
+
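+# (TF 1.x graph mode: the lines above only build the computation
+# graph; sess.run(init) is the first thing that actually executes.)
+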
+# Train
+for step in range(num_steps):
+    data = gen()
+    sess.run(optimizer, feed_dict={X: data.x, Y: data.y})
+
+# Evaluate the fit on a 10x denser grid so the plotted curve is smooth
+_x = np.reshape(np.linspace(0, 3, 10 * N), (-1, 1))
+_y = sess.run(Y_pred, feed_dict={X: _x})
+
+# Flatten everything back to 1-D for plotting
+x = np.reshape(data.x, (-1))
+y = np.reshape(data.y, (-1))
+_y = np.reshape(_y, (-1))
+_x = np.reshape(_x, (-1))
+
+# Plot results: last noisy batch as a scatter, learned curve in red
+plt.scatter(x, y)
+plt.plot(_x, _y, 'r')
+plt.show()
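
Note: this script targets the TF 1.x graph API (tf.placeholder, tf.layers.dense, tf.Session), which is no longer exposed at the top level in TF 2.x. A minimal shim, assuming a TF 2.x install where the compat module is available, is to import the v1 namespace and disable eager execution; the rest of the script then runs unchanged:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# With these two lines in place of `import tensorflow as tf`,
# tf.placeholder, tf.layers.dense, tf.train.AdamOptimizer and
# tf.Session all resolve to their graph-mode implementations.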