import random

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

# Seed every source of randomness for reproducible runs.
random.seed(0)
np.random.seed(seed=0)
tf.random.set_seed(seed=0)

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

print(
    f'Training Size - Inputs: {x_train.shape}, Targets: {y_train.shape}'
    f'\nTest Size - Inputs: {x_test.shape}, Targets: {y_test.shape}'
)

rows = 5
digits_per_row = 5

fig, axes = plt.subplots(nrows=rows, ncols=digits_per_row, figsize=(6, 6))
axes = axes.flatten()

# Select random training indices (without replacement) to display.
total_digits = rows * digits_per_row
random_ids = np.random.choice(x_train.shape[0], total_digits, replace=False)

# Plot each selected digit with its class label.
for i, ax in enumerate(axes):
    idx = random_ids[i]
    ax.imshow(x_train[idx], cmap='gray')
    ax.set_title(f'Class: {y_train[idx]}')
    ax.axis('off')
plt.tight_layout()
plt.show()

# Flatten each 28x28 image into a 784-dimensional vector and one-hot encode
# the integer labels to match the 10-unit softmax output layer.
x_train = x_train.reshape((60000, 784))
y_train = tf.one_hot(y_train, depth=10)
x_test = x_test.reshape((10000, 784))
y_test = tf.one_hot(y_test, depth=10)

print(
    f'Training Size - Inputs: {x_train.shape}, Targets: {y_train.shape}'
    f'\nTest Size - Inputs: {x_test.shape}, Targets: {y_test.shape}'
)

activation = 'tanh'
loss = 'categorical_crossentropy'  # Do not change this loss function.
metrics = ['accuracy']
learning_rate = 0.001
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)  # Do not change this optimizer.
epochs = 10

model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(784,), name='input'),
    tf.keras.layers.Dense(units=256, activation=activation, name='hidden-1'),
    tf.keras.layers.Dense(units=256, activation=activation, name='hidden-2'),
    tf.keras.layers.Dense(units=10, activation='softmax', name='outputs')  # Do not change this activation function.
])
model.summary(expand_nested=True)

model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
history = model.fit(
    x=x_train,
    y=y_train,
    epochs=epochs,
    validation_data=(x_test, y_test)
)
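
# Optional sanity check, not part of the original script: evaluate the trained
# model on the held-out test set. These numbers should match the final
# validation metrics reported by fit() above.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f'Test Loss: {test_loss:.4f}, Test Accuracy: {test_acc:.4f}')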

train_loss = history.history['loss']
val_loss = history.history['val_loss']
train_acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

plt.plot(train_loss, label='Train Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Neural Network Loss per epoch')
plt.ylabel('Categorical Cross-Entropy')
plt.xlabel('Epochs')
plt.xlim(0, epochs)
# Fix only the lower bound: the initial cross-entropy is around ln(10) = 2.3,
# so an upper limit of 1 would clip the early epochs.
plt.ylim(bottom=0)
plt.legend()
plt.show()

plt.plot(train_acc, label='Train Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Neural Network Accuracy per epoch')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.xlim(0, epochs)
plt.ylim(0, 1)
plt.legend()
plt.show()
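
# Optional illustration, not part of the original script: predict classes for a
# few random test digits and compare with the true labels. np.argmax recovers
# the integer class from the one-hot encoding applied above.
sample_ids = np.random.choice(x_test.shape[0], 5, replace=False)
predicted_classes = np.argmax(model.predict(x_test[sample_ids], verbose=0), axis=1)
true_classes = np.argmax(y_test.numpy()[sample_ids], axis=1)
print(f'Predicted: {predicted_classes}, True: {true_classes}')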