srcnn.py
# -*- coding: utf-8 -*-
# Imports
import numpy as np
import os
import tensorflow as tf
import scipy.misc as ms  # Only needed for the commented-out visualisation calls below
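# Note: this script uses the TensorFlow 1.x API (tf.placeholder, tf.layers,
# tf.Session). Under TensorFlow 2.x it would need the tf.compat.v1 namespace
# plus tf.compat.v1.disable_eager_execution(), or a rewrite with Keras layers.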
def cnn_model_fn(features):
    """Model function for the SRCNN."""
    # Input Layer: each example is a 33x33 grayscale patch
    input_layer = tf.reshape(features, [-1, 33, 33, 1])
    # Convolutional Layer #1: low-dimensional feature extraction
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=64,
        kernel_size=[9, 9],
        padding="valid",
        activation=tf.nn.relu)
    # Convolutional Layer #2: non-linear mapping (ReLU)
    conv2 = tf.layers.conv2d(
        inputs=conv1,
        filters=32,
        kernel_size=[1, 1],
        padding="valid",
        activation=tf.nn.relu)
    # Convolutional Layer #3: high-dimensional feature reconstruction
    conv3 = tf.layers.conv2d(
        inputs=conv2,
        filters=1,
        kernel_size=[5, 5],
        padding="valid")
    return conv3
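# Note: with "valid" padding each layer shrinks the spatial size by
# (kernel_size - 1), so a 33x33 input becomes 25x25 after the 9x9 conv,
# stays 25x25 after the 1x1 conv, and becomes 21x21 after the 5x5 conv,
# which is why the labels below are 21x21 patches.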
def get_batch(X, y, batch_size):
    """Extracts mini-batches of (input, label) pairs for SGD."""
    for i in np.arange(0, X.shape[0], batch_size):
        yield (X[i:i + batch_size, :, :], y[i:i + batch_size, :, :])
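# Note: the final batch may be smaller than batch_size when the number of
# examples is not a multiple of it; the placeholders below allow this because
# their batch dimension is None.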
# Placeholders for the 33x33 input patches and 21x21 ground-truth patches
x = tf.placeholder(tf.float32, shape=[None, 33, 33])
y = tf.placeholder(tf.float32, shape=[None, 21, 21])
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TF info/warning messages (ideally set before importing tensorflow)
# Load training data: 33x33 input patches and 21x21 ground-truth labels
train_data = np.load("/tmp/SRCNN/data.npy")    # Returns np.array
train_labels = np.load("/tmp/SRCNN/label.npy")
# Uncomment (and fill in the paths) to load evaluation data:
'''
eval_data = np.load("...")    # np.array of 33x33 input patches
eval_labels = np.load("...")  # np.array of 21x21 ground-truth patches
'''
max_epochs = 20
num_examples = train_data.shape[0]
prediction = cnn_model_fn(x)  # Outputs a batch of 21x21x1 images
# print(prediction.shape)
# Summed squared-error loss (normalised to per-pixel MSE during training) with the Adam optimizer
loss = tf.reduce_sum(tf.square(prediction[:, :, :, 0] - y))
optimizer = tf.train.AdamOptimizer().minimize(loss)
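# A possible alternative (not used here) is TF 1.x's built-in MSE loss, which
# averages over all pixels and batch elements instead of summing:
# loss = tf.losses.mean_squared_error(labels=y, predictions=prediction[:, :, :, 0])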
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(max_epochs):
        epoch_loss = 0
        count = 0
        print()
        print(epoch + 1, ': ........ ')
        for (epoch_x, epoch_y) in get_batch(train_data, train_labels, 50):
            _, c = sess.run([optimizer, loss], feed_dict={x: epoch_x, y: epoch_y})
            count = count + 1
            epoch_loss += c / (21.0 * 21.0)  # Convert the summed error to a per-pixel error
        epoch_loss = (epoch_loss * 1.0) / count  # Average over the batches in this epoch
        print('Epoch', epoch + 1, 'completed out of', max_epochs, 'loss', epoch_loss)
        print()
    # Run the trained network on a couple of training patches and report MSE / PSNR
    # prediction = cnn_model_fn(tf.cast(train_data[101,:,:], dtype=tf.float32))
    # ms.imshow(prediction[:,:,0])
    # ms.imshow(train_labels[101,:,:])
    sr = prediction.eval({x: train_data[101:103, :, :]})
    # ms.imshow(sr[0,:,:,0])
    # print(sr[0,:,:,0], train_labels[101,:,:])
    mse_val = np.mean(np.square(sr[0, :, :, 0] - train_labels[101, :, :]))  # Per-pixel MSE
    print('MSE: ', mse_val)
    # PSNR below assumes pixel intensities in [0, 255]; use 1.0 instead of 255.0 for data scaled to [0, 1]
    psnr = 20 * np.log10(255.0) - 10 * np.log10(mse_val)
    print('psnr value: ', psnr)
"""
prediction = cnn_model_fn(train_data[101,:,:])
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accu = 0.0
#accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
for (epoch_x , epoch_y) in get_batch(eval_data, onehot_labels2, 100):
accu += correct.eval({x:epoch_x, y:epoch_y})
accuracy = np.sum(accu)/float(eval_data.shape[0])
print('Accuracy :' , accuracy, '%')
"""