"""
This tutorial shows how to generate some simple adversarial examples
and train a model using adversarial training using nothing but pure
TensorFlow.
It is very similar to mnist_tutorial_tf.py, which does the same thing
but with a dependence on keras.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os

import numpy as np
import tensorflow as tf
from tensorflow.python.platform import app
from tensorflow.python.platform import flags

from cleverhans.utils_mnist import data_mnist
from cleverhans.utils_tf import model_train, model_eval
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils import AccuracyReport
from cleverhans.model import Model

FLAGS = flags.FLAGS
"""
CleverHans is intended to supply attacks and defense, not models.
Users may apply CleverHans to many different kinds of models.
In this tutorial, we show you an example of the kind of model
you might build.
"""


class MLP(Model):
    """
    An example of a bare bones multilayer perceptron (MLP) class.
    """

    def __init__(self, layers, input_shape):
        super(MLP, self).__init__()

        self.layer_names = []
        self.layers = layers
        self.input_shape = input_shape
        if isinstance(layers[-1], Softmax):
            layers[-1].name = 'probs'
            layers[-2].name = 'logits'
        else:
            layers[-1].name = 'logits'
        for i, layer in enumerate(self.layers):
            if hasattr(layer, 'name'):
                name = layer.name
            else:
                name = layer.__class__.__name__ + str(i)
            self.layer_names.append(name)

            layer.set_input_shape(input_shape)
            input_shape = layer.get_output_shape()

    def fprop(self, x, set_ref=False):
        states = []
        for layer in self.layers:
            if set_ref:
                layer.ref = x
            x = layer.fprop(x)
            assert x is not None
            states.append(x)
        states = dict(zip(self.get_layer_names(), states))
        return states
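

# For the CNN built by make_basic_cnn() below, fprop(x) returns a dict of
# per-layer activations keyed by the names assigned in __init__, roughly:
#   {'Conv2D0': ..., 'ReLU1': ..., ..., 'Flatten6': ...,
#    'logits': ..., 'probs': ...}
# Naming the last two layers 'logits' and 'probs' is what lets the Model
# base class resolve accessors such as get_probs(x) (used by the attacks
# below) to the right tensors; see cleverhans.model for the authoritative
# interface.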


class Layer(object):

    def get_output_shape(self):
        return self.output_shape


class Linear(Layer):

    def __init__(self, num_hid):
        self.num_hid = num_hid

    def set_input_shape(self, input_shape):
        batch_size, dim = input_shape
        self.input_shape = [batch_size, dim]
        self.output_shape = [batch_size, self.num_hid]
        init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                   keep_dims=True))
        self.W = tf.Variable(init)
        self.b = tf.Variable(np.zeros((self.num_hid,)).astype('float32'))

    def fprop(self, x):
        return tf.matmul(x, self.W) + self.b
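

# The weight initialization in Linear.set_input_shape above amounts to
# column-wise L2 normalization of a random Gaussian matrix; an
# illustrative NumPy sketch of the same idea (not used by the tutorial
# itself):
#   w = np.random.randn(dim, num_hid).astype('float32')
#   w /= np.sqrt(1e-7 + (w ** 2).sum(axis=0, keepdims=True))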


class Conv2D(Layer):

    def __init__(self, output_channels, kernel_shape, strides, padding):
        self.__dict__.update(locals())
        del self.self

    def set_input_shape(self, input_shape):
        batch_size, rows, cols, input_channels = input_shape
        kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                                   self.output_channels)
        assert len(kernel_shape) == 4
        assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
        init = tf.random_normal(kernel_shape, dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                                   axis=(0, 1, 2)))
        self.kernels = tf.Variable(init)
        self.b = tf.Variable(
            np.zeros((self.output_channels,)).astype('float32'))
        orig_input_batch_size = input_shape[0]
        input_shape = list(input_shape)
        input_shape[0] = 1
        dummy_batch = tf.zeros(input_shape)
        dummy_output = self.fprop(dummy_batch)
        output_shape = [int(e) for e in dummy_output.get_shape()]
        output_shape[0] = 1
        self.output_shape = tuple(output_shape)

    def fprop(self, x):
        return tf.nn.conv2d(x, self.kernels,
                            (1,) + tuple(self.strides) + (1,), self.padding)


class ReLU(Layer):

    def __init__(self):
        pass

    def set_input_shape(self, shape):
        self.input_shape = shape
        self.output_shape = shape

    def get_output_shape(self):
        return self.output_shape

    def fprop(self, x):
        return tf.nn.relu(x)


class Softmax(Layer):

    def __init__(self):
        pass

    def set_input_shape(self, shape):
        self.input_shape = shape
        self.output_shape = shape

    def fprop(self, x):
        return tf.nn.softmax(x)


class Flatten(Layer):

    def __init__(self):
        pass

    def set_input_shape(self, shape):
        self.input_shape = shape
        output_width = 1
        for factor in shape[1:]:
            output_width *= factor
        self.output_width = output_width
        self.output_shape = [None, output_width]

    def fprop(self, x):
        return tf.reshape(x, [-1, self.output_width])


def make_basic_cnn(nb_filters=64, nb_classes=10,
                   input_shape=(None, 28, 28, 1)):
    layers = [Conv2D(nb_filters, (8, 8), (2, 2), "SAME"),
              ReLU(),
              Conv2D(nb_filters * 2, (6, 6), (2, 2), "VALID"),
              ReLU(),
              Conv2D(nb_filters * 2, (5, 5), (1, 1), "VALID"),
              ReLU(),
              Flatten(),
              Linear(nb_classes),
              Softmax()]

    model = MLP(layers, input_shape)
    return model
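

# Shape walk-through for the default (None, 28, 28, 1) MNIST input, as
# implied by the kernel sizes, strides and padding above (nb_filters=64):
#   Conv2D 8x8, stride 2, SAME   -> (?, 14, 14, 64)
#   Conv2D 6x6, stride 2, VALID  -> (?, 5, 5, 128)
#   Conv2D 5x5, stride 1, VALID  -> (?, 1, 1, 128)
#   Flatten                      -> (?, 128)
#   Linear(10) + Softmax         -> (?, 10)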


def mnist_tutorial(train_start=0, train_end=60000, test_start=0,
                   test_end=10000, nb_epochs=6, batch_size=128,
                   learning_rate=0.001,
                   clean_train=True,
                   testing=False):
    """
    MNIST cleverhans tutorial
    :param train_start: index of first training set example
    :param train_end: index of last training set example
    :param test_start: index of first test set example
    :param test_end: index of last test set example
    :param nb_epochs: number of epochs to train model
    :param batch_size: size of training batches
    :param learning_rate: learning rate for training
    :param clean_train: if true, train on clean examples
    :param testing: if true, complete an AccuracyReport for unit tests
                    to verify that performance is adequate
    :return: an AccuracyReport object
    """

    # Object used to keep track of (and return) key accuracies
    report = AccuracyReport()

    # Set TF random seed to improve reproducibility
    tf.set_random_seed(1234)

    # Create TF session
    sess = tf.Session()

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist(train_start=train_start,
                                                  train_end=train_end,
                                                  test_start=test_start,
                                                  test_end=test_end)

    # Use label smoothing
    assert Y_train.shape[1] == 10
    label_smooth = .1
    Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)
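    # With label_smooth = 0.1, the clip above turns each one-hot target
    # into a soft target: the correct class becomes 1 - 0.1 = 0.9 and each
    # of the 9 incorrect classes becomes 0.1 / 9 ~= 0.0111.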

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    y = tf.placeholder(tf.float32, shape=(None, 10))

    model_path = "models/mnist"

    # Train an MNIST model
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    fgsm_params = {'eps': 0.3}
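    # FGSM perturbs each input in the direction that locally increases the
    # loss: x_adv = x + eps * sign(d loss / d x). eps = 0.3 is a standard
    # budget for [0, 1]-scaled MNIST pixels; clip_min/clip_max could also
    # be added to fgsm_params to keep x_adv inside the valid pixel range.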

    if clean_train:
        model = make_basic_cnn()
        preds = model.get_probs(x)

        def evaluate():
            # Evaluate the accuracy of the MNIST model on legitimate test
            # examples
            eval_params = {'batch_size': batch_size}
            acc = model_eval(
                sess, x, y, preds, X_test, Y_test, args=eval_params)
            report.clean_train_clean_eval = acc
            assert X_test.shape[0] == test_end - test_start, X_test.shape
            print('Test accuracy on legitimate examples: %0.4f' % acc)

        model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,
                    args=train_params)

        # Calculate training error
        if testing:
            eval_params = {'batch_size': batch_size}
            acc = model_eval(
                sess, x, y, preds, X_train, Y_train, args=eval_params)
            report.train_clean_train_clean_eval = acc

        # Initialize the Fast Gradient Sign Method (FGSM) attack object and
        # graph
        fgsm = FastGradientMethod(model, sess=sess)
        adv_x = fgsm.generate(x, **fgsm_params)
        preds_adv = model.get_probs(adv_x)
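        # Note that fgsm.generate returns a symbolic tensor: adversarial
        # examples are only materialized when model_eval below feeds data
        # through the graph, so they always reflect the trained weights.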

        # Evaluate the accuracy of the MNIST model on adversarial examples
        eval_par = {'batch_size': batch_size}
        acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
        print('Test accuracy on adversarial examples: %0.4f\n' % acc)
        report.clean_train_adv_eval = acc

        # Calculate training error
        if testing:
            eval_par = {'batch_size': batch_size}
            acc = model_eval(sess, x, y, preds_adv, X_train,
                             Y_train, args=eval_par)
            report.train_clean_train_adv_eval = acc

        print("Repeating the process, using adversarial training")

    # Redefine TF model graph
    model_2 = make_basic_cnn()
    preds_2 = model_2(x)
    fgsm2 = FastGradientMethod(model_2, sess=sess)
    preds_2_adv = model_2(fgsm2.generate(x, **fgsm_params))

    def evaluate_2():
        # Accuracy of adversarially trained model on legitimate test inputs
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess, x, y, preds_2, X_test, Y_test,
                              args=eval_params)
        print('Test accuracy on legitimate examples: %0.4f' % accuracy)
        report.adv_train_clean_eval = accuracy

        # Accuracy of the adversarially trained model on adversarial examples
        accuracy = model_eval(sess, x, y, preds_2_adv, X_test,
                              Y_test, args=eval_params)
        print('Test accuracy on adversarial examples: %0.4f' % accuracy)
        report.adv_train_adv_eval = accuracy
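
    # With predictions_adv passed in, model_train (in the cleverhans
    # version this tutorial targets) minimizes the average of the clean
    # and adversarial cross-entropy losses, roughly
    #   loss = 0.5 * xent(y, preds_2) + 0.5 * xent(y, preds_2_adv)
    # and because fgsm2 wraps model_2 itself, the adversarial examples are
    # regenerated against the current weights at every training step.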
    # Perform and evaluate adversarial training
    model_train(sess, x, y, preds_2, X_train, Y_train,
                predictions_adv=preds_2_adv, evaluate=evaluate_2,
                args=train_params)

    # Calculate training errors
    if testing:
        eval_params = {'batch_size': batch_size}
        accuracy = model_eval(sess, x, y, preds_2, X_train, Y_train,
                              args=eval_params)
        report.train_adv_train_clean_eval = accuracy
        accuracy = model_eval(sess, x, y, preds_2_adv, X_train,
                              Y_train, args=eval_params)
        report.train_adv_train_adv_eval = accuracy

    return report


def main(argv=None):
    mnist_tutorial(nb_epochs=FLAGS.nb_epochs, batch_size=FLAGS.batch_size,
                   learning_rate=FLAGS.learning_rate,
                   clean_train=FLAGS.clean_train)


if __name__ == '__main__':
    flags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')
    flags.DEFINE_integer('batch_size', 128, 'Size of training batches')
    flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for training')
    flags.DEFINE_bool('clean_train', True, 'Train on clean examples')

    app.run()
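
# Example invocation (the exact boolean-flag syntax depends on the flags
# implementation bundled with your TensorFlow version):
#   python mnist_tutorial_tf.py --nb_epochs=6 --batch_size=128 \
#       --learning_rate=0.001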