# dataClassifier.py
# -----------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
# This file contains feature extraction methods and harness
# code for data classification
import mostFrequent
import naiveBayes
import perceptron
import mira
import samples
import sys
import util

TEST_SET_SIZE = 100
DIGIT_DATUM_WIDTH = 28
DIGIT_DATUM_HEIGHT = 28
FACE_DATUM_WIDTH = 60
FACE_DATUM_HEIGHT = 70

def basicFeatureExtractorDigit(datum):
    """
    Returns a set of pixel features indicating whether
    each pixel in the provided datum is white (0) or gray/black (1)
    """
    features = util.Counter()
    for x in range(DIGIT_DATUM_WIDTH):
        for y in range(DIGIT_DATUM_HEIGHT):
            if datum.getPixel(x, y) > 0:
                features[(x, y)] = 1
            else:
                features[(x, y)] = 0
    return features
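
# A hedged illustration (not part of the original harness): if a datum's only
# dark pixel were at (0, 1), the Counter above would map
#   (0, 1) -> 1   and every other (x, y) -> 0
# so each classifier below sees a fixed-length binary feature vector, one
# entry per pixel coordinate.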

def basicFeatureExtractorFace(datum):
    """
    Returns a set of pixel features indicating whether
    each pixel in the provided datum is an edge (1) or no edge (0)
    """
    features = util.Counter()
    for x in range(FACE_DATUM_WIDTH):
        for y in range(FACE_DATUM_HEIGHT):
            if datum.getPixel(x, y) > 0:
                features[(x, y)] = 1
            else:
                features[(x, y)] = 0
    return features

def enhancedFeatureExtractorDigit(datum):
    """
    Your feature extraction playground.
    You should return a util.Counter() of features
    for this datum (datum is of type samples.Datum).
    ## DESCRIBE YOUR ENHANCED FEATURES HERE...
    ##
    """
    features = basicFeatureExtractorDigit(datum)
    "*** YOUR CODE HERE ***"
    return features
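
# A hedged sketch (illustrative only, not the assigned solution): one cheap
# enhancement is to add per-column counts of dark pixels on top of the basic
# features, giving the classifier a coarse notion of stroke density. The
# helper name below is made up for this example.
def _exampleColumnCountFeatures(datum):
    extra = util.Counter()
    for x in range(DIGIT_DATUM_WIDTH):
        darkInColumn = 0
        for y in range(DIGIT_DATUM_HEIGHT):
            if datum.getPixel(x, y) > 0:
                darkInColumn += 1
        extra[('col-count', x)] = darkInColumn
    return extra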

def contestFeatureExtractorDigit(datum):
    """
    Specify features to use for the minicontest
    """
    features = basicFeatureExtractorDigit(datum)
    return features

def enhancedFeatureExtractorFace(datum):
    """
    Your feature extraction playground for faces.
    It is your choice to modify this.
    """
    features = basicFeatureExtractorFace(datum)
    return features

def analysis(classifier, guesses, testLabels, testData, rawTestData, printImage):
    """
    This function is called after learning.
    Include any code that you want here to help you analyze your results.
    Use the printImage(<list of pixels>) function to visualize features.
    An example of use has been given to you.
    - classifier is the trained classifier
    - guesses is the list of labels predicted by your classifier on the test set
    - testLabels is the list of true test labels
    - testData is the list of test datapoints (as util.Counter of features)
    - rawTestData is the list of raw test datapoints (as samples.Datum)
    - printImage is a method to visualize the features
      (see its use in the odds ratio part of the runClassifier method)
    This code won't be evaluated. It is for your own optional use
    (and you can modify the signature if you want).
    """
    # Put any code here...
    # Example of use:
    for i in range(len(guesses)):
        prediction = guesses[i]
        truth = testLabels[i]
        if prediction != truth:
            print "==================================="
            print "Mistake on example %d" % i
            print "Predicted %d; truth is %d" % (prediction, truth)
            print "Image: "
            print rawTestData[i]
            break
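
# A hedged extension of the example above (illustrative only): to see which
# classes are hardest, one could tally mistakes by true label, e.g.
#   errorCounts = util.Counter()
#   for i in range(len(guesses)):
#       if guesses[i] != testLabels[i]:
#           errorCounts[testLabels[i]] += 1
#   print errorCounts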
## =====================
## You don't have to modify any code below.
## =====================

class ImagePrinter:
    def __init__(self, width, height):
        self.width = width
        self.height = height

    def printImage(self, pixels):
        """
        Prints a Datum object that contains all pixels in the
        provided list of pixels. This will serve as a helper function
        to the analysis function you write.
        Pixels should take the form
        [(2,2), (2, 3), ...]
        where each tuple represents a pixel.
        """
        image = samples.Datum(None, self.width, self.height)
        for pix in pixels:
            try:
                # This is so that new features you define that are not
                # of the form (x, y) will not break this image printer...
                x, y = pix
                image.pixels[x][y] = 2
            except:
                print "new features:", pix
                continue
        print image
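
# A hedged usage sketch (illustrative only):
#   printer = ImagePrinter(DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT)
#   printer.printImage([(2, 2), (2, 3), (3, 3)])
# would print a 28x28 Datum with those three pixels highlighted.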

def default(helpStr):
    return helpStr + ' [Default: %default]'

def readCommand(argv):
    "Processes the command used to run from the command line."
    from optparse import OptionParser
    parser = OptionParser(USAGE_STRING)

    parser.add_option('-c', '--classifier', help=default('The type of classifier'), choices=['mostFrequent', 'nb', 'naiveBayes', 'perceptron', 'mira', 'minicontest'], default='mostFrequent')
    parser.add_option('-d', '--data', help=default('Dataset to use'), choices=['digits', 'faces'], default='digits')
    parser.add_option('-t', '--training', help=default('The size of the training set'), default=100, type="int")
    parser.add_option('-f', '--features', help=default('Whether to use enhanced features'), default=False, action="store_true")
    parser.add_option('-o', '--odds', help=default('Whether to compute odds ratios'), default=False, action="store_true")
    parser.add_option('-1', '--label1', help=default("First label in an odds ratio comparison"), default=0, type="int")
    parser.add_option('-2', '--label2', help=default("Second label in an odds ratio comparison"), default=1, type="int")
    parser.add_option('-w', '--weights', help=default('Whether to print weights'), default=False, action="store_true")
    parser.add_option('-k', '--smoothing', help=default("Smoothing parameter (ignored when using --autotune)"), type="float", default=2.0)
    parser.add_option('-a', '--autotune', help=default("Whether to automatically tune hyperparameters"), default=False, action="store_true")
    parser.add_option('-i', '--iterations', help=default("Maximum iterations to run training"), default=3, type="int")
    parser.add_option('-s', '--test', help=default("Amount of test data to use"), default=TEST_SET_SIZE, type="int")

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = {}

    # Set up variables according to the command line input.
    print "Doing classification"
    print "--------------------"
    print "data:\t\t" + options.data
    print "classifier:\t\t" + options.classifier
    if not options.classifier == 'minicontest':
        print "using enhanced features?:\t" + str(options.features)
    else:
        print "using minicontest feature extractor"
    print "training set size:\t" + str(options.training)

    if options.data == "digits":
        printImage = ImagePrinter(DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT).printImage
        if options.features:
            featureFunction = enhancedFeatureExtractorDigit
        else:
            featureFunction = basicFeatureExtractorDigit
        if options.classifier == 'minicontest':
            featureFunction = contestFeatureExtractorDigit
    elif options.data == "faces":
        printImage = ImagePrinter(FACE_DATUM_WIDTH, FACE_DATUM_HEIGHT).printImage
        if options.features:
            featureFunction = enhancedFeatureExtractorFace
        else:
            featureFunction = basicFeatureExtractorFace
    else:
        print "Unknown dataset", options.data
        print USAGE_STRING
        sys.exit(2)

    if options.data == "digits":
        legalLabels = range(10)
    else:
        legalLabels = range(2)

    if options.training <= 0:
        print "Training set size should be a positive integer (you provided: %d)" % options.training
        print USAGE_STRING
        sys.exit(2)

    if options.smoothing <= 0:
        print "Please provide a positive number for smoothing (you provided: %f)" % options.smoothing
        print USAGE_STRING
        sys.exit(2)

    if options.odds:
        if options.label1 not in legalLabels or options.label2 not in legalLabels:
            print "Didn't provide legal labels for the odds ratio: (%d,%d)" % (options.label1, options.label2)
            print USAGE_STRING
            sys.exit(2)

    if options.classifier == "mostFrequent":
        classifier = mostFrequent.MostFrequentClassifier(legalLabels)
    elif options.classifier == "naiveBayes" or options.classifier == "nb":
        classifier = naiveBayes.NaiveBayesClassifier(legalLabels)
        classifier.setSmoothing(options.smoothing)
        if options.autotune:
            print "using automatic tuning for naivebayes"
            classifier.automaticTuning = True
        else:
            print "using smoothing parameter k=%f for naivebayes" % options.smoothing
    elif options.classifier == "perceptron":
        classifier = perceptron.PerceptronClassifier(legalLabels, options.iterations)
    elif options.classifier == "mira":
        classifier = mira.MiraClassifier(legalLabels, options.iterations)
        if options.autotune:
            print "using automatic tuning for MIRA"
            classifier.automaticTuning = True
        else:
            print "using default C=0.001 for MIRA"
    elif options.classifier == 'minicontest':
        import minicontest
        classifier = minicontest.contestClassifier(legalLabels)
    else:
        print "Unknown classifier:", options.classifier
        print USAGE_STRING
        sys.exit(2)

    args['classifier'] = classifier
    args['featureFunction'] = featureFunction
    args['printImage'] = printImage

    return args, options

USAGE_STRING = """
  USAGE:      python dataClassifier.py <options>
  EXAMPLES:   (1) python dataClassifier.py
                  - trains the default mostFrequent classifier on the digit dataset
                  using the default 100 training examples and
                  then tests the classifier on the test data
              (2) python dataClassifier.py -c naiveBayes -d digits -t 1000 -f -o -1 3 -2 6 -k 2.5
                  - would run the naive Bayes classifier on 1000 training examples
                  from the digits dataset, using the enhancedFeatureExtractorDigit
                  function to get the features, with a smoothing parameter of 2.5;
                  it would then test the classifier on the test data and perform
                  an odds ratio analysis with label1=3 vs. label2=6
"""

# Main harness code

def runClassifier(args, options):
    featureFunction = args['featureFunction']
    classifier = args['classifier']
    printImage = args['printImage']

    # Load data
    numTraining = options.training
    numTest = options.test

    if options.data == "faces":
        rawTrainingData = samples.loadDataFile("facedata/facedatatrain", numTraining, FACE_DATUM_WIDTH, FACE_DATUM_HEIGHT)
        trainingLabels = samples.loadLabelsFile("facedata/facedatatrainlabels", numTraining)
        rawValidationData = samples.loadDataFile("facedata/facedatatrain", numTest, FACE_DATUM_WIDTH, FACE_DATUM_HEIGHT)
        validationLabels = samples.loadLabelsFile("facedata/facedatatrainlabels", numTest)
        rawTestData = samples.loadDataFile("facedata/facedatatest", numTest, FACE_DATUM_WIDTH, FACE_DATUM_HEIGHT)
        testLabels = samples.loadLabelsFile("facedata/facedatatestlabels", numTest)
    else:
        rawTrainingData = samples.loadDataFile("digitdata/trainingimages", numTraining, DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT)
        trainingLabels = samples.loadLabelsFile("digitdata/traininglabels", numTraining)
        rawValidationData = samples.loadDataFile("digitdata/validationimages", numTest, DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT)
        validationLabels = samples.loadLabelsFile("digitdata/validationlabels", numTest)
        rawTestData = samples.loadDataFile("digitdata/testimages", numTest, DIGIT_DATUM_WIDTH, DIGIT_DATUM_HEIGHT)
        testLabels = samples.loadLabelsFile("digitdata/testlabels", numTest)

    # Extract features
    print "Extracting features..."
    trainingData = map(featureFunction, rawTrainingData)
    validationData = map(featureFunction, rawValidationData)
    testData = map(featureFunction, rawTestData)

    # Conduct training and testing
    print "Training..."
    classifier.train(trainingData, trainingLabels, validationData, validationLabels)
    print "Validating..."
    guesses = classifier.classify(validationData)
    correct = [guesses[i] == validationLabels[i] for i in range(len(validationLabels))].count(True)
    print str(correct), ("correct out of " + str(len(validationLabels)) + " (%.1f%%).") % (100.0 * correct / len(validationLabels))
    print "Testing..."
    guesses = classifier.classify(testData)
    correct = [guesses[i] == testLabels[i] for i in range(len(testLabels))].count(True)
    print str(correct), ("correct out of " + str(len(testLabels)) + " (%.1f%%).") % (100.0 * correct / len(testLabels))
    analysis(classifier, guesses, testLabels, testData, rawTestData, printImage)

    # Do odds ratio computation if specified at the command line
    if options.odds and (options.classifier == "naiveBayes" or options.classifier == "nb"):
        label1, label2 = options.label1, options.label2
        features_odds = classifier.findHighOddsFeatures(label1, label2)
        if options.classifier == "naiveBayes" or options.classifier == "nb":
            string3 = "=== Features with highest odds ratio of label %d over label %d ===" % (label1, label2)
        else:
            string3 = "=== Features for which weight(label %d)-weight(label %d) is biggest ===" % (label1, label2)
        print string3
        printImage(features_odds)

    if options.weights and options.classifier == "perceptron":
        for l in classifier.legalLabels:
            features_weights = classifier.findHighWeightFeatures(l)
            print "=== Features with high weight for label %d ===" % l
            printImage(features_weights)
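
# A hedged example run (illustrative only):
#   python dataClassifier.py -c perceptron -t 1000 -w
# would train the perceptron on 1000 digit examples for the default 3
# iterations and then visualize its highest-weight features per label.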

if __name__ == '__main__':
    # Read input
    args, options = readCommand(sys.argv[1:])
    # Run classifier
    runClassifier(args, options)