abusenet_evaluator_v3.py
from __future__ import print_function
import os
import argparse
from sklearn.metrics import accuracy_score, classification_report, precision_score, confusion_matrix, average_precision_score
from inference.displacenet_single_image_inference_unified import displaceNet_inference


class AbuseNetBaseEvaluator(object):
    """Performance metrics base class."""

    def __init__(self,
                 hra_model_backend_name, nb_of_conv_layers_to_fine_tune,
                 emotic_model_a_backend_name, emotic_model_b_backend_name, emotic_model_c_backend_name,
                 violation_class,
                 main_test_dir='/home/sandbox/Desktop/Human_Rights_Archive_DB/test',
                 ):
        self.hra_model_backend_name = hra_model_backend_name
        self.nb_of_conv_layers_to_fine_tune = nb_of_conv_layers_to_fine_tune
        self.emotic_model_a_backend_name = emotic_model_a_backend_name
        self.emotic_model_b_backend_name = emotic_model_b_backend_name
        self.emotic_model_c_backend_name = emotic_model_c_backend_name
        self.main_test_dir = main_test_dir
        self.total_nb_of_test_images = sum([len(files) for r, d, files in os.walk(main_test_dir)])
        self.sorted_categories_names = sorted(os.listdir(main_test_dir))
        self.violation_class = violation_class

        # Ground truth: the test set holds 50 images of the negative class
        # followed by 50 images of the positive (violation) class.
        self.y_true = [0] * 50 + [1] * 50
    def _obtain_y_pred(self,
                       prob_threshold=0.75):
        y_pred = []
        y_scores = []
        predicted_class_list = []
        actual_class_list = []
        coverage_count = 0

        for hra_class in self.sorted_categories_names:
            # main test dir joined with the currently selected category
            tmp = os.path.join(self.main_test_dir, hra_class)
            img_names = sorted(os.listdir(tmp))

            for raw_img in img_names:
                print(' Processing [' + raw_img + ']')

                # full path of the image to be loaded
                final_img = os.path.join(tmp, raw_img)

                preds = displaceNet_inference(img_path=final_img,
                                              emotic_model_a_backend_name=self.emotic_model_a_backend_name,
                                              emotic_model_b_backend_name=self.emotic_model_b_backend_name,
                                              emotic_model_c_backend_name=self.emotic_model_c_backend_name,
                                              hra_model_backend_name=self.hra_model_backend_name,
                                              nb_of_fine_tuned_conv_layers=self.nb_of_conv_layers_to_fine_tune,
                                              violation_class=self.violation_class)

                preds = preds[0]
                y_pred.append(int(preds[0][0]))
                y_scores.append(preds[0][2])

                top_1_predicted_probability = preds[0][2]
                top_1_predicted_label = preds[0][1]

                # count predictions whose top-1 probability clears the threshold (coverage)
                if top_1_predicted_probability >= prob_threshold:
                    coverage_count += 1

                print(' GT `' + hra_class + '` <--> Pred. `' +
                      top_1_predicted_label + '` with ' + str(top_1_predicted_probability))
                print('\n')

                predicted_class_list.append(top_1_predicted_label)
                actual_class_list.append(hra_class)

        total_coverage_per = (coverage_count * 100) / self.total_nb_of_test_images

        return y_pred, self.y_true, y_scores, total_coverage_per
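
# Note (added comment): the indexing in _obtain_y_pred assumes displaceNet_inference
# returns a nested list whose first element holds, per prediction, a
# (class index, class label, probability) triple -- i.e. preds[0][0] is the predicted
# class index, preds[0][1] the label and preds[0][2] its probability. This is inferred
# from how the evaluator consumes the result, not from the inference module itself.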
if __name__ == "__main__":

    def get_args():
        parser = argparse.ArgumentParser()
        parser.add_argument("--violation_class", type=str,
                            help='One of `cl` (child labour) or `dp` (displaced populations)')
        parser.add_argument("--hra_model_backend_name", type=str,
                            help='One of `VGG16`, `VGG19`, `ResNet50`, `VGG16_Places365`')
        parser.add_argument("--nb_of_conv_layers", type=int, default=None,
                            help="Number of fine-tuned conv. layers")
        parser.add_argument("--emotic_model_a_backend_name", type=str,
                            help='One of `VGG16`, `VGG19`, `ResNet50`')
        parser.add_argument("--emotic_model_b_backend_name", type=str,
                            help='One of `VGG16`, `VGG19`, `ResNet50`', default=None)
        parser.add_argument("--emotic_model_c_backend_name", type=str,
                            help='One of `VGG16`, `VGG19`, `ResNet50`', default=None)
        args = parser.parse_args()
        return args

    args = get_args()

    # server-side test directories for the two binary test sets
    if args.violation_class == 'cl':
        main_test_dir = '/home/gkallia/git/AbuseNet/datasets/HRA-2clas-full-test/ChildLabour'
    elif args.violation_class == 'dp':
        main_test_dir = '/home/gkallia/git/AbuseNet/datasets/HRA-2clas-full-test/DisplacedPopulations'
    else:
        raise ValueError('`--violation_class` must be one of `cl` or `dp`.')

    # ---------------------------------------------------- #

    base_evaluator = AbuseNetBaseEvaluator(hra_model_backend_name=args.hra_model_backend_name,
                                           nb_of_conv_layers_to_fine_tune=args.nb_of_conv_layers,
                                           emotic_model_a_backend_name=args.emotic_model_a_backend_name,
                                           emotic_model_b_backend_name=args.emotic_model_b_backend_name,
                                           emotic_model_c_backend_name=args.emotic_model_c_backend_name,
                                           violation_class=args.violation_class,
                                           main_test_dir=main_test_dir,
                                           )

    y_pred, y_true, y_scores, total_coverage_per = base_evaluator._obtain_y_pred()

    top1_acc = accuracy_score(y_true, y_pred)
    AP = average_precision_score(y_true, y_scores, average='micro')

    string = args.hra_model_backend_name + '-' + args.violation_class + '-' + str(args.nb_of_conv_layers) + 'layer(s)-'

    print('\n')
    print('============================= %s =============================' % string)
    print(' Top-1 acc.              => ' + str(top1_acc))
    print(' Coverage                => ' + str(total_coverage_per) + '%')
    print(' Average Precision (AP)  => ' + str(AP))
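
# Example invocation (a sketch; the backend names and layer count below are
# placeholders -- pick whichever combination matches your trained models):
#
#   python abusenet_evaluator_v3.py --violation_class cl \
#       --hra_model_backend_name VGG16 \
#       --nb_of_conv_layers 1 \
#       --emotic_model_a_backend_name VGG16
#
# The script prints top-1 accuracy, coverage (share of test images whose top-1
# probability is >= 0.75, the default `prob_threshold`) and average precision
# for the selected violation class.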