evaluate.py
#!/usr/bin/env python
import pandas as pd
from sklearn.metrics import f1_score
def _evaluate(client_payload, answer_file_path, context):
    """
    client_payload : object holding the client payload
        - predicted_data_path : path to the submitted file
    answer_file_path : path to the gold labels
    context : extra params for advanced usage
    """
"""
Load submission data and gold labels
"""
predicted_data_path = client_payload['predicted_data_path']
predicted_data = pd.read_csv(predicted_data_path)
answer_file = pd.read_csv(answer_file_path)
"""
IMPORTANT : Also do your custom validation
#TODO
"""
assert(len(predicted_data) == len(answer_file))
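    # One possible extra check (a sketch, not part of the original template):
    # reject predictions whose class index never occurs in the gold labels.
    # valid_labels = set(answer_file["correct_class_idx"])
    # unknown = set(predicted_data["predicted_class_idx"]) - valid_labels
    # assert not unknown, "Unknown class indices in submission: %s" % unknown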
    predictions = predicted_data["predicted_class_idx"].tolist()
    answer = answer_file["correct_class_idx"].tolist()

    # Primary metric: per-class F1 averaged with class-support weights
    score = f1_score(answer, predictions, average="weighted")
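    # For intuition (hypothetical numbers): with gold labels [0, 0, 1] and
    # predictions [0, 1, 1], both classes get an F1 of 0.67, and weighting
    # by support (2 and 1) keeps the overall score at 0.67; when per-class
    # F1s differ, the majority class dominates the weighted average.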
"""
Validation done here
"""
# raise("Example Error Message")
_result_object = {
"score" : score,
"score_secondary" : 0,
}
return _result_object
if __name__ == "__main__":
    # Local test harness: point the payload at a sample submission and score it
    client_payload = {}
    client_payload["predicted_data_path"] = "temp/sample_submission.csv"
    _answer_file_path = "data/ground_truth.csv"
    _context = {}
    print(
        _evaluate(
            client_payload,
            _answer_file_path,
            _context,
        )
    )
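
# To smoke-test this script locally you first need the two CSVs referenced
# above. A minimal sketch for generating them (the paths and column names are
# taken from the code; the label values are made up purely for illustration):
#
#     import pandas as pd
#     pd.DataFrame({"correct_class_idx": [0, 0, 1]}).to_csv(
#         "data/ground_truth.csv", index=False)
#     pd.DataFrame({"predicted_class_idx": [0, 1, 1]}).to_csv(
#         "temp/sample_submission.csv", index=False)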