import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report, f1_score
from sklearn.svm import LinearSVC, SVC
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from feat.utils import get_resource_path
import joblib
import os

def load_classifier(cf_path):
    """Load a pre-fitted model (scaler, PCA, or classifier) from a joblib file."""
    clf = joblib.load(cf_path)
    return clf

# NOTE: this wrapper reuses the name RandomForestClassifier and therefore
# shadows the sklearn.ensemble.RandomForestClassifier imported above.
class RandomForestClassifier():
    def __init__(self) -> None:
        # Pre-fitted HOG scaler, PCA model, and per-AU random forests bundled
        # in py-feat's resource directory.
        self.pca_model = load_classifier(os.path.join(
            get_resource_path(), "hog_pca_all_emotio.joblib"))
        self.classifier = load_classifier(
            os.path.join(get_resource_path(), "RF_568.joblib"))
        self.scaler = load_classifier(os.path.join(
            get_resource_path(), "hog_scalar_aus.joblib"))

    def detect_au(self, frame, landmarks):
        """Predict action units for a single face.

        `frame` is the HOG feature vector of the face crop (not raw pixels);
        `landmarks` is the corresponding facial-landmark array. Returns a
        (1, n_AUs) array of AU probabilities.
        """
        if len(frame.shape) < 2:
            frame = frame.reshape(1, -1)
        if len(landmarks.shape) > 1:
            landmarks = landmarks.flatten().reshape(1, -1)

        # Apply the pre-fitted scaler and PCA (transform, not fit_transform,
        # so the stored statistics are used rather than re-estimated from this
        # single frame), then append the flattened landmarks.
        pca_transformed_frame = self.pca_model.transform(
            self.scaler.transform(frame))
        feature_cbd = np.concatenate((pca_transformed_frame, landmarks), axis=1)

        # One fitted model per action unit; keep the probability of the
        # positive class for each.
        pred_aus = []
        for key in self.classifier:
            au_pred = self.classifier[key].predict_proba(feature_cbd)
            pred_aus.append(au_pred[0, 1])

        return np.array(pred_aus).reshape(1, -1)

class SVMClassifier():
    def __init__(self) -> None:
        # Pre-fitted HOG scaler, PCA model, and per-AU SVMs.
        self.pca_model = load_classifier(os.path.join(
            get_resource_path(), "hog_pca_all_emotio.joblib"))
        self.classifier = load_classifier(
            os.path.join(get_resource_path(), "svm_568.joblib"))
        self.scaler = load_classifier(os.path.join(
            get_resource_path(), "hog_scalar_aus.joblib"))

    def detect_au(self, frame, landmarks):
        """Predict action units for a single face.

        `frame` is the HOG feature vector of the face crop (not raw pixels);
        `landmarks` is the corresponding facial-landmark array. Returns a
        (1, n_AUs) array of hard 0/1 AU labels, since these models expose
        predict() rather than predict_proba().
        """
        if len(frame.shape) < 2:
            frame = frame.reshape(1, -1)
        if len(landmarks.shape) > 1:
            landmarks = landmarks.flatten().reshape(1, -1)

        # Apply the pre-fitted scaler and PCA (transform, not fit_transform),
        # then append the flattened landmarks.
        pca_transformed_frame = self.pca_model.transform(
            self.scaler.transform(frame))
        feature_cbd = np.concatenate((pca_transformed_frame, landmarks), axis=1)

        pred_aus = []
        for key in self.classifier:
            # predict() returns an array of length 1; keep the scalar label.
            au_pred = self.classifier[key].predict(feature_cbd)[0]
            pred_aus.append(au_pred)

        return np.array(pred_aus).reshape(1, -1)

class LogisticClassifier():
    def __init__(self) -> None:
        # Pre-fitted HOG scaler, PCA model, and per-AU logistic-regression models.
        self.pca_model = load_classifier(os.path.join(
            get_resource_path(), "hog_pca_all_emotio.joblib"))
        self.classifier = load_classifier(os.path.join(
            get_resource_path(), "Logistic_520.joblib"))
        self.scaler = load_classifier(os.path.join(
            get_resource_path(), "hog_scalar_aus.joblib"))

    def detect_au(self, frame, landmarks):
        """Predict action units for a single face.

        `frame` is the HOG feature vector of the face crop (not raw pixels);
        `landmarks` is the corresponding facial-landmark array. Returns a
        (1, n_AUs) array of AU probabilities.
        """
        if len(frame.shape) < 2:
            frame = frame.reshape(1, -1)
        if len(landmarks.shape) > 1:
            landmarks = landmarks.flatten().reshape(1, -1)

        # Apply the pre-fitted scaler and PCA (transform, not fit_transform),
        # then append the flattened landmarks.
        pca_transformed_frame = self.pca_model.transform(
            self.scaler.transform(frame))
        feature_cbd = np.concatenate((pca_transformed_frame, landmarks), axis=1)

        pred_aus = []
        for key in self.classifier:
            au_pred = self.classifier[key].predict_proba(feature_cbd)
            pred_aus.append(au_pred[0, 1])

        return np.array(pred_aus).reshape(1, -1)