|
| 1 | +import os |
| 2 | +import numpy as np |
| 3 | +from sklearn.neural_network import MLPClassifier |
| 4 | +from sklearn.metrics import classification_report,f1_score |
| 5 | +from sklearn.model_selection import train_test_split |
| 6 | +import matplotlib.pyplot as plt |
| 7 | +import time |
| 8 | + |
| 9 | +################## |
| 10 | +from sklearn.ensemble import RandomForestClassifier |
| 11 | +################### |
| 12 | + |
| 13 | +#################################### Import Data ########################################## |
def import_data(file):
    """Load a CSV dataset from the ``Compressed_data`` directory next to this script.

    Parameters
    ----------
    file : str
        CSV filename (comma-delimited floats with a single header row).
        Make sure the dataset is in the ``Compressed_data`` folder.

    Returns
    -------
    numpy.ndarray
        2-D float array of the file contents with the header row skipped.
    """
    file_dir = os.path.dirname(__file__)
    # Pass path components separately so the join is portable across OS separators.
    file_path = os.path.join(file_dir, 'Compressed_data', file)
    train_data = np.loadtxt(file_path, dtype=float, delimiter=',', skiprows=1)
    return train_data
| 19 | + |
| 20 | +############################################## Neural Network Implementation #################################################### |
def NeuralNetworkTrain(X_train, y_train, X_test, y_test):
    """Train MLP classifiers of increasing width and plot train/test macro-F1.

    For each width ``i`` in (5, 10, 15) an MLP with THREE hidden layers of
    ``i`` units each is fitted (note: ``hidden_layer_sizes=(i, i, i)`` means
    three layers, not one — try other depths by changing the tuple).
    Classification reports are printed for each fit, then macro-averaged F1
    scores for the train and test sets are plotted against the layer width.

    Parameters
    ----------
    X_train, y_train : array-like
        Training feature matrix and label vector.
    X_test, y_test : array-like
        Held-out feature matrix and label vector.
    """
    # Plain lists avoid the repeated-reallocation cost of np.append in a loop.
    train_scores = []
    test_scores = []
    indices = []
    for i in (5, 10, 15):  # hidden-layer widths to compare
        print("hidden layer: ", i, "\n")
        mlp = MLPClassifier(hidden_layer_sizes=(i, i, i), early_stopping=True,
                            learning_rate='adaptive', learning_rate_init=0.003)
        mlp.fit(X_train, y_train)

        predictions_train = mlp.predict(X_train)
        print("Fitting of train data for size ", i, " : \n",
              classification_report(y_train, predictions_train))

        predictions_test = mlp.predict(X_test)
        print("Fitting of test data for size ", i, " : \n",
              classification_report(y_test, predictions_test))

        # Macro averaging weighs every class equally, so a dominant class
        # cannot mask poor performance on rare ones.
        train_scores.append(f1_score(y_train, predictions_train, average='macro'))
        test_scores.append(f1_score(y_test, predictions_test, average='macro'))
        indices.append(i)

    plt.plot(indices, train_scores)
    plt.plot(indices, test_scores)
    plt.xlabel('Hidden layer width')
    plt.ylabel('Macro F1 score')
    plt.legend(['Train scores', 'Test scores'], loc='upper left')
    plt.show()
| 46 | + |
| 47 | + |
| 48 | +################################################################################################### |
| 49 | + |
| 50 | + |
| 51 | +######################################### Random Forest Implementation ############################################## |
def RandomForestTrain(X_train, y_train, X_test, y_test):
    """Fit a 100-tree random forest and print train/test classification reports.

    Parameters
    ----------
    X_train, y_train : array-like
        Training feature matrix and label vector.
    X_test, y_test : array-like
        Held-out feature matrix and label vector.
    """
    rf = RandomForestClassifier(n_estimators=100)
    rf.fit(X_train, y_train)

    predictions_train = rf.predict(X_train)
    print("Fitting of train data : \n", classification_report(y_train, predictions_train))

    predictions_test = rf.predict(X_test)
    print("Fitting of test data for size : \n", classification_report(y_test, predictions_test))
| 76 | +###################################################################################################### |
| 77 | + |
| 78 | + |
start_time = time.time()  #### Tracking the execution time


def _split_features_labels(data, cross_val, m):
    """Split a raw data matrix into (X_train, y_train, X_test, y_test).

    Column 0 is skipped (presumably an index/id column — verify against the
    CSV layout) and the last column is the label; columns 1..n-2 are the
    features.  Rows [0, m) form the training set, and rows [cross_val, end)
    — the last user's records — form the test set for cross-validation.
    """
    train_data = data[0:m, :]
    test_data = data[cross_val:, :]
    # Number of feature columns: total minus the leading index column and
    # the trailing label column.
    parameters = len(train_data[0, :]) - 2
    X_train = train_data[:, 1:parameters + 1]
    y_train = train_data[:, parameters + 1]
    X_test = test_data[:, 1:parameters + 1]
    y_test = test_data[:, parameters + 1]
    return X_train, y_train, X_test, y_test


#### Row number after which the data of the last user is recorded, per file.
#### Training only on data of two users; you can check out all 8 users
#### (set m = 993720 for adata and m = 1060904 for gdata).
for fname, cutoff in (('gdata.csv', 1060904), ('adata.csv', 993720)):
    data = import_data(fname)  #### Importing data
    X_train, y_train, X_test, y_test = _split_features_labels(data, cutoff, cutoff)
    NeuralNetworkTrain(X_train, y_train, X_test, y_test)

print ("time elapsed: ", format(time.time() - start_time))  #### This will take 6-7 minutes if you take the entire dataset
#RandomForestTrain(X_train,y_train,X_test,y_test)
#print ("time elapsed: ", format(time.time() - start_time))  #### This is going to take a lot of time maybe half an hour
| 118 | + |
| 119 | + |
0 commit comments