Skip to content

Commit bd03390

Browse files
authored
Add files via upload
1 parent 53173df commit bd03390

14 files changed

+12204
-0
lines changed

ASR.ipynb

+1
Original file line number | Diff line number | Diff line change
@@ -0,0 +1 @@
1+
{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  },
  "colab": {
   "name": "ASR.ipynb",
   "provenance": []
  }
 },
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "RG_RXxCQtPFh",
    "colab_type": "text"
   },
   "source": [
    "## Importing Libraries"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "H-GB9cTUtPFk",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "#Implementation of Incremental Mathematical Stream Regression (ASR)\n",
    "from sklearn import metrics #error metrics (MAE/MSE) used to evaluate predictions\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import math #math.log/math.exp are used below to derive the half-life smoothing factor alpha\n",
    "from numpy.linalg import inv, pinv, LinAlgError #matrix (pseudo-)inverse used in the ASR update"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "s9ZrAlY7tPF2",
    "colab_type": "text"
   },
   "source": [
    "## Loading Dataset"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "ZyUzQptXtPF3",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "#NOTE(review): hardcoded absolute Windows path - consider a configurable DATA_DIR constant\n",
    "testdataframe=pd.read_csv(\"H:/AOBD/boston.csv\",header=None) #load dataset from laptop\n",
    "testdataarray=testdataframe.values #convert the dataframe to array\n",
    "X_test=testdataarray[400:506,:-1] #separate the last 106 datapoints from data for test dataset\n",
    "y_test=testdataarray[400:506,-1] #separate the last 106 datapoints output variable for test dataset"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "aJ6wMlwgtPGn",
    "colab_type": "text"
   },
   "source": [
    "## Python Data Generator"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "iUHXHyyOtPGy",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "def myGeneratorfun(wsize=100):\n",
    "    \"\"\"Yield (features, target) chunks of `wsize` rows, simulating a data stream.\"\"\"\n",
    "    print(\"In myGeneratorfun:\")\n",
    "    counter=0 #counter to stop stream data after all data is used\n",
    "    for chunk in pd.read_csv('H:/AOBD/boston.csv', header=None, chunksize=wsize): #read the data in window-sized chunks\n",
    "        print(\"In for loop, counter value is\", counter)\n",
    "        chunk_array=chunk.values #each chunk of data is equal to window size; convert it to array\n",
    "        counter=counter+1 #increment counter by 1 to record that one more chunk of data was used\n",
    "        yield (chunk_array[:,:-1], chunk_array[:,-1]) #yield the chunk split into features and target column\n",
    "        if counter >= 4: #stop streaming after 4 chunks,\n",
    "            break #i.e. after all the training data has been used"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "JWu_Fj-rtPI7",
    "colab_type": "text"
   },
   "source": [
    "## ASR Implementation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "q8BpBlCltPI8",
    "colab_type": "text"
   },
   "source": [
    "![asr.JPG](attachment:asr.JPG)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "lNx4QofLtPI-",
    "colab_type": "text"
   },
   "source": [
    "![asr1.JPG](attachment:asr1.JPG)"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "WOPNQ56ltPJP",
    "colab_type": "code",
    "colab": {},
    "outputId": "3773cb2c-bae6-484a-8ee2-715c91b8f0ba"
   },
   "source": [
    "myGenerator = myGeneratorfun(100) #Call the data Generator with window size=100\n",
    "\n",
    "Beta_k = np.zeros(X_test.shape[1]) #initialize Beta_k (the running synopsis) with zeros of p*1 shape\n",
    "Beta_k_dash = np.zeros(X_test.shape[1]) #initialize Beta_k_dash (per-window estimate) with zeros of p*1 shape\n",
    "\n",
    "#alpha = 0.5 #smoothing factor for ASR=0.5, equal weightage for New data and Previous Data\n",
    "t=5 #initializing t as half life\n",
    "alpha= math.exp(-((math.log(2))/t)) #calculating alpha from the half-life t\n",
    "\n",
    "for (x,y) in myGenerator: #process the stream one window at a time\n",
    "\n",
    "    try:\n",
    "        XTXi=inv(np.dot(x.T,x)) #X^T X inverse for current window if invertible\n",
    "    except LinAlgError:\n",
    "        XTXi=pinv(np.dot(x.T,x)) #fall back to the pseudo-inverse if X^T X is singular\n",
    "\n",
    "    XTy = np.dot(x.T,y) #X^T y for current window\n",
    "    Beta_k_dash = np.dot(XTXi,(XTy)) #least-squares parameters for the current window alone\n",
    "    Beta_k = np.dot((1-alpha),Beta_k_dash)+np.dot(alpha,Beta_k) #ASR formula: blend new estimate with the pre-synopsis\n",
    "\n",
    "    predictions=np.dot(Beta_k,X_test.T) #calculate predictions on the held-out test data\n",
    "    print(predictions.shape)\n",
    "\n",
    "    #Print Mean Absolute Error And Mean Squared Error after each window\n",
    "    print(\"MAE:\", metrics.mean_absolute_error(y_true=y_test,y_pred=predictions))\n",
    "    print(\"MSE:\", metrics.mean_squared_error(y_true=y_test,y_pred=predictions))"
   ],
   "execution_count": 0,
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "In myGeneratorfun:\n",
      "In for loop, counter vlaue is 0\n",
      "(106,)\n",
      "MAE: 26.162161684337455\n",
      "MSE: 1111.6177456133041\n",
      "In for loop, counter vlaue is 1\n",
      "(106,)\n",
      "MAE: 4.294020071584694\n",
      "MSE: 30.171732802407305\n",
      "In for loop, counter vlaue is 2\n",
      "(106,)\n",
      "MAE: 4.464642658408654\n",
      "MSE: 30.705804122398295\n",
      "In for loop, counter vlaue is 3\n",
      "(106,)\n",
      "MAE: 3.2698325654573788\n",
      "MSE: 18.28457339686843\n"
     ]
    }
   ]
  }
 ]
}

CART.ipynb

+654
Large diffs are not rendered by default.

Collaborative_filtering_using_MF_with SGD.ipynb

+1
Large diffs are not rendered by default.

0 commit comments

Comments
 (0)