# -*- coding: utf-8 -*-
"""RNN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10ZuPqQXah80sV5Bh9FMFxqBV4NLWfIBn
"""
# To create DataFrames
import pandas as pd
# Use of arrays in Python
import numpy as np
# For plotting the accuracy
import matplotlib.pyplot as plt
import seaborn as sns
# Natural Language Toolkit for NLP-related work
import nltk
# Tokenizing the sentences into words
from nltk.tokenize import word_tokenize
# Stripping HTML tags from the raw reviews
from bs4 import BeautifulSoup
# Regular expressions for filtering out non-letter characters
import re
import warnings
warnings.filterwarnings('ignore')
sns.set()
nltk.download('punkt')
# error_bad_lines was removed in pandas 2.0; on_bad_lines='skip' is the current equivalent
data = pd.read_csv("IMDB Dataset.csv", engine='python', encoding='utf-8', on_bad_lines='skip')
print(data)
print(data.shape)
print(data.head(10))
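# (Added sanity-check sketch) Inspect the class balance before training;
# the standard IMDB dataset ships with a roughly even positive/negative split
print(data['sentiment'].value_counts())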
# Data Vocabulary Creation
def remove_html(review):
    # Strip HTML tags (e.g. <br />) from the raw review text
    bs = BeautifulSoup(review, "html.parser")
    return ' ' + bs.get_text() + ' '
def keep_only_letters(review):
    # Replace anything that is not a letter or whitespace with a space
    review = re.sub(r'[^a-zA-Z\s]', ' ', review)
    return review
def convert_to_lowercase(review):
    return review.lower()
def clean_reviews(review):
    review = remove_html(review)
    review = keep_only_letters(review)
    review = convert_to_lowercase(review)
    return review
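# (Added usage sketch, on a made-up snippet) the pipeline strips tags and
# punctuation and lowercases the text
print(clean_reviews("This movie was <br /> GREAT!!!"))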
# Apply the cleaning pipeline before building the vocabulary (the helper
# functions above were otherwise never called). Note that imdb_train and
# imdb_test are slices of the same frame, so the model is validated on
# data it has already seen during training.
data['review'] = data['review'].apply(clean_reviews)
imdb_train = data[:]
imdb_test = data[:]
from collections import Counter
# Count every word occurrence across all training reviews
counter = Counter([word for review in imdb_train['review'] for word in review.split()])
# Build a frequency table, most common words first
df = pd.DataFrame(list(counter.items()), columns=['key', 'value'])
df.sort_values(by='value', ascending=False, inplace=True)
print(df.shape[0])
# Fraction of all word occurrences covered by the 10,000 most frequent words
print(df[:10000].value.sum() / df.value.sum())
top_10k_words = list(df[:10000].key.values)
# Converting reviews into lists of integers
# Map each of the top-10k words to its rank once, so lookups are O(1)
# dict hits instead of an O(n) list.index call per word
word_to_index = {word: i for i, word in enumerate(top_10k_words)}
def get_encoded_input(review):
    # Keep at most the first 500 words of the review
    words = review.split()[:500]
    # Out-of-vocabulary words map to index 10000
    encoding = [word_to_index.get(word, 10000) for word in words]
    # Pad shorter reviews to length 500 with the padding index 10001
    encoding.extend([10001] * (500 - len(encoding)))
    return encoding
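# (Added sanity-check sketch) every encoded review should be exactly 500
# indices long, with values in the range [0, 10001] that the Embedding
# layer below (input_dim=10002) expects
sample = get_encoded_input(imdb_train['review'].iloc[0])
assert len(sample) == 500 and all(0 <= i <= 10001 for i in sample)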
training_data = np.array([get_encoded_input(review) for review in imdb_train['review']])
testing_data = np.array([get_encoded_input(review) for review in imdb_test['review']])
print(training_data.shape, testing_data.shape)
data['review_word_length'] = [len(review.split()) for review in data['review']]
data['review_word_length'].plot(kind='hist', bins=30)
plt.title('Word length distribution')
plt.show()
# Convert the sentiment labels into numerical form (positive -> 1, negative -> 0)
train_labels = np.array([1 if sentiment == 'positive' else 0 for sentiment in imdb_train['sentiment']])
test_labels = np.array([1 if sentiment == 'positive' else 0 for sentiment in imdb_test['sentiment']])
# Multi-layer Perceptron (Dense Neural Network) model
import tensorflow
# Use `x` for the layer outputs so the `data` DataFrame above is not shadowed
input_data = tensorflow.keras.layers.Input(shape=(500,))
x = tensorflow.keras.layers.Embedding(input_dim=10002, output_dim=32, input_length=500)(input_data)
x = tensorflow.keras.layers.Flatten()(x)
x = tensorflow.keras.layers.Dense(16)(x)
x = tensorflow.keras.layers.Activation('relu')(x)
x = tensorflow.keras.layers.Dropout(0.5)(x)
x = tensorflow.keras.layers.Dense(8)(x)
x = tensorflow.keras.layers.Activation('relu')(x)
x = tensorflow.keras.layers.Dropout(0.5)(x)
x = tensorflow.keras.layers.Dense(4)(x)
x = tensorflow.keras.layers.Activation('relu')(x)
x = tensorflow.keras.layers.Dropout(0.5)(x)
x = tensorflow.keras.layers.Dense(1)(x)
output_data = tensorflow.keras.layers.Activation('sigmoid')(x)
model = tensorflow.keras.models.Model(inputs=input_data, outputs=output_data)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
print(testing_data)
print(test_labels)
print(training_data)
print(train_labels)
model.fit(training_data, train_labels, epochs=10, batch_size=61, validation_data=(testing_data, test_labels))
## RNN: a bidirectional SimpleRNN on top of the same embedding
input_data = tensorflow.keras.layers.Input(shape=(500,))
x = tensorflow.keras.layers.Embedding(input_dim=10002, output_dim=32, input_length=500)(input_data)
x = tensorflow.keras.layers.Bidirectional(tensorflow.keras.layers.SimpleRNN(50))(x)
x = tensorflow.keras.layers.Dense(1)(x)
output_data = tensorflow.keras.layers.Activation('sigmoid')(x)
model = tensorflow.keras.models.Model(inputs=input_data, outputs=output_data)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(training_data, train_labels, epochs=10, batch_size=256, validation_data=(testing_data, test_labels))
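# (Added sketch) Plot training vs. validation accuracy, which is what the
# matplotlib/seaborn imports at the top were brought in for. Keras records
# these under the 'accuracy' and 'val_accuracy' keys when
# metrics=['accuracy'] is used.
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Bidirectional SimpleRNN: accuracy per epoch')
plt.legend()
plt.show()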