model.py
from tqdm import tqdm
import torch
from torch import nn, optim
from torch.nn import functional as F
from matplotlib import pyplot as plt
from torchsummary import summary
from utils import GetCorrectPredCount
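# GetCorrectPredCount comes from a local utils.py that is not shown here; it
# is assumed to count predictions whose argmax matches the label, roughly:
#
#     def GetCorrectPredCount(pPrediction, pLabels):
#         return pPrediction.argmax(dim=1).eq(pLabels).sum().item()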

# Per-epoch history of metrics, appended by model_train and model_test below
train_losses = []
test_losses = []
train_acc = []
test_acc = []


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3)
        self.fc1 = nn.Linear(4096, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))                   # 1x28x28 -> 32x26x26
        x = F.relu(F.max_pool2d(self.conv2(x), 2))  # -> 64x24x24 -> 64x12x12
        x = F.relu(self.conv3(x))                   # -> 128x10x10
        x = F.relu(F.max_pool2d(self.conv4(x), 2))  # -> 256x8x8 -> 256x4x4
        x = x.view(-1, 4096)                        # flatten: 256 * 4 * 4
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
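
# Quick shape sanity check (a sketch, not part of the original file): a batch
# of two 1x28x28 images should come out as two rows of 10 log-probabilities.
#
#     net = Net()
#     out = net(torch.randn(2, 1, 28, 28))
#     assert out.shape == (2, 10)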

def model_train(model, device, train_loader, optimizer):
    model.train()
    pbar = tqdm(train_loader)
    train_loss = 0
    correct = 0
    processed = 0
    for batch_idx, (data, target) in enumerate(pbar):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        # Predict
        pred = model(data)
        # Calculate loss
        loss = F.nll_loss(pred, target)
        train_loss += loss.item()
        # Backpropagation
        loss.backward()
        optimizer.step()
        correct += GetCorrectPredCount(pred, target)
        processed += len(data)
        pbar.set_description(
            f'Train: Loss={loss.item():0.4f} '
            f'Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
    train_acc.append(100 * correct / processed)
    train_losses.append(train_loss / len(train_loader))


def model_test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # sum up batch loss
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += GetCorrectPredCount(output, target)
    test_loss /= len(test_loader.dataset)
    test_acc.append(100. * correct / len(test_loader.dataset))
    test_losses.append(test_loss)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def plot_loss_n_acc():
    """Plot the training and test losses and accuracies.

    Reads the module-level lists ``train_losses``, ``test_losses``,
    ``train_acc`` and ``test_acc``, which hold one value per epoch.
    """
    # performance and loss curves
    fig, axs = plt.subplots(2, 2, figsize=(15, 10))
    axs[0, 0].plot(train_losses)
    axs[0, 0].set_title("Training Loss")
    axs[1, 0].plot(train_acc)
    axs[1, 0].set_title("Training Accuracy")
    axs[0, 1].plot(test_losses)
    axs[0, 1].set_title("Test Loss")
    axs[1, 1].plot(test_acc)
    axs[1, 1].set_title("Test Accuracy")
    plt.show()


def summary_printer(model):
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    model = model.to(device)
    return summary(model, input_size=(1, 28, 28))
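

# Minimal end-to-end sketch of how these pieces fit together. The MNIST
# loaders below are an assumption (the file only fixes the 1x28x28 input
# shape); swap in your own DataLoaders as needed.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import datasets, transforms

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_loader = DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.ToTensor()),
        batch_size=64, shuffle=True)
    test_loader = DataLoader(
        datasets.MNIST('../data', train=False,
                       transform=transforms.ToTensor()),
        batch_size=1000)

    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    summary_printer(model)
    for epoch in range(1, 3):
        model_train(model, device, train_loader, optimizer)
        model_test(model, device, test_loader)
    plot_loss_n_acc()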