From 1803887c0a8228922f9702e1811ce39168e4ff63 Mon Sep 17 00:00:00 2001
From: Ananthu Ajay <44108056+AnanthuAjay@users.noreply.github.com>
Date: Tue, 16 Jun 2020 17:18:15 +0530
Subject: [PATCH] Create day5_Task4.py

---
 Tasks/daily tasks/Ananthu Ajay/day5_Task4.py | 107 +++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 Tasks/daily tasks/Ananthu Ajay/day5_Task4.py

diff --git a/Tasks/daily tasks/Ananthu Ajay/day5_Task4.py b/Tasks/daily tasks/Ananthu Ajay/day5_Task4.py
new file mode 100644
index 0000000..310509d
--- /dev/null
+++ b/Tasks/daily tasks/Ananthu Ajay/day5_Task4.py
@@ -0,0 +1,107 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision
+import torchvision.transforms as transforms
+import torch.optim as optim
+
+transform = transforms.Compose(
+    [
+        transforms.ToTensor(),
+        transforms.Normalize(
+            (0.5, 0.5, 0.5),
+            (0.5, 0.5, 0.5)
+        )
+    ]
+)
+
+trainset = torchvision.datasets.CIFAR10(
+    root='./data',
+    train=True,
+    download=True,
+    transform=transform
+)
+
+testset = torchvision.datasets.CIFAR10(
+    root='./data',
+    train=False,
+    download=False,
+    transform=transform
+)
+
+trainloader = torch.utils.data.DataLoader(
+    trainset,
+    batch_size=4,
+    shuffle=True,
+    num_workers=0
+)
+
+testloader = torch.utils.data.DataLoader(
+    testset,
+    batch_size=4,
+    shuffle=False,
+    num_workers=0
+)
+
+classes = (
+    'plane', 'car', 'bird', 'cat',
+    'deer', 'dog', 'frog', 'horse', 'ship', 'truck'
+)
+
+class Net(nn.Module):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.conv1 = nn.Conv2d(3, 6, 5)
+        self.pool = nn.MaxPool2d(2, 2)
+        self.conv2 = nn.Conv2d(6, 16, 5)
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        self.fc2 = nn.Linear(120, 84)
+        self.fc3 = nn.Linear(84, 10)
+
+    def forward(self, x):
+        x = self.pool(F.relu(self.conv1(x)))
+        x = self.pool(F.relu(self.conv2(x)))
+        x = x.view(-1, 16 * 5 * 5)
+        x = F.relu(self.fc1(x))
+        x = F.relu(self.fc2(x))
+        x = self.fc3(x)
+        return x
+
+
+net = Net()
+
+loss_function = nn.CrossEntropyLoss()
+optimizer = optim.SGD(
+    net.parameters(),
+    lr=1
+)
+
+for epoch in range(3):
+    running_loss = 0.0
+    for i, data in enumerate(trainloader, 0):
+        # data = (inputs, labels)
+        inputs, labels = data
+        optimizer.zero_grad()
+
+        outputs = net(inputs)
+        loss = loss_function(outputs, labels)
+        loss.backward()
+        optimizer.step()
+
+        running_loss += loss.item()
+        if i % 2000 == 1999:
+            print(
+                '[%d, %5d] loss: %.3f' %
+                (epoch + 1, i + 1, running_loss / 2000)
+            )
+            running_loss = 0.0
+print("voila")
+
+# When learning rate = 0.001 and epochs = 2
+# [2, 12000] loss: 1.881
+# When learning rate = 0.01 and epochs = 2
+# [2, 12000] loss: 1.285
+# When learning rate = 0.1 and epochs = 3
+# [3, 12000] loss: 1.156
+# When learning rate = 1 and epochs = 3
+# [3, 12000] loss: 1.973
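
The script above builds testloader and classes but the commit never evaluates the trained network on the held-out split, even though the trailing comments compare training losses across learning rates. As a rough sketch only (not part of this patch), a test-set accuracy check that could be appended to day5_Task4.py, reusing the net and testloader names defined above, might look like the following:

    import torch

    net.eval()  # inference mode; a no-op for this small net but a good habit
    correct = 0
    total = 0
    with torch.no_grad():  # gradients are not needed for evaluation
        for images, labels in testloader:
            outputs = net(images)
            _, predicted = torch.max(outputs, 1)  # class index with the highest score
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy on the 10000 test images: %.2f %%' % (100.0 * correct / total))

A per-class breakdown could be reported the same way by indexing into classes, which would make the learning-rate comparison in the end-of-file comments easier to interpret than training loss alone.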