
Commit 6dea164

bump pytorch to v1.4.0
1 parent afd3510 commit 6dea164

File tree: 5 files changed, +21 -19 lines

README.md (+5 -2)
@@ -1,5 +1,8 @@
-Pytorch implementation of CVPR'17 - Local Binary Convolutional Neural Networks.
-* paper: http://xujuefei.com/lbcnn.html
+### Pytorch implementation of CVPR'17 - Local Binary Convolutional Neural Networks
+
+Juefei-Xu, F., Naresh Boddeti, V., & Savvides, M. (2017). Local binary convolutional neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 19-28).
+
+* paper link: http://xujuefei.com/lbcnn.html
 * original Torch (Lua) repository: https://github.com/juefeix/lbcnn.torch
 
 Training even MNIST with the parameters stated in the original repository is incredibly slow. Here is an example of training a toy model -- "2 x {BatchNorm2d(8) -> ConvLBP(8, 16, 3) -> Conv(16, 8, 1)} -> FC(200) -> FC(50) -> FC(10)" -- on MNIST:
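For reference, the toy architecture quoted in that README sentence can be written out roughly as below. This is a minimal self-contained sketch, not the repository's Lbcnn class: the ConvLBP stub only mirrors what lbcnn_model.py does (a frozen sparse binary kernel), and the initial 1-to-8 channel conv, the padding, and the 28x28 flatten size are assumptions made so the example runs on MNIST-shaped tensors.

```python
import torch
import torch.nn as nn

class ConvLBP(nn.Conv2d):
    """Simplified stand-in for the repo's LBP conv: a sparse binary {-1, 0, +1}
    kernel generated once and frozen (see lbcnn_model.py)."""
    def __init__(self, in_channels, out_channels, kernel_size=3, sparsity=0.5):
        super().__init__(in_channels, out_channels, kernel_size,
                         padding=kernel_size // 2, bias=False)
        weights = next(self.parameters())
        binary = torch.bernoulli(torch.full(weights.shape, 0.5)) * 2 - 1
        binary.masked_fill_(torch.rand(weights.shape) > sparsity, 0)
        weights.data = binary
        weights.requires_grad_(False)  # the LBP kernel is never trained

def lbp_block():
    # BatchNorm2d(8) -> ConvLBP(8, 16, 3) -> Conv(16, 8, 1)
    return nn.Sequential(nn.BatchNorm2d(8),
                         ConvLBP(8, 16, kernel_size=3),
                         nn.Conv2d(16, 8, kernel_size=1))

toy = nn.Sequential(
    nn.Conv2d(1, 8, kernel_size=3, padding=1),   # assumption: lift MNIST's 1 channel to 8
    lbp_block(), lbp_block(),                    # the "2 x {...}" part
    nn.Flatten(),
    nn.Linear(8 * 28 * 28, 200), nn.ReLU(),      # FC(200); 28x28 assumes padding keeps spatial size
    nn.Linear(200, 50), nn.ReLU(),               # FC(50)
    nn.Linear(50, 10),                           # FC(10), ten MNIST classes
)

print(toy(torch.randn(2, 1, 28, 28)).shape)      # torch.Size([2, 10])
```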

lbcnn_model.py (+1 -1)
@@ -12,7 +12,7 @@ def __init__(self, in_channels, out_channels, kernel_size=3, sparsity=0.5):
         mask_inactive = torch.rand(matrix_proba.shape) > sparsity
         binary_weights.masked_fill_(mask_inactive, 0)
         weights.data = binary_weights
-        weights.requires_grad = False
+        weights.requires_grad_(False)
 
 
 class BlockLBP(nn.Module):
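The only change here swaps the attribute assignment for the in-place Tensor method `requires_grad_(False)`; the effect is the same, which is the whole point of LBCNN: the binary LBP kernel stays fixed and only the learnable layers around it receive gradients. A small hedged illustration with toy shapes (not the repository's exact code):

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(8, 16, kernel_size=3, bias=False)
weights = next(conv.parameters())
weights.data = torch.sign(torch.randn_like(weights))  # toy binary {-1, +1} kernel
weights.requires_grad_(False)                         # in-place freeze, same as .requires_grad = False

# Only parameters that still require gradients would be updated by an optimizer.
trainable = [p for p in conv.parameters() if p.requires_grad]
print(len(trainable))  # 0 -- the frozen kernel is excluded
```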

main.py (+9 -10)
@@ -6,7 +6,6 @@
 import torch.optim as optim
 import torch.optim.lr_scheduler
 import torch.utils.data
-from torch.autograd import Variable
 from tqdm import tqdm
 
 from lbcnn_model import Lbcnn
@@ -32,7 +31,8 @@ def train(n_epochs=50, lbcnn_depth=2, learning_rate=1e-2, momentum=0.9, weight_d
     if not os.path.exists(models_dir):
         os.makedirs(models_dir)
 
-    train_loader = get_mnist_loader()
+    train_loader = get_mnist_loader(train=True)
+    test_loader = get_mnist_loader(train=False)
     model = Lbcnn(depth=lbcnn_depth)
     use_cuda = torch.cuda.is_available()
     if use_cuda:
@@ -45,11 +45,8 @@ def train(n_epochs=50, lbcnn_depth=2, learning_rate=1e-2, momentum=0.9, weight_d
     scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=lr_scheduler_step)
 
     for epoch in range(n_epochs):
-        scheduler.step()
         for batch_id, (inputs, labels) in enumerate(
                 tqdm(train_loader, desc="Epoch {}/{}".format(epoch, n_epochs))):
-            inputs = Variable(inputs)
-            labels = Variable(labels)
             if use_cuda:
                 inputs = inputs.cuda()
                 labels = labels.cuda()
@@ -58,15 +55,17 @@ def train(n_epochs=50, lbcnn_depth=2, learning_rate=1e-2, momentum=0.9, weight_d
             loss = criterion(outputs, labels)
             loss.backward()
             optimizer.step()
-        epoch_accuracy = calc_accuracy(model, loader=train_loader)
-        print("Epoch {} train accuracy: {:.3f}".format(epoch, epoch_accuracy))
-        if epoch_accuracy > best_accuracy:
-            best_accuracy = epoch_accuracy
+        accuracy_train = calc_accuracy(model, loader=train_loader)
+        accuracy_test = calc_accuracy(model, loader=test_loader)
+        print("Epoch {} accuracy: train={:.3f}, test={:.3f}".format(epoch, accuracy_train, accuracy_test))
+        if accuracy_train > best_accuracy:
+            best_accuracy = accuracy_train
             torch.save((lbcnn_depth, model.state_dict()), MODEL_PATH)
+        scheduler.step(epoch=epoch)
     train_duration_sec = int(time.time() - start)
     print('Finished Training. Total training time: {} sec'.format(train_duration_sec))
 
 
 if __name__ == '__main__':
+    # train includes test phase at each epoch
     train(n_epochs=5)
-    test()
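Taken together, the main.py edits modernize the loop for PyTorch 1.x: Variable wrappers are gone (tensors carry autograd state themselves since 0.4), a separate test loader is evaluated each epoch, and the LR scheduler is stepped after the epoch's optimizer updates, which is the ordering PyTorch 1.1+ expects. Below is a minimal, self-contained sketch of that shape; the model, criterion, and loader are stand-ins, not the repository's Lbcnn or get_mnist_loader, and the commit itself keeps the older explicit epoch= argument to step().

```python
import torch
import torch.nn as nn
import torch.optim as optim

# Tiny stand-ins so the sketch runs on its own.
model = nn.Linear(784, 10)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5)
train_loader = [(torch.randn(8, 784), torch.randint(0, 10, (8,))) for _ in range(4)]

for epoch in range(3):
    for inputs, labels in train_loader:       # tensors go straight in, no Variable wrapping
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        optimizer.step()
    scheduler.step()                          # per-epoch LR decay, after the optimizer updates
```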

requirements.txt (+3 -3)
@@ -1,3 +1,3 @@
-pytorch==0.3.0
-torchvision==0.2.0
-tqdm==4.23.0
+pytorch==1.4.0
+torchvision==0.5.0
+tqdm

utils.py (+3 -3)
@@ -2,7 +2,6 @@
 import torch.utils.data
 import torchvision
 import torchvision.transforms as transforms
-from torch.autograd import Variable
 from tqdm import tqdm
 
 
@@ -40,12 +39,13 @@ def calc_accuracy(model, loader, verbose=False):
         if use_cuda:
             inputs = inputs.cuda()
             labels = labels.cuda()
-        outputs_batch = model(Variable(inputs, volatile=True))
+        with torch.no_grad():
+            outputs_batch = model(inputs)
         outputs_full.append(outputs_batch)
         labels_full.append(labels)
     model.train(mode_saved)
     outputs_full = torch.cat(outputs_full, dim=0)
     labels_full = torch.cat(labels_full, dim=0)
     _, labels_predicted = torch.max(outputs_full.data, dim=1)
-    accuracy = torch.sum(labels_full == labels_predicted) / len(labels_full)
+    accuracy = torch.sum(labels_full == labels_predicted).item() / float(len(labels_full))
     return accuracy
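The utils.py change replaces the removed volatile Variable with a torch.no_grad() block and pulls the match count out of the 0-d tensor with .item() before dividing, so the accuracy is an ordinary Python float instead of the result of integer tensor division, which would otherwise truncate the ratio. A hedged, self-contained sketch of that evaluation pattern with a stand-in model and loader:

```python
import torch
import torch.nn as nn

model = nn.Linear(784, 10)   # stand-in for the saved Lbcnn
loader = [(torch.randn(8, 784), torch.randint(0, 10, (8,))) for _ in range(4)]

model.eval()
outputs_full, labels_full = [], []
with torch.no_grad():        # no autograd graph is built during evaluation
    for inputs, labels in loader:
        outputs_full.append(model(inputs))
        labels_full.append(labels)
outputs_full = torch.cat(outputs_full, dim=0)
labels_full = torch.cat(labels_full, dim=0)
_, labels_predicted = torch.max(outputs_full, dim=1)
accuracy = torch.sum(labels_full == labels_predicted).item() / float(len(labels_full))
print("accuracy: {:.3f}".format(accuracy))
```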
