forked from gdjmck/AttentionBasedEmbeddingForMetricLearning
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathdataset.py
146 lines (127 loc) · 6.2 KB
/
dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
import pickle
import scipy.io as sio
import torch
import os
import numpy as np
from PIL import Image
from scipy.special import comb
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# ImageNet channel statistics shared by the training transform below and
# by MetricData.tensor2img when undoing the normalization.
mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
# Inverse of transforms.Normalize(mean, std), expressed as two Normalize
# steps: first divide the std back out, then add the mean back in.
invTrans = transforms.Compose([
    transforms.Normalize(mean=[0.0, 0.0, 0.0],
                         std=[1 / 0.229, 1 / 0.224, 1 / 0.225]),
    transforms.Normalize(mean=[-0.485, -0.456, -0.406],
                         std=[1.0, 1.0, 1.0]),
])
class ImageFolderWithName(datasets.ImageFolder):
    """ImageFolder that can optionally return the sample's (path, class) record.

    When ``return_fn`` is True, ``__getitem__`` yields
    ``(image, label, self.imgs[index])`` instead of ``(image, label)``.
    """

    def __init__(self, return_fn=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.return_fn = return_fn

    def __getitem__(self, index):
        sample, target = super().__getitem__(index)
        if self.return_fn:
            # self.imgs[index] is ImageFolder's (file_path, class_index) tuple
            return sample, target, self.imgs[index]
        return sample, target
class MetricData(torch.utils.data.Dataset):
    """Stanford-Cars-style dataset backed by a .mat annotation file.

    Args:
        data_root: directory containing the image files.
        anno_file: ``.mat`` file whose ``'annotations'`` entry holds, per
            sample, the class label in its second-to-last field and the
            file name in its last field.
        idx_file: pickle mapping label -> list of sample indices (consumed
            by SourceSampler); only loaded when the path ends with 'pkl'.
        return_fn: when True, ``__getitem__`` also returns the file name.
    """

    def __init__(self, data_root, anno_file, idx_file, return_fn=False):
        self.return_fn = return_fn
        # NOTE(review): if idx_file is not a .pkl, self.idx is never set and
        # SourceSampler would later fail with AttributeError — confirm callers
        # always pass a pickle when a sampler is used.
        if idx_file.endswith('pkl'):
            with open(idx_file, 'rb') as f:
                self.idx = pickle.load(f)
        assert anno_file.endswith('mat')
        self.anno = sio.loadmat(anno_file)['annotations']
        self._convert_labels()
        self.data_root = data_root
        # Standard ImageNet-style training augmentation; Normalize uses the
        # module-level mean/std so tensor2img can invert it.
        self.transforms = transforms.Compose([transforms.Resize(256),
                                              transforms.RandomCrop((224, 224)),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.ToTensor(),
                                              transforms.Normalize(mean=mean, std=std)])

    def __len__(self):
        # loadmat returns the annotations as a (1, N) object array
        return self.anno.shape[1]

    def _convert_labels(self):
        """Cache per-sample labels and file names from the raw .mat array."""
        labels, fns = [], []
        for i in range(self.anno.shape[1]):
            # second-to-last field is the class label, last is the file name
            labels.append(self.anno[0, i][-2][0, 0])
            fns.append(self.anno[0, i][-1][0])
        self.labels = labels
        self.fns = fns

    @classmethod
    def tensor2img(cls, tensor):
        """Undo the Normalize transform and rescale image(s) to [0, 255].

        Accepts a single CHW tensor/array or a batched NCHW one; always
        returns a list of HWC float arrays.
        """
        # isinstance instead of type(...) != ... (handles subclasses, idiomatic)
        if not isinstance(tensor, np.ndarray):
            tensor = tensor.cpu().numpy()
        if len(tensor.shape) == 4:
            # batched input: recurse per image and flatten the results
            imgs = []
            for i in range(tensor.shape[0]):
                imgs.extend(cls.tensor2img(tensor[i, ...]))
            return imgs
        assert tensor.shape[0] == 3
        img = np.transpose(tensor, (1, 2, 0))
        # invert Normalize(mean, std): x * std + mean, then scale to 0-255
        img = img * np.array(std) + np.array(mean)
        return [img * 255]

    def __getitem__(self, i):
        img = Image.open(os.path.join(self.data_root, self.fns[i])).convert('RGB')
        img = self.transforms(img)
        return img if not self.return_fn else (img, self.fns[i])
class SourceSampler(torch.utils.data.Sampler):
    """Batch sampler yielding a mix of positive groups and negative pairs.

    Each yielded batch holds index lists built in two halves:
    ``batch_size / (2 * batch_k)`` positive labels contribute ``batch_k``
    samples each, and the same number of labels contribute one
    (anchor, negative) pair each.

    Args:
        data_source: dataset exposing ``.labels`` (per-sample label list)
            and ``.idx`` (label -> list of sample indices), e.g. MetricData.
        batch_k: samples drawn per positive label.
        batch_size: nominal number of indices per batch.
    """

    def __init__(self, data_source, batch_k=2, batch_size=32):
        self.data_source = data_source
        self.batch_k = batch_k
        self.num_samples = len(self.data_source)
        self.batch_size = batch_size
        print('number of data:', len(self.data_source))
        labels, counts = np.unique(self.data_source.labels, return_counts=True)
        self.max_samples = max(counts)
        self.min_samples = min(counts)
        self.labels = labels
        # every label needs >= batch_k samples to draw without replacement
        assert self.min_samples >= self.batch_k

    def __len__(self):
        # batches per epoch: labels * C(min_samples, batch_k) / batch_size
        iter_len = len(self.labels) * comb(self.min_samples, self.batch_k)
        return int(iter_len // self.batch_size)

    def __iter__(self):
        for _ in range(len(self)):
            n_groups = int(self.batch_size / (2 * self.batch_k))
            # sample both positive and negative labels
            pos_labels = np.random.choice(self.labels, n_groups, replace=False)
            neg_labels = np.random.choice(self.labels, n_groups, replace=False)
            ret_idx = []
            for label in pos_labels:
                # FIX: draw batch_k samples per positive label (was hard-coded
                # to 2), so the layout matches the batch_size/(2*batch_k) math
                # for any batch_k. Unchanged for the default batch_k=2.
                ret_idx.extend(np.random.choice(self.data_source.idx[label],
                                                self.batch_k, replace=False))
            for label in neg_labels:
                # pick a different label and pair one sample from each
                neg_label = np.random.choice([l for l in self.labels if l != label], 1)[0]
                label_idx = np.random.choice(self.data_source.idx[label], 1)
                neg_label_idx = np.random.choice(self.data_source.idx[neg_label], 1)
                ret_idx.extend([label_idx[0], neg_label_idx[0]])
            # (debug print of the batch length removed)
            yield ret_idx
if __name__ == '__main__':
    # Smoke test: iterate the labelled cars dataset through a balanced batch
    # sampler and print every batch's shape and labels.
    # NOTE(review): all paths below are machine-specific; adjust before running.
    from sampler import BalancedBatchSampler
    data = ImageFolderWithName(return_fn=False, root='/home/chk/cars_stanford/cars_train_labelled/train', transform=transforms.Compose([
        transforms.Resize(228),
        transforms.RandomCrop((224, 224)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])]),
        loader=lambda x: Image.open(x).convert('RGB'))
    # batch_k samples per class, 32 indices per batch, 2000 batches per epoch
    dataset = torch.utils.data.DataLoader(data, batch_sampler=BalancedBatchSampler(data, batch_size=32, batch_k=4, length=2000), num_workers=4)
    # Alternative path kept for reference: .mat-annotated MetricData driven
    # by the SourceSampler defined above.
    '''
    data = MetricData(data_root='/home/chk/cars_stanford/cars_train', \
        anno_file='/home/chk/cars_stanford/devkit/cars_train_annos.mat', \
        idx_file='/home/chk/cars_stanford/devkit/cars_train_annos_idx.pkl', \
        return_fn=True)
    sampler = SourceSampler(data)
    print('Batch sampler len:', len(sampler))
    dataset = torch.utils.data.DataLoader(data, batch_sampler=sampler)
    '''
    from model import MetricLearner
    #model = MetricLearner()
    # Drain the loader once to verify shapes/labels come out as expected.
    for i, (td, label) in enumerate(dataset):
        # if i % 100 == 0:
        print(i, '\tBatch shape:\t', td.shape, '\t', label)
        #break
        #pred = model(td)
        #print(pred.shape)