dataset.py
import os
import numpy
import torch
import pandas as pd
from torch.utils.data import DataLoader
from torch.utils.data import Dataset

import conf


def getDataLoader(seq_dir, info_list, seq_length, cnn_type, transform=None):
    """Build a shuffled DataLoader over the pre-extracted feature sequences."""
    my_dataset = MyDataset(info_list, seq_dir, seq_length, cnn_type, transform)
    # my_dataset.test()
    my_dataloader = DataLoader(
        my_dataset,
        batch_size=conf.TRAINING_BATCH_SIZE,
        shuffle=True,
        num_workers=conf.NUM_WORKERS
    )
    return my_dataloader


class MyDataset(Dataset):
    def __init__(self, info_list, seq_dir, seq_length, cnn_type, transform=None):
        """
        Args:
            info_list (string): Path to the info list file with annotations
                (space-delimited, one '<video_name> <label>' pair per line).
            seq_dir (string): Directory with all the extracted features.
            seq_length (int): Number of frames per sequence; part of the
                feature file name.
            cnn_type (string): Name of the CNN used to extract the features;
                part of the feature file name.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.landmarks_frame = pd.read_csv(info_list, delimiter=' ', header=None)
        self.seq_dir = seq_dir
        self.transform = transform
        self.seq_length = seq_length
        self.cnn_type = cnn_type

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        # Map the video name to its feature file:
        # '<video>-<seq_length>-features-<cnn_type>.npy'
        video_name = self.landmarks_frame.iloc[idx, 0]
        base_name = video_name[:-4]  # strip the 4-character extension, e.g. '.mp4'
        seq_path = os.path.join(
            self.seq_dir,
            base_name + '-' + str(self.seq_length) + '-features-' + str(self.cnn_type) + '.npy'
        )
        label = self.landmarks_frame.iloc[idx, 1]
        features = numpy.load(seq_path)
        if self.transform:
            features = self.transform(features)
        return features, label

    def test(self):
        # Debug helper: print the parsed annotation frame.
        print(len(self.landmarks_frame))
        print(self.landmarks_frame)
        print(type(self.landmarks_frame))
        print(self.landmarks_frame.iloc[0, 0])
        print(self.landmarks_frame.iloc[1, 0])
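

# A minimal usage sketch, not part of the original module. It assumes `conf`
# defines TRAINING_BATCH_SIZE and NUM_WORKERS, that the info list is a
# space-delimited file of '<video_name> <label>' lines, and that the feature
# files follow the '<video>-<seq_length>-features-<cnn_type>.npy' naming used
# in __getitem__. The paths and values below are illustrative placeholders.
if __name__ == '__main__':
    loader = getDataLoader(
        seq_dir='features/',          # hypothetical feature directory
        info_list='train_list.txt',   # hypothetical annotation file
        seq_length=40,                # example sequence length
        cnn_type='resnet152'          # example CNN name used during extraction
    )
    for features, labels in loader:
        # The default collate function stacks the numpy feature arrays into a
        # tensor of shape [batch, ...] and collects the labels alongside.
        print(features.shape, labels)
        break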