dataset.py
import os

from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms as T

# Workaround for the "OMP: Error #15" crash caused by duplicate OpenMP runtimes
# (common with conda-installed PyTorch).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


class LoLDataset(Dataset):
    """Paired low-light / normal-light image dataset."""

    def __init__(self, low_light_root, target_root, img_size=64):
        super().__init__()
        # Sort both listings so each low-light image is paired with the target
        # that shares its filename (os.listdir order is arbitrary).
        self.lol_fnames = sorted(
            os.path.join(low_light_root, f) for f in os.listdir(low_light_root)
        )
        self.target_fnames = sorted(
            os.path.join(target_root, f) for f in os.listdir(target_root)
        )
        self.transform = T.Compose([
            T.CenterCrop((img_size, img_size)),
            T.ToTensor(),  # float tensor in [0, 1], shape [C, H, W]
            # Zero mean / unit std is an identity mapping; values stay in [0, 1].
            T.Normalize([0.0, 0.0, 0.0], [1.0, 1.0, 1.0]),
        ])

    def __getitem__(self, idx):
        lol = Image.open(self.lol_fnames[idx]).convert("RGB")
        target = Image.open(self.target_fnames[idx]).convert("RGB")
        return self.transform(lol), self.transform(target)

    def __len__(self):
        return len(self.lol_fnames)
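

# A minimal usage sketch, kept behind a __main__ guard: build the dataset,
# wrap it in a DataLoader, and pull one batch of paired images. The directory
# paths below ("data/LOL/low", "data/LOL/high") are placeholders, not part of
# the original file, and should point at the actual paired image folders.
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    dataset = LoLDataset("data/LOL/low", "data/LOL/high", img_size=64)
    loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)

    lol, target = next(iter(loader))
    # Both tensors have shape [batch, 3, img_size, img_size] with values in [0, 1].
    print(lol.shape, target.shape)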