
Commit ebb88b1

committed
first commit
1 parent c36073a commit ebb88b1

23 files changed, +889 -0 lines changed

1.jpg (26 KB)

2.jpg (20.5 KB)

Model.py

Lines changed: 123 additions & 0 deletions
@@ -0,0 +1,123 @@
import numpy as np
import torch
import torchvision
from torch import nn, optim
from config import config


def l2_norm(x):
    # L2-normalize each row of x
    norm = torch.norm(x, p=2, dim=1, keepdim=True)
    x = torch.div(x, norm)
    return x


# A simple hand-built convolutional neural network
class myModel(nn.Module):
    def __init__(self, num_classes):
        super(myModel, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 16, 3),                    # in_channels, out_channels, kernel_size
            nn.BatchNorm2d(16),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2)   # 149
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, 3, 2),                # 74
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2)   # 37
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 32, 3, 2),                # 18
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.MaxPool2d(kernel_size=2, stride=2)   # 9
        )
        self.fc1 = nn.Sequential(
            nn.Linear(2592, 120),
            nn.ReLU(True)
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU(True),
            nn.Linear(84, num_classes)
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        x = self.fc2(x)
        return x


class ResNet18(nn.Module):
    def __init__(self, model, num_classes=1000):
        super(ResNet18, self).__init__()
        self.backbone = model

        self.fc1 = nn.Linear(512, 1024)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)

        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)

        x = self.backbone.avgpool(x)

        x = x.view(x.size(0), -1)
        x = l2_norm(x)
        x = self.dropout(x)
        x = self.fc1(x)
        x = l2_norm(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x


class ResNet101(nn.Module):
    def __init__(self, model, num_classes=1000):
        super(ResNet101, self).__init__()
        self.backbone = model

        self.fc1 = nn.Linear(2048, 2048)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(2048, num_classes)

    def forward(self, x):
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)

        x = self.backbone.layer1(x)
        x = self.backbone.layer2(x)
        x = self.backbone.layer3(x)
        x = self.backbone.layer4(x)

        x = self.backbone.avgpool(x)

        x = x.view(x.size(0), -1)
        x = l2_norm(x)
        x = self.dropout(x)
        x = self.fc1(x)
        x = l2_norm(x)
        x = self.dropout(x)
        x = self.fc2(x)

        return x


def get_net():
    # backbone = torchvision.models.resnet18(pretrained=True)
    # models = ResNet18(backbone, config.num_classes)
    backbone = torchvision.models.resnet101(pretrained=True)
    models = ResNet101(backbone, config.num_classes)
    return models
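A minimal shape-check sketch for myModel (assumed usage, not part of this commit). The size comments in its layers (149, 74, 37, 18, 9) match an input of roughly 300x300, which is what makes the flattened feature 32 * 9 * 9 = 2592 and fits nn.Linear(2592, 120):

import torch
from Model import myModel, get_net

# Hypothetical smoke test (not part of the commit).
model = myModel(num_classes=20)
dummy = torch.randn(4, 3, 300, 300)   # a ~300x300 input yields the 2592-dim flattened feature
out = model(dummy)
print(out.shape)                      # torch.Size([4, 20])

# With a recent torchvision (ResNet uses adaptive average pooling), the
# ResNet101 wrapper from get_net() also accepts the 227x227 size in config.py:
# net = get_net()                     # downloads pretrained ImageNet weights
# print(net(torch.randn(2, 3, 227, 227)).shape)   # torch.Size([2, 20])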

__pycache__/Model.cpython-36.pyc (3.28 KB, binary file not shown)

__pycache__/config.cpython-36.pyc (696 Bytes, binary file not shown)

__pycache__/datasets.cpython-36.pyc (3.5 KB, binary file not shown)

__pycache__/test.cpython-36.pyc (2.7 KB, binary file not shown)

config.py

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
# Adjust these settings to your own environment
class MyConfigs():

    data_folder = './data/flowers/'
    test_data_folder = ""
    model_name = "resnet"          # options: Vgg, ResNet152, myModel
    weights = "./checkpoints/"
    logs = "./logs/"
    example_folder = "./example/"
    freeze = True

    epochs = 300
    batch_size = 16
    img_height = 227               # height and width of the network input
    img_width = 227
    num_classes = 20
    lr = 1e-2
    lr_decay = 1e-4
    weight_decay = 2e-4
    ratio = 0.2


config = MyConfigs()
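The rest of the repo imports the shared instance rather than the class; a small usage sketch (assumed, mirroring the imports in Model.py and datasets.py):

from config import config

# All hyper-parameters are read from the single shared instance.
print(config.data_folder)                      # ./data/flowers/
print(config.img_height, config.img_width)     # 227 227
print(config.num_classes, config.batch_size)   # 20 16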

dandelion'.jpg (21 KB)

datasets.py

Lines changed: 171 additions & 0 deletions
@@ -0,0 +1,171 @@
import torch
from torch.utils.data import Dataset, DataLoader
import cv2
import os
from tqdm import tqdm
from config import config
from glob import glob
from torchvision import transforms
import numpy as np
import random
from shutil import copy
from PIL import Image
import math

np.random.seed(666)  # fix the random seed so the train/test split is the same on every run


'''
# 1. Parsing the mini_data dataset
def parse_data_config(data_path):
    files = []
    for img in os.listdir(data_path):
        image = data_path + img
        label = img.split("__")[0][3:]
        files.append((image, label))
    return files

# Split into training and test sets
# ratio is the fraction held out as the test set
def divide_data(data_path, ratio):
    files = parse_data_config(data_path)
    temp = np.array(files)
    test_data = []
    train_data = []
    for i in range(config.num_classes):
        temp_data = []
        for data in temp:
            if data[1] == str(i):
                temp_data.append(data)
        np.random.shuffle(np.array(temp_data))
        test_data = test_data + temp_data[:int(ratio * len(temp_data))]
        train_data = train_data + temp_data[int(ratio * len(temp_data)) + 1:]
    # np.random.shuffle(temp)
    # test_data = files[:int(ratio * len(files))]
    # train_data = files[int(ratio*len(files))+1:]

    # Pick 10 random images and copy them into the example folder
    if not os.path.exists(config.example_folder):
        os.mkdir(config.example_folder)
    else:
        for i in os.listdir(config.example_folder):
            os.remove(os.path.join(config.example_folder, i))
    for i in range(10):
        index = random.randint(0, len(test_data) - 1)      # random image index
        copy(test_data[index][0], config.example_folder)   # copy the selected image into example/

    return test_data, train_data
'''

# 2. Parsing the flowers dataset
def get_files(file_dir, ratio):
    roses = []
    labels_roses = []
    tulips = []
    labels_tulips = []
    dandelion = []
    labels_dandelion = []
    sunflowers = []
    labels_sunflowers = []
    for file in os.listdir(file_dir + 'roses'):
        roses.append(file_dir + 'roses' + '/' + file)
        labels_roses.append(0)
    for file in os.listdir(file_dir + 'tulips'):
        tulips.append(file_dir + 'tulips' + '/' + file)
        labels_tulips.append(1)
    for file in os.listdir(file_dir + 'dandelion'):
        dandelion.append(file_dir + 'dandelion' + '/' + file)
        labels_dandelion.append(2)
    for file in os.listdir(file_dir + 'sunflowers'):
        sunflowers.append(file_dir + 'sunflowers' + '/' + file)
        labels_sunflowers.append(3)

    image_list = np.hstack((roses, tulips, dandelion, sunflowers))
    labels_list = np.hstack((labels_roses, labels_tulips, labels_dandelion, labels_sunflowers))
    temp = np.array([image_list, labels_list])
    temp = temp.transpose()
    np.random.shuffle(temp)
    all_image_list = list(temp[:, 0])
    all_label_list = list(temp[:, 1])
    all_label_list = [int(i) for i in all_label_list]
    length = len(all_image_list)
    n_test = int(math.ceil(length * ratio))
    n_train = length - n_test

    tra_image = all_image_list[0:n_train]
    tra_label = all_label_list[0:n_train]

    test_image = all_image_list[n_train:]
    test_label = all_label_list[n_train:]

    train_data = [(tra_image[i], tra_label[i]) for i in range(len(tra_image))]
    test_data = [(test_image[i], test_label[i]) for i in range(len(test_image))]
    # print("train_data = ", test_image)
    # print("test_data = ", test_label)
    return test_data, train_data


# Dataset class that loads the data for training and testing
class datasets(Dataset):
    def __init__(self, data, transform=None, test=False):
        imgs = []
        labels = []
        self.test = test
        self.len = len(data)
        self.data = data
        self.transform = transform
        for i in self.data:
            imgs.append(i[0])
            labels.append(int(i[1]))   # PyTorch's cross-entropy expects labels starting from 0
        self.imgs = imgs
        self.labels = labels

    def __getitem__(self, index):
        if self.test:
            filename = self.imgs[index]
            img = cv2.imread(filename)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (config.img_width, config.img_height))
            img = transforms.ToTensor()(img)
            return img, filename
        else:
            img_path = self.imgs[index]
            label = self.labels[index]
            img = cv2.imread(img_path)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = cv2.resize(img, (config.img_width, config.img_height))

            if self.transform is not None:
                img = Image.fromarray(img)
                img = self.transform(img)
            else:
                img = transforms.ToTensor()(img)
            return img, label

    def __len__(self):
        return len(self.data)


def collate_fn(batch):   # how to assemble individual samples into a batch
    imgs = []
    label = []
    for sample in batch:
        imgs.append(sample[0])
        label.append(sample[1])

    return torch.stack(imgs, 0), label


# For debugging
if __name__ == '__main__':
    test_data, _ = get_files(config.data_folder, 0.2)
    for i in test_data:
        print(i)
    print(len(test_data))

    transform = transforms.Compose([transforms.ToTensor()])
    data = datasets(test_data, transform=transform)
    # print(data[0])
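A minimal sketch of wiring these pieces into a training DataLoader (assumed usage; the training script itself is not part of this commit). Note that collate_fn returns the labels as a plain Python list, so a training loop would typically convert them with torch.tensor(labels) before computing the loss.

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from config import config
from datasets import get_files, datasets, collate_fn

# Hypothetical wiring, assuming the flowers folder layout expected by get_files().
test_data, train_data = get_files(config.data_folder, config.ratio)

transform = transforms.Compose([transforms.ToTensor()])
train_set = datasets(train_data, transform=transform)
train_loader = DataLoader(train_set,
                          batch_size=config.batch_size,
                          shuffle=True,
                          collate_fn=collate_fn)

for imgs, labels in train_loader:
    print(imgs.shape)                 # torch.Size([16, 3, 227, 227])
    labels = torch.tensor(labels)     # labels arrive as a plain list from collate_fn
    print(labels.shape)               # torch.Size([16])
    break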

example/1.jpg (26 KB)

example/2.jpg (20.5 KB)

example/3.jpg (2.16 KB)

example/4.jpg (1.92 KB)

example/dandelion'.jpg (21 KB)

0 commit comments
