forked from rmokady/structural-analogy
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmodels.py
112 lines (97 loc) · 4.24 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
import torch
import torch.nn as nn
class ConvBlock(nn.Sequential):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.2) building block.

    Args:
        in_channel: number of input feature channels.
        out_channel: number of output feature channels.
        ker_size: square kernel size of the convolution.
        padd: zero-padding applied on each side of the input.
        stride: stride of the convolution.
    """
    def __init__(self, in_channel, out_channel, ker_size, padd, stride):
        super(ConvBlock, self).__init__()
        # Fix: the original statements ended with stray trailing commas,
        # which created and discarded useless (None,) tuples on every call.
        self.add_module('conv', nn.Conv2d(in_channel, out_channel, kernel_size=ker_size, stride=stride, padding=padd))
        self.add_module('norm', nn.BatchNorm2d(out_channel))
        self.add_module('LeakyRelu', nn.LeakyReLU(0.2, inplace=True))
def weights_init(m):
    """DCGAN-style initializer; apply with ``net.apply(weights_init)``.

    Conv layers get N(0, 0.02) weights; normalization layers get
    N(1, 0.02) weights and zero bias. Layers are matched by a substring
    of their class name, as in the original DCGAN reference code.
    """
    name = type(m).__name__
    if 'Conv2d' in name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'Norm' in name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class WDiscriminator(nn.Module):
    """Patch-based WGAN critic: a head conv block, a stack of shrinking body
    blocks, and a single-channel linear conv tail (no final activation)."""

    def __init__(self, opt):
        super(WDiscriminator, self).__init__()
        self.is_cuda = torch.cuda.is_available()
        width = int(opt.nfc)
        self.head = ConvBlock(opt.nc_im, width, opt.ker_size, opt.padd_size, 1)
        self.body = nn.Sequential()
        # Channel count halves per body block, floored at opt.min_nfc.
        for idx in range(opt.num_layer - 2):
            width = int(opt.nfc / pow(2, (idx + 1)))
            self.body.add_module(
                'block%d' % (idx + 1),
                ConvBlock(max(2 * width, opt.min_nfc), max(width, opt.min_nfc),
                          opt.ker_size, opt.padd_size, 1))
        self.tail = nn.Conv2d(max(width, opt.min_nfc), 1,
                              kernel_size=opt.ker_size, stride=1,
                              padding=opt.padd_size)

    def forward(self, x):
        # Plain feed-forward: head -> body -> tail.
        out = self.head(x)
        out = self.body(out)
        return self.tail(out)
class GeneratorConcatSkip2CleanAdd(nn.Module):
    """Residual generator: conv stack output is added to a center crop of the
    skip input ``y`` (cropped to match the conv output's spatial size)."""

    def __init__(self, opt):
        super(GeneratorConcatSkip2CleanAdd, self).__init__()
        self.is_cuda = torch.cuda.is_available()
        width = opt.nfc
        self.head = ConvBlock(opt.nc_im, width, opt.ker_size, opt.padd_size,
                              1)  # GenConvTransBlock(opt.nc_z,N,opt.ker_size,opt.padd_size,opt.stride)
        self.body = nn.Sequential()
        # Channel count halves per body block, floored at opt.min_nfc.
        for idx in range(opt.num_layer - 2):
            width = int(opt.nfc / pow(2, (idx + 1)))
            self.body.add_module(
                'block%d' % (idx + 1),
                ConvBlock(max(2 * width, opt.min_nfc), max(width, opt.min_nfc),
                          opt.ker_size, opt.padd_size, 1))
        self.tail = nn.Sequential(
            nn.Conv2d(max(width, opt.min_nfc), opt.nc_im,
                      kernel_size=opt.ker_size, stride=1,
                      padding=opt.padd_size),
            nn.Tanh())

    def forward(self, x, y):
        out = self.tail(self.body(self.head(x)))
        # Center-crop y so its spatial dims match the conv output, then add.
        # int(.../2) truncates toward zero, matching the original exactly.
        crop = int((y.shape[2] - out.shape[2]) / 2)
        y = y[:, :, crop:(y.shape[2] - crop), crop:(y.shape[3] - crop)]
        return out + y
class Generator(nn.Module):
    """Residual generator with hard-wired 3x3 convs and padding 1 (spatial
    size preserved); adds a center crop of the skip input ``y``."""

    def __init__(self, opt):
        super(Generator, self).__init__()
        self.is_cuda = torch.cuda.is_available()
        width = opt.nfc
        self.head = ConvBlock(opt.nc_im, width, ker_size=3, padd=1, stride=1)
        self.body = nn.Sequential()
        # Channel count halves per body block, floored at opt.min_nfc.
        for idx in range(opt.num_layer - 2):
            width = int(opt.nfc / pow(2, (idx + 1)))
            self.body.add_module(
                'block%d' % (idx + 1),
                ConvBlock(max(2 * width, opt.min_nfc), max(width, opt.min_nfc),
                          ker_size=3, padd=1, stride=1))
        self.tail = nn.Sequential(
            nn.Conv2d(max(width, opt.min_nfc), opt.nc_im,
                      kernel_size=3, stride=1, padding=1),
            nn.Tanh())

    def forward(self, x, y):
        out = self.tail(self.body(self.head(x)))
        # Center-crop y to the conv output's spatial dims before the add.
        # int(.../2) truncates toward zero, matching the original exactly.
        crop = int((y.shape[2] - out.shape[2]) / 2)
        y = y[:, :, crop:(y.shape[2] - crop), crop:(y.shape[3] - crop)]
        return out + y
class Generator_no_res(nn.Module):
    """Same conv stack as ``Generator`` (3x3 convs, padding 1) but without
    the residual skip connection: forward takes only ``x``."""

    def __init__(self, opt):
        super(Generator_no_res, self).__init__()
        self.is_cuda = torch.cuda.is_available()
        width = opt.nfc
        self.head = ConvBlock(opt.nc_im, width, ker_size=3, padd=1, stride=1)
        self.body = nn.Sequential()
        # Channel count halves per body block, floored at opt.min_nfc.
        for idx in range(opt.num_layer - 2):
            width = int(opt.nfc / pow(2, (idx + 1)))
            self.body.add_module(
                'block%d' % (idx + 1),
                ConvBlock(max(2 * width, opt.min_nfc), max(width, opt.min_nfc),
                          ker_size=3, padd=1, stride=1))
        self.tail = nn.Sequential(
            nn.Conv2d(max(width, opt.min_nfc), opt.nc_im,
                      kernel_size=3, stride=1, padding=1),
            nn.Tanh())

    def forward(self, x):
        # Straight pipeline, no skip/residual term.
        return self.tail(self.body(self.head(x)))