"""Basic blocks for the lienar networks"""
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
from torch import nn
class AbstractLinearNetwork(nn.Module, metaclass=ABCMeta):
"""Abstract class for a network to combine the features from all the inputs."""
def __init__(
self,
in_features: int,
out_features: int,
):
"""Abstract class for a network to combine the features from all the inputs.
Args:
in_features: Number of input features.
out_features: Number of output features.
"""
super().__init__()
def cat_modes(self, x):
"""Concatenate modes of input data into 1D feature vector"""
if isinstance(x, OrderedDict):
return torch.cat([value for key, value in x.items()], dim=1)
elif isinstance(x, torch.Tensor):
return x
else:
raise ValueError(f"Input of unexpected type {type(x)}")
@abstractmethod
def forward(self):
"""Run model forward"""
pass
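

# Illustrative sketch, not part of the original module: a minimal concrete subclass
# showing how cat_modes is intended to be used by implementations. The class name and
# the single linear head are assumptions made for the example.
class ExampleLinearNetwork(AbstractLinearNetwork):
    """Concatenate all input modes and project them to out_features with one linear layer."""

    def __init__(
        self,
        in_features: int,
        out_features: int,
    ):
        """Concatenate all input modes and project them to out_features with one linear layer.

        Args:
            in_features: Number of input features after concatenating all modes.
            out_features: Number of output features.
        """
        super().__init__(in_features=in_features, out_features=out_features)
        self.head = nn.Linear(in_features=in_features, out_features=out_features)

    def forward(self, x):
        """Run model forward"""
        # x may be an OrderedDict of per-mode tensors or a single tensor
        return self.head(self.cat_modes(x))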


class ResidualLinearBlock(nn.Module):
    """A 1D fully-connected residual block using ELU activations and including optional dropout."""

    def __init__(
        self,
        in_features: int,
        n_layers: int = 2,
        dropout_frac: float = 0.0,
    ):
        """A 1D fully-connected residual block using ELU activations and including optional dropout.

        Args:
            in_features: Number of input features.
            n_layers: Number of layers in residual pathway.
            dropout_frac: Probability of an element to be zeroed.
        """
        super().__init__()

        layers = []
        for _ in range(n_layers):
            layers += [
                nn.ELU(),
                nn.Linear(
                    in_features=in_features,
                    out_features=in_features,
                ),
                nn.Dropout(p=dropout_frac),
            ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Run model forward"""
        return self.model(x) + x
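

# Usage sketch (illustrative only; the feature size, batch size, and dropout value
# below are arbitrary assumptions). The block maps (batch, in_features) to
# (batch, in_features), so it can be stacked without changing the feature width:
#
#     block = ResidualLinearBlock(in_features=64, n_layers=2, dropout_frac=0.1)
#     y = block(torch.randn(8, 64))  # y.shape == torch.Size([8, 64])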


class ResidualLinearBlock2(nn.Module):
    """Residual block of 'full pre-activation' similar to the block in figure 4(e) of [1].

    This was the best performing residual block tested in the study. This implementation differs
    from that block only by using LeakyReLU activation to avoid dead neurons, and by including
    optional dropout in the residual branch. This is also a 1D fully-connected residual block
    rather than a 2D convolutional block.

    Sources:
        [1] https://arxiv.org/pdf/1603.05027.pdf
    """

    def __init__(
        self,
        in_features: int,
        n_layers: int = 2,
        dropout_frac: float = 0.0,
    ):
        """Residual block of 'full pre-activation' similar to the block in figure 4(e) of [1].

        Sources:
            [1] https://arxiv.org/pdf/1603.05027.pdf

        Args:
            in_features: Number of input features.
            n_layers: Number of layers in residual pathway.
            dropout_frac: Probability of an element to be zeroed.
        """
        super().__init__()

        layers = []
        for _ in range(n_layers):
            layers += [
                nn.BatchNorm1d(in_features),
                nn.Dropout(p=dropout_frac),
                nn.LeakyReLU(),
                nn.Linear(
                    in_features=in_features,
                    out_features=in_features,
                ),
            ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Run model forward"""
        return self.model(x) + x
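

# Minimal smoke test, added here as an illustrative sketch: it is not part of the
# original module, and the batch size and feature counts below are arbitrary assumptions.
if __name__ == "__main__":
    batch, features, out_features = 4, 32, 8
    x = torch.randn(batch, features)

    # Both residual blocks preserve the feature dimension, so they can be stacked freely.
    block = ResidualLinearBlock(in_features=features, n_layers=2, dropout_frac=0.1)
    block2 = ResidualLinearBlock2(in_features=features, n_layers=2, dropout_frac=0.1)
    assert block(x).shape == (batch, features)
    assert block2(x).shape == (batch, features)

    # The example network accepts either a single tensor or an OrderedDict of modes
    # whose concatenated width equals in_features.
    net = ExampleLinearNetwork(in_features=features, out_features=out_features)
    modes = OrderedDict(a=x[:, :16], b=x[:, 16:])
    assert net(x).shape == (batch, out_features)
    assert net(modes).shape == (batch, out_features)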