-
Notifications
You must be signed in to change notification settings - Fork 19
/
Copy path: Dropout.py
51 lines (42 loc) · 1.56 KB
/
Dropout.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# Dropout
import torch
import torch.nn as nn
import torch.nn.functional as F
class DropoutNet(nn.Module):
    """Two-layer feed-forward network with dropout regularization.

    Dropout randomly zeroes hidden activations during training, which
    discourages co-adaptation of neurons and helps prevent overfitting.
    (During evaluation, `nn.Dropout` is a no-op.)

    Args:
        input_size (int): Number of input features.
        hidden_size (int): Number of hidden units.
        output_size (int): Number of output features.
        dropout_p (float): Probability that each hidden activation is zeroed.
    """

    def __init__(self, input_size, hidden_size, output_size, dropout_p=0.5):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.dropout = nn.Dropout(p=dropout_p)
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the forward pass.

        Args:
            x (torch.Tensor): Input batch of shape (N, input_size).

        Returns:
            torch.Tensor: Logits of shape (N, output_size).
        """
        # Linear -> ReLU -> dropout, then project to the output size.
        hidden = self.dropout(F.relu(self.fc1(x)))
        return self.fc2(hidden)
def test_dropout():
    """Smoke-test DropoutNet: build a network, run one forward pass,
    and verify the output shape.

    The network is switched to eval mode so dropout is inactive and the
    forward pass is deterministic given the input, and the pass runs under
    ``torch.no_grad()`` since no gradients are needed for a shape check.

    Returns:
        torch.Tensor: The output of the forward pass, shape (1, output_size).
    """
    input_size = 20
    hidden_size = 50
    output_size = 10
    dropout_p = 0.5
    net = DropoutNet(input_size, hidden_size, output_size, dropout_p)
    net.eval()  # disable dropout: the original ran in training mode, making output nondeterministic
    input_tensor = torch.randn(1, input_size)
    with torch.no_grad():  # inference only; skip building an autograd graph
        output_tensor = net(input_tensor)
    # The original printed the shape but never checked it; verify it here.
    assert output_tensor.shape == (1, output_size), output_tensor.shape
    print("Output tensor shape:", output_tensor.shape)
    return output_tensor