
Commit 7c12777

Homogenize dataloader names
1 parent 8cb9624

4 files changed (+18 -19 lines)

pytorch_tutorial/logistic_regression/README.md (+5 -6)
@@ -92,12 +92,12 @@ The PyTorch [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.d
 
 ```python
 # Create data loader for loading data as randomized batches
-blobs_dataloader = DataLoader(
+train_dataloader = DataLoader(
     list(zip(x_train, y_train)), batch_size=batch_size, shuffle=True
 )
 
 # Number of batches in an epoch (= n_samples / batch_size, rounded up)
-n_batches = len(blobs_dataloader)
+n_batches = len(train_dataloader)
 assert n_batches == math.ceil(n_samples / batch_size)
 ```

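As a standalone sanity check of the renamed loader, here is a minimal sketch; the sizes and tensors are made up for illustration and are not part of this commit:

```python
import math

import torch
from torch.utils.data import DataLoader

n_samples, batch_size = 1000, 32  # hypothetical values
x_train = torch.randn(n_samples, 2)
y_train = torch.randint(0, 3, (n_samples,))

# Same construction as in the diff: a list of (input, target) tuples
train_dataloader = DataLoader(
    list(zip(x_train, y_train)), batch_size=batch_size, shuffle=True
)

# With the default drop_last=False, the last (smaller) batch is kept,
# hence the ceiling division
assert len(train_dataloader) == math.ceil(n_samples / batch_size)  # 32 batches
```
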
@@ -132,13 +132,13 @@ assert n_params == 3 * output_dim
 
 ## Loss function
 
-This classification example uses the [cross-entropy](https://github.com/bpesquet/mlcourse/tree/main/lectures/classification_performance#assessing-performance-during-training-1) a.k.a. negative log-likelihood loss function, implemented by the [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) class.
+This multiclass classification example uses the [cross-entropy](https://github.com/bpesquet/mlcourse/tree/main/lectures/classification_performance#assessing-performance-during-training-1) a.k.a. negative log-likelihood loss function, implemented by the [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html) class.
 
 > [!NOTE]
 > PyTorch also offers the [NLLLoss](https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html#torch.nn.NLLLoss) class implementing the negative log-likelihood loss. A key difference is that `CrossEntropyLoss` expects *logits* (raw, unnormalized predictions) as inputs, and uses [LogSoftmax](https://pytorch.org/docs/stable/generated/torch.nn.LogSoftmax.html#torch.nn.LogSoftmax) to transform them into log-probabilities before computing its output. Using `CrossEntropyLoss` is equivalent to applying `LogSoftmax` followed by `NLLLoss` ([more details](https://towardsdatascience.com/cross-entropy-negative-log-likelihood-and-all-that-jazz-47a95bd2e81)).
 
 ```python
-# Use cross-entropy loss function.
+# Use cross-entropy loss function for this multiclass classification task.
 # Softmax is computed internally to convert outputs into probabilities
 criterion = nn.CrossEntropyLoss()
 ```
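
The equivalence stated in the note is easy to verify numerically. A minimal sketch with random logits, for illustration only (not code from this repository):

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
logits = torch.randn(8, 3)            # 8 samples, 3 classes (made-up shapes)
targets = torch.randint(0, 3, (8,))

ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
assert torch.allclose(ce, nll)  # CrossEntropyLoss == LogSoftmax + NLLLoss
```
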
@@ -176,7 +176,7 @@ for epoch in range(n_epochs):
     n_correct = 0
 
     # For each batch of data
-    for x_batch, y_batch in blobs_dataloader:
+    for x_batch, y_batch in train_dataloader:
         # Forward pass
         y_pred = model(x_batch)

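For context, the lines above sit inside a standard PyTorch training loop. A generic reconstruction follows; the loss computation and gradient step are assumed from common practice, not copied from the tutorial:

```python
for epoch in range(n_epochs):
    total_loss = 0
    n_correct = 0

    # For each batch of data
    for x_batch, y_batch in train_dataloader:
        # Forward pass
        y_pred = model(x_batch)

        # Compute loss value
        loss = criterion(y_pred, y_batch)

        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
```
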
@@ -310,7 +310,6 @@ def plot_decision_boundaries(model, x, y, title, device):
     plt.legend(handles=legend_elements)
 
     plt.title(title)
-    plt.tight_layout()
 
     return plt.gcf()
 ```

pytorch_tutorial/logistic_regression/test_logistic_regression.py (+4 -4)
@@ -51,12 +51,12 @@ def test_logistic_regression(show_plots=False):
     y_train = torch.from_numpy(targets).long().to(device)
 
     # Create data loader for loading data as randomized batches
-    blobs_dataloader = DataLoader(
+    train_dataloader = DataLoader(
         list(zip(x_train, y_train)), batch_size=batch_size, shuffle=True
     )
 
     # Number of batches in an epoch (= n_samples / batch_size, rounded up)
-    n_batches = len(blobs_dataloader)
+    n_batches = len(train_dataloader)
     assert n_batches == math.ceil(n_samples / batch_size)
 
     # Create a logistic regression model for the 2D dataset
@@ -71,7 +71,7 @@ def test_logistic_regression(show_plots=False):
     # Linear layers have (in_features + 1) * out_features parameters
     assert n_params == 3 * output_dim
 
-    # Use cross-entropy loss function.
+    # Use cross-entropy loss function for this multiclass classification task.
    # Softmax is computed internally to convert outputs into probabilities
     criterion = nn.CrossEntropyLoss()

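The parameter-count assertion in this hunk follows from each `nn.Linear(in_features, out_features)` holding a weight matrix plus a bias vector, i.e. (in_features + 1) * out_features parameters; with 2 input features this gives 3 * output_dim. A standalone check (the output_dim value is hypothetical):

```python
import torch.nn as nn

output_dim = 3  # hypothetical number of classes
model = nn.Linear(2, output_dim)  # 2 input features, as in the tutorial

# Weight matrix: 2 * output_dim entries; bias vector: output_dim entries
n_params = sum(p.numel() for p in model.parameters())
assert n_params == (2 + 1) * output_dim
```
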
@@ -91,7 +91,7 @@ def test_logistic_regression(show_plots=False):
         n_correct = 0
 
         # For each batch of data
-        for x_batch, y_batch in blobs_dataloader:
+        for x_batch, y_batch in train_dataloader:
             # Forward pass
             y_pred = model(x_batch)

pytorch_tutorial/multilayer_perceptron/README.md (+4 -4)
@@ -95,12 +95,12 @@ The PyTorch [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.d
 
 ```python
 # Create data loader for loading data as randomized batches
-circles_dataloader = DataLoader(
+train_dataloader = DataLoader(
     list(zip(x_train, y_train)), batch_size=batch_size, shuffle=True
 )
 
 # Number of batches in an epoch (= n_samples / batch_size, rounded up)
-n_batches = len(circles_dataloader)
+n_batches = len(train_dataloader)
 assert n_batches == math.ceil(n_samples / batch_size)
 ```

@@ -151,7 +151,7 @@ assert n_params == 3 * hidden_layer_dim + hidden_layer_dim + 1
 For this binary classification task, we use the [binary cross-entropy](https://github.com/bpesquet/mlcourse/tree/main/lectures/classification_performance#choosing-a-loss-function) loss function, implemented by the PyTorch [BCELoss](https://pytorch.org/docs/stable/generated/torch.nn.BCELoss.html) class.
 
 ```python
-# Use binary cross-entropy loss function
+# Use binary cross-entropy loss function for this binary classification task
 criterion = nn.BCELoss()
 ```

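Unlike `CrossEntropyLoss`, `BCELoss` expects probabilities in the [0, 1] range rather than raw logits, so a sigmoid must be applied beforehand. A minimal standalone sketch with made-up tensors (not code from this commit):

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
logits = torch.randn(8, 1)                     # raw scores for 8 samples
targets = torch.randint(0, 2, (8, 1)).float()  # binary labels

# BCELoss expects probabilities, hence the explicit sigmoid
bce = nn.BCELoss()(torch.sigmoid(logits), targets)

# BCEWithLogitsLoss fuses the sigmoid and is numerically more stable
bce_logits = nn.BCEWithLogitsLoss()(logits, targets)
assert torch.allclose(bce, bce_logits)
```
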
@@ -186,7 +186,7 @@ for epoch in range(n_epochs):
     n_correct = 0
 
     # For each batch of data
-    for x_batch, y_batch in circles_dataloader:
+    for x_batch, y_batch in train_dataloader:
         # Forward pass
         y_pred = model(x_batch)

pytorch_tutorial/multilayer_perceptron/test_multilayer_perceptron.py (+5 -5)
@@ -1,5 +1,5 @@
 """
-MultiLayer Perceptron (MLP) a.k.a. Feedforward Neural Network 
+MultiLayer Perceptron (MLP) a.k.a. Feedforward Neural Network
 """
 
 import math
@@ -52,12 +52,12 @@ def test_multilayer_perceptron(show_plots=False):
     assert y_train.shape == torch.Size([n_samples, 1])
 
     # Create data loader for loading data as randomized batches
-    circles_dataloader = DataLoader(
+    train_dataloader = DataLoader(
         list(zip(x_train, y_train)), batch_size=batch_size, shuffle=True
     )
 
     # Number of batches in an epoch (= n_samples / batch_size, rounded up)
-    n_batches = len(circles_dataloader)
+    n_batches = len(train_dataloader)
     assert n_batches == math.ceil(n_samples / batch_size)
 
     # Create a MultiLayer Perceptron with 2 inputs, a hidden layer and 1 output
@@ -83,7 +83,7 @@ def test_multilayer_perceptron(show_plots=False):
     # Output layer has (hidden_layer_dim + 1) * 1 parameters
     assert n_params == 3 * hidden_layer_dim + hidden_layer_dim + 1
 
-    # Use binary cross-entropy loss function
+    # Use binary cross-entropy loss function for this binary classification task
     criterion = nn.BCELoss()
 
     # Use a vanilla mini-batch stochastic gradient descent optimizer
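
The MLP parameter count decomposes the same way as in the logistic regression case: the hidden `Linear(2, hidden_layer_dim)` contributes (2 + 1) * hidden_layer_dim parameters and the output `Linear(hidden_layer_dim, 1)` contributes hidden_layer_dim + 1. A sketch (the activation functions below are assumptions about the model's layout, not copied from the tutorial):

```python
import torch.nn as nn

hidden_layer_dim = 2  # hypothetical hidden layer size
model = nn.Sequential(
    nn.Linear(2, hidden_layer_dim),
    nn.Tanh(),     # hidden activation, assumed for illustration
    nn.Linear(hidden_layer_dim, 1),
    nn.Sigmoid(),  # outputs probabilities, matching BCELoss
)

# Activations add no parameters; only the two Linear layers count
n_params = sum(p.numel() for p in model.parameters())
assert n_params == 3 * hidden_layer_dim + hidden_layer_dim + 1
```
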
@@ -102,7 +102,7 @@ def test_multilayer_perceptron(show_plots=False):
         n_correct = 0
 
         # For each batch of data
-        for x_batch, y_batch in circles_dataloader:
+        for x_batch, y_batch in train_dataloader:
             # Forward pass
             y_pred = model(x_batch)
