
Commit 7863c17

zou3519 authored and facebook-github-bot committed
Fix convtranspose3d output_size calculation (pytorch#12952)
Summary: Closes pytorch#2119. There was a small bug where the output_size got sliced with `[-2:]` when we really meant to slice it as `[2:]` (to remove the batch and channel dimensions). Added a new test for this.

Pull Request resolved: pytorch#12952

Differential Revision: D10510678

Pulled By: zou3519

fbshipit-source-id: 4c04a5007fc6d002e1806d6fe981b43d33d6a4f2
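A quick sketch of the slicing difference (illustration only, not part of the commit): for ConvTranspose3d the number of spatial dimensions k is 3, so a 5-element output_size of the form (N, C, D, H, W) needs its first two entries dropped rather than its last two kept.

# Illustration only: [-2:] vs [2:] on a 5d output_size (N, C, D, H, W)
output_size = [1, 2, 2, 2, 2]
k = 3                  # spatial dims for a 3d transposed conv: D, H, W
output_size[-2:]       # [2, 2]    -> length 2 != k, so a ValueError follows
output_size[2:]        # [2, 2, 2] -> the intended D, H, W target sizes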
1 parent 046672e commit 7863c17

2 files changed: 7 additions, 1 deletion

test/test_nn.py

Lines changed: 6 additions & 0 deletions
@@ -3584,6 +3584,12 @@ def test_ConvTranspose2d_output_size(self):
                 else:
                     self.assertRaises(ValueError, lambda: m(i, (h, w)))
 
+    def test_ConvTranspose3d_correct_output_size(self):
+        # Check that ConvTranspose3d can take a 5d output_size.
+        m = nn.ConvTranspose3d(2, 2, 2)
+        i = torch.rand(1, 2, 1, 1, 1)
+        out = m(i, output_size=(1, 2, 2, 2, 2))
+
     def _test_Conv2d_naive_groups(self, device="cpu", dtype=torch.float):
         # Check that grouped convolutions matches two half convolutions
         m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)

torch/nn/modules/conv.py

Lines changed: 1 addition & 1 deletion
@@ -459,7 +459,7 @@ def _output_padding(self, input, output_size):
         output_size = list(output_size)
         k = input.dim() - 2
         if len(output_size) == k + 2:
-            output_size = output_size[-2:]
+            output_size = output_size[2:]
         if len(output_size) != k:
             raise ValueError(
                 "output_size must have {} or {} elements (got {})"

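For context, a minimal end-to-end call that exercises the fixed path (it mirrors the new test above and uses the same small shapes):

import torch
import torch.nn as nn

# With the [2:] slice, a full 5d output_size (N, C, D, H, W) is accepted.
m = nn.ConvTranspose3d(2, 2, 2)
i = torch.rand(1, 2, 1, 1, 1)
out = m(i, output_size=(1, 2, 2, 2, 2))  # previously raised ValueError
print(out.shape)                         # torch.Size([1, 2, 2, 2, 2])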