diff --git a/backends/test/suite/operators/test_avgpool1d.py b/backends/test/suite/operators/test_avgpool1d.py new file mode 100644 index 00000000000..0b2d001de01 --- /dev/null +++ b/backends/test/suite/operators/test_avgpool1d.py @@ -0,0 +1,155 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +from executorch.backends.test.suite.flow import TestFlow + +from executorch.backends.test.suite.operators import ( + dtype_test, + operator_test, + OperatorTest, +) + + +class Model(torch.nn.Module): + def __init__( + self, + kernel_size=3, + stride=None, + padding=0, + ceil_mode=False, + count_include_pad=True, + ): + super().__init__() + self.avgpool = torch.nn.AvgPool1d( + kernel_size=kernel_size, + stride=stride, + padding=padding, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + ) + + def forward(self, x): + return self.avgpool(x) + + +@operator_test +class AvgPool1d(OperatorTest): + @dtype_test + def test_avgpool1d_dtype(self, flow: TestFlow, dtype) -> None: + # Input shape: (batch_size, channels, length) + self._test_op( + Model().to(dtype), + ((torch.rand(1, 8, 100) * 10).to(dtype),), + flow, + ) + + def test_avgpool1d_kernel_size(self, flow: TestFlow) -> None: + # Test with different kernel sizes + self._test_op( + Model(kernel_size=1), + (torch.randn(1, 8, 100),), + flow, + ) + self._test_op( + Model(kernel_size=5), + (torch.randn(1, 8, 100),), + flow, + ) + + def test_avgpool1d_stride(self, flow: TestFlow) -> None: + # Test with different stride values + self._test_op( + Model(stride=2), + (torch.randn(1, 8, 100),), + flow, + ) + self._test_op( + Model(stride=3), + (torch.randn(1, 8, 100),), + flow, + ) + + def test_avgpool1d_padding(self, flow: TestFlow) -> None: + # Test with different padding values + self._test_op( + Model(padding=1), + 
(torch.randn(1, 8, 100),),
+            flow,
+        )
+        self._test_op(
+            Model(kernel_size=5, padding=2),
+            (torch.randn(1, 8, 100),),
+            flow,
+        )
+
+    def test_avgpool1d_ceil_mode(self, flow: TestFlow) -> None:
+        # Test with ceil_mode=True
+        self._test_op(
+            Model(ceil_mode=True),
+            (torch.randn(1, 8, 100),),
+            flow,
+        )
+
+    def test_avgpool1d_count_include_pad(self, flow: TestFlow) -> None:
+        # Test with count_include_pad=False
+        self._test_op(
+            Model(padding=1, count_include_pad=False),
+            (torch.randn(1, 8, 100),),
+            flow,
+        )
+
+    def test_avgpool1d_batch_sizes(self, flow: TestFlow) -> None:
+        # Test with batch inputs
+        self._test_op(
+            Model(),
+            (torch.randn(2, 8, 100),),
+            flow,
+        )
+        self._test_op(
+            Model(),
+            (torch.randn(8, 8, 100),),
+            flow,
+        )
+        self._test_op(
+            Model(),
+            (torch.randn(16, 8, 100),),
+            flow,
+        )
+
+    def test_avgpool1d_input_sizes(self, flow: TestFlow) -> None:
+        # Test with different input sizes
+        self._test_op(
+            Model(),
+            (torch.randn(1, 4, 100),),
+            flow,
+        )
+        self._test_op(
+            Model(),
+            (torch.randn(1, 16, 100),),
+            flow,
+        )
+
+    def test_avgpool1d_combinations(self, flow: TestFlow) -> None:
+        # Test with combinations of parameters
+        self._test_op(
+            Model(kernel_size=2, stride=2, padding=1),
+            (torch.randn(1, 8, 100),),
+            flow,
+        )
+        self._test_op(
+            Model(kernel_size=3, stride=2, padding=1, ceil_mode=True),
+            (torch.randn(1, 8, 100),),
+            flow,
+        )
+        self._test_op(
+            Model(kernel_size=2, stride=2, padding=1, count_include_pad=False),
+            (torch.randn(1, 8, 100),),
+            flow,
+        )
diff --git a/backends/test/suite/operators/test_avgpool2d.py b/backends/test/suite/operators/test_avgpool2d.py
new file mode 100644
index 00000000000..97bcb00372a
--- /dev/null
+++ b/backends/test/suite/operators/test_avgpool2d.py
@@ -0,0 +1,168 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-unsafe
+
+
+import torch
+from executorch.backends.test.suite.flow import TestFlow
+
+from executorch.backends.test.suite.operators import (
+    dtype_test,
+    operator_test,
+    OperatorTest,
+)
+
+
+class Model(torch.nn.Module):
+    def __init__(
+        self,
+        kernel_size=3,
+        stride=None,
+        padding=0,
+        ceil_mode=False,
+        count_include_pad=True,
+    ):
+        super().__init__()
+
+        # Create the avgpool layer with the given parameters
+        # torch.nn.AvgPool2d accepts both int and tuple types for kernel_size, stride, and padding
+        self.avgpool = torch.nn.AvgPool2d(
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            ceil_mode=ceil_mode,
+            count_include_pad=count_include_pad,
+        )
+
+    def forward(self, x):
+        return self.avgpool(x)
+
+
+@operator_test
+class AvgPool2d(OperatorTest):
+    @dtype_test
+    def test_avgpool2d_dtype(self, flow: TestFlow, dtype) -> None:
+        # Input shape: (batch_size, channels, height, width)
+        self._test_op(
+            Model().to(dtype),
+            ((torch.rand(1, 8, 20, 20) * 10).to(dtype),),
+            flow,
+        )
+
+    def test_avgpool2d_kernel_size(self, flow: TestFlow) -> None:
+        # Test with different kernel sizes
+        self._test_op(
+            Model(kernel_size=1),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+        self._test_op(
+            Model(kernel_size=5),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+        self._test_op(
+            Model(kernel_size=(3, 2)),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+
+    def test_avgpool2d_stride(self, flow: TestFlow) -> None:
+        # Test with different stride values
+        self._test_op(
+            Model(stride=2),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+        self._test_op(
+            Model(stride=(2, 1)),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+
+    def test_avgpool2d_padding(self, flow: TestFlow) -> None:
+        # Test with different padding values
+        self._test_op(
+            Model(padding=1),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+        self._test_op(
+            Model(kernel_size=5, padding=(1, 2)),
+            (torch.randn(1, 8, 20, 20),),
+            flow,
+        )
+
+    def test_avgpool2d_ceil_mode(self, flow: TestFlow) -> None:
+        # Test with
ceil_mode=True + self._test_op( + Model(ceil_mode=True), + (torch.randn(1, 8, 20, 20),), + flow, + ) + + def test_avgpool2d_count_include_pad(self, flow: TestFlow) -> None: + # Test with count_include_pad=False + self._test_op( + Model(padding=1, count_include_pad=False), + (torch.randn(1, 8, 20, 20),), + flow, + ) + + def test_avgpool2d_batch_sizes(self, flow: TestFlow) -> None: + # Test with batch inputs + self._test_op( + Model(), + (torch.randn(2, 8, 20, 20),), + flow, + ) + self._test_op( + Model(), + (torch.randn(8, 8, 20, 20),), + flow, + ) + self._test_op( + Model(), + (torch.randn(16, 8, 20, 20),), + flow, + ) + + def test_avgpool2d_input_sizes(self, flow: TestFlow) -> None: + # Test with different input sizes + self._test_op( + Model(), + (torch.randn(1, 4, 20, 20),), + flow, + ) + self._test_op( + Model(), + (torch.randn(1, 16, 20, 20),), + flow, + ) + + def test_avgpool2d_combinations(self, flow: TestFlow) -> None: + # Test with combinations of parameters + self._test_op( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 8, 20, 20),), + flow, + ) + self._test_op( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 8, 21, 21),), + flow, + ) + self._test_op( + Model( + kernel_size=(2, 3), + stride=(2, 1), + padding=(1, 0), + count_include_pad=False, + ), + (torch.randn(1, 8, 20, 20),), + flow, + ) diff --git a/backends/test/suite/operators/test_avgpool3d.py b/backends/test/suite/operators/test_avgpool3d.py new file mode 100644 index 00000000000..9e9b05907bc --- /dev/null +++ b/backends/test/suite/operators/test_avgpool3d.py @@ -0,0 +1,163 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +import torch +from executorch.backends.test.suite.flow import TestFlow + +from executorch.backends.test.suite.operators import ( + dtype_test, + operator_test, + OperatorTest, +) + + +class Model(torch.nn.Module): + def __init__( + self, + kernel_size=3, + stride=None, + padding=0, + ceil_mode=False, + count_include_pad=True, + ): + super().__init__() + + # Create the avgpool layer with the given parameters + # torch.nn.AvgPool3d accepts both int and tuple types for kernel_size, stride, and padding + self.avgpool = torch.nn.AvgPool3d( + kernel_size=kernel_size, + stride=stride, + padding=padding, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + ) + + def forward(self, x): + return self.avgpool(x) + + +@operator_test +class AvgPool3d(OperatorTest): + @dtype_test + def test_avgpool3d_dtype(self, flow: TestFlow, dtype) -> None: + # Input shape: (batch_size, channels, depth, height, width) + self._test_op( + Model().to(dtype), + ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), + flow, + ) + + def test_avgpool3d_kernel_size(self, flow: TestFlow) -> None: + # Test with different kernel sizes + self._test_op( + Model(kernel_size=1), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_stride(self, flow: TestFlow) -> None: + # Test with different stride values + self._test_op( + Model(stride=2), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + self._test_op( + Model(stride=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_padding(self, flow: TestFlow) -> None: + # Test with different padding values + self._test_op( + Model(padding=1), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + self._test_op( + Model(padding=(0, 1, 1)), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_ceil_mode(self, flow: TestFlow) -> None: + # Test with ceil_mode=True + self._test_op( + Model(ceil_mode=True), + 
(torch.randn(1, 4, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_count_include_pad(self, flow: TestFlow) -> None: + # Test with count_include_pad=False + self._test_op( + Model(padding=1, count_include_pad=False), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_batch_sizes(self, flow: TestFlow) -> None: + # Test with batch inputs + self._test_op( + Model(), + (torch.randn(2, 4, 8, 8, 8),), + flow, + ) + self._test_op( + Model(), + (torch.randn(8, 4, 8, 8, 8),), + flow, + ) + self._test_op( + Model(), + (torch.randn(16, 4, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_input_sizes(self, flow: TestFlow) -> None: + # Test with different input sizes + self._test_op( + Model(), + (torch.randn(1, 2, 8, 8, 8),), + flow, + ) + self._test_op( + Model(), + (torch.randn(1, 8, 8, 8, 8),), + flow, + ) + + def test_avgpool3d_combinations(self, flow: TestFlow) -> None: + # Test with combinations of parameters + self._test_op( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 4, 8, 8, 8),), + flow, + ) + self._test_op( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 4, 10, 10, 10),), + flow, + ) + self._test_op( + Model( + kernel_size=(2, 2, 2), + stride=(1, 2, 2), + padding=(0, 1, 1), + count_include_pad=False, + ), + (torch.randn(1, 4, 8, 10, 10),), + flow, + ) diff --git a/backends/test/suite/operators/test_lstm.py b/backends/test/suite/operators/test_lstm.py new file mode 100644 index 00000000000..91dd73c9052 --- /dev/null +++ b/backends/test/suite/operators/test_lstm.py @@ -0,0 +1,208 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +import torch +from executorch.backends.test.suite.flow import TestFlow + +from executorch.backends.test.suite.operators import ( + dtype_test, + operator_test, + OperatorTest, +) + + +class Model(torch.nn.Module): + def __init__( + self, + input_size=64, + hidden_size=32, + num_layers=1, + bias=True, + batch_first=True, + dropout=0.0, + bidirectional=False, + ): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=input_size, + hidden_size=hidden_size, + num_layers=num_layers, + bias=bias, + batch_first=batch_first, + dropout=dropout, + bidirectional=bidirectional, + ) + + def forward(self, x): + return self.lstm(x)[0] # Return only the output, not the hidden states + + +@operator_test +class LSTM(OperatorTest): + @dtype_test + def test_lstm_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model(num_layers=2).to(dtype), + ((torch.rand(1, 10, 64) * 10).to(dtype),), # (batch=1, seq_len, input_size) + flow, + ) + + @dtype_test + def test_lstm_no_bias_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op( + Model(num_layers=2, bias=False).to(dtype), + ((torch.rand(1, 10, 64) * 10).to(dtype),), + flow, + ) + + def test_lstm_feature_sizes(self, flow: TestFlow) -> None: + self._test_op( + Model(input_size=32, hidden_size=16), + (torch.randn(1, 8, 32),), # (batch=1, seq_len, input_size) + flow, + ) + self._test_op( + Model(input_size=128, hidden_size=64), + (torch.randn(1, 12, 128),), + flow, + ) + self._test_op( + Model(input_size=256, hidden_size=128), + (torch.randn(1, 6, 256),), + flow, + ) + self._test_op( + Model(input_size=16, hidden_size=32), + (torch.randn(1, 5, 16),), + flow, + ) + + def test_lstm_batch_sizes(self, flow: TestFlow) -> None: + self._test_op( + Model(), + (torch.randn(8, 10, 64),), + flow, + ) + self._test_op( + Model(), + (torch.randn(32, 10, 64),), + flow, + ) + self._test_op( + Model(), + (torch.randn(100, 10, 64),), + flow, + ) + + def test_lstm_seq_lengths(self, flow: TestFlow) -> None: + 
self._test_op( + Model(), + (torch.randn(1, 5, 64),), + flow, + ) + self._test_op( + Model(), + (torch.randn(1, 20, 64),), + flow, + ) + self._test_op( + Model(), + (torch.randn(1, 50, 64),), + flow, + ) + + def test_lstm_batch_first_false(self, flow: TestFlow) -> None: + self._test_op( + Model(batch_first=False), + (torch.randn(10, 1, 64),), # (seq_len, batch=1, input_size) + flow, + ) + + def test_lstm_num_layers(self, flow: TestFlow) -> None: + self._test_op( + Model(num_layers=2), + (torch.randn(1, 10, 64),), + flow, + ) + self._test_op( + Model(num_layers=3), + (torch.randn(1, 10, 64),), + flow, + ) + + def test_lstm_bidirectional(self, flow: TestFlow) -> None: + self._test_op( + Model(bidirectional=True), + (torch.randn(1, 10, 64),), + flow, + ) + + def test_lstm_with_dropout(self, flow: TestFlow) -> None: + # Note: Dropout is only effective with num_layers > 1 + self._test_op( + Model(num_layers=2, dropout=0.2), + (torch.randn(1, 10, 64),), + flow, + ) + + def test_lstm_with_initial_states(self, flow: TestFlow) -> None: + # Create a model that accepts initial states + class ModelWithStates(torch.nn.Module): + def __init__(self): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=64, + hidden_size=32, + num_layers=2, + batch_first=True, + ) + + def forward(self, x, h0, c0): + return self.lstm(x, (h0, c0))[0] # Return only the output + + batch_size = 1 + num_layers = 2 + hidden_size = 32 + + self._test_op( + ModelWithStates(), + ( + torch.randn(batch_size, 10, 64), # input + torch.randn(num_layers, batch_size, hidden_size), # h0 + torch.randn(num_layers, batch_size, hidden_size), # c0 + ), + flow, + ) + + def test_lstm_return_hidden_states(self, flow: TestFlow) -> None: + # Create a model that returns both output and hidden states + class ModelWithHiddenStates(torch.nn.Module): + def __init__(self): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=64, + hidden_size=32, + num_layers=2, + batch_first=True, + ) + + def forward(self, x): 
+ # Return the complete output tuple: (output, (h_n, c_n)) + output, (h_n, c_n) = self.lstm(x) + return output, h_n, c_n + + batch_size = 1 + seq_len = 10 + input_size = 64 + + self._test_op( + ModelWithHiddenStates(), + (torch.randn(batch_size, seq_len, input_size),), + flow, + )