[GHA] Speed up PyTorch Layer unit tests (#20613)
* test
* fixed tests
* typo
* fixed tests
* rest of the tests
* fixed rsub test
* tmp fix
* Revert "tmp fix" (this reverts commit b8bf1e9492e13497895da488612c9a137ef840bc)
* fixed test params
* reset thirdparty/pugixml
* Revert "fixed rsub test" (this reverts commit 9b6be34b8666936e8124b6622fcc5185b640de92)
* fixed typo
* fixed test data
* reset test_rsub
* removed unused param
* reverted runner
* simplified call
* fixed random
* changed logical to auto mode
* Revert "fixed random" (this reverts commit 8a4f20b24641144f823a7e1f1ff92038634acf32)
* fixed test_all
* replaced random_sample with randn
* fixed rebase issue
* reverted logical splitting
* Update tests/layer_tests/pytorch_tests/test_repeat_interleave.py (Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>)
* Update tests/layer_tests/pytorch_tests/test_all.py (Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>)
* Apply suggestions from code review (Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>)
* fixed merge conflict

Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>
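The pattern behind most of the file changes below: random test inputs used to be created by np.random.* calls inside @pytest.mark.parametrize, i.e. at collection time. That slows collection down and sits badly with pytest-xdist, whose workers must all collect an identical, deterministic test list before tests can be distributed. The PR therefore parametrizes over plain shape lists and draws the random data inside the test body. A minimal sketch of the pattern, assuming the PytorchLayerTest harness from tests/layer_tests/pytorch_tests (the class, op, and shapes here are illustrative, not copied from the PR):

    import numpy as np
    import pytest
    import torch

    from pytorch_layer_test_class import PytorchLayerTest


    @pytest.mark.parametrize('input_shape', [[1, 3, 8], [2, 8, 9, 10]])  # shapes, not arrays
    class TestRelu(PytorchLayerTest):
        def _prepare_input(self):
            return (self.input_tensor,)

        def create_model(self):
            class aten_relu(torch.nn.Module):
                def forward(self, x):
                    return torch.relu(x)
            # harness convention: (model, reference net or None, aten op name)
            return aten_relu(), None, "aten::relu"

        @pytest.mark.precommit
        def test_relu(self, ie_device, precision, ir_version, input_shape):
            # random data is drawn at test run time, not at collection time
            self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
            self._test(*self.create_model(), ie_device, precision, ir_version)

With collection made deterministic, the suite can be split across workers, which is what the workflow change below enables.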
This commit is contained in:
parent 3a67d2a722
commit 4078bd9c19

Changed: .github/workflows/linux.yml (vendored), 6 changes
@@ -931,6 +931,8 @@ jobs:
      - name: Install Python API tests dependencies
        run: |
          # To enable pytest parallel features
          python3 -m pip install pytest-xdist[psutil]
          # For torchvision to OpenVINO preprocessing converter
          python3 -m pip install -r ${INSTALL_TEST_DIR}/python/preprocess/torchvision/requirements.txt

@@ -1006,7 +1008,7 @@ jobs:
          python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/py_frontend_tests --junitxml=${INSTALL_TEST_DIR}/TEST-test_py_fontend.xml

      - name: PyTorch Layer Tests
        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
        run: python3 -m pytest ${LAYER_TESTS_INSTALL_DIR}/pytorch_tests -n logical -m precommit --junitxml=${INSTALL_TEST_DIR}/TEST-pytorch.xml
        env:
          TEST_DEVICE: CPU
          TEST_PRECISION: FP16

@@ -1327,7 +1329,7 @@ jobs:
      - name: PyTorch Models Tests
        run: |
          export PYTHONPATH=${MODEL_HUB_TESTS_INSTALL_DIR}:$PYTHONPATH
          python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests/ -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
          python3 -m pytest ${MODEL_HUB_TESTS_INSTALL_DIR}/torch_tests -m ${TYPE} --html=${INSTALL_TEST_DIR}/TEST-torch_model_tests.html --self-contained-html -v
        env:
          TYPE: ${{ github.event_name == 'schedule' && 'nightly' || 'precommit'}}
          TEST_DEVICE: CPU
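The new -n logical flag comes from pytest-xdist with the psutil extra installed in the step above: it starts one worker per logical CPU core. A hedged local equivalent of the CI step, scripted through pytest.main for convenience (the path and marker are taken from the workflow; everything else is an assumption about your checkout):

    # Local approximation of the "PyTorch Layer Tests" CI step; assumes
    # pytest-xdist[psutil] is installed in the current environment.
    import pytest

    if __name__ == "__main__":
        raise SystemExit(pytest.main([
            "tests/layer_tests/pytorch_tests",  # suite path inside the repo checkout
            "-n", "logical",                    # pytest-xdist: one worker per logical core
            "-m", "precommit",                  # same marker the workflow selects
        ]))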
@@ -8,13 +8,12 @@ import torch
from pytorch_layer_test_class import PytorchLayerTest


@pytest.mark.parametrize('input_tensor', (np.random.randn(1, 2, 8, 9, 10).astype(np.float32),
                                          np.random.randn(2, 8, 9, 10).astype(np.float32)))
@pytest.mark.parametrize('output_size', ([5, 7, 9], 7))
@pytest.mark.parametrize('input_tensor', [[1, 2, 8, 9, 10], [2, 8, 9, 10]])
@pytest.mark.parametrize('output_size', [[5, 7, 9], 7])
class TestAdaptiveAvgPool3D(PytorchLayerTest):

    def _prepare_input(self):
        return (self.input_tensor,)
        return (self.input_tensor, )

    def create_model(self, output_size):
        class aten_adaptive_avg_pool3d(torch.nn.Module):

@@ -35,16 +34,16 @@ class TestAdaptiveAvgPool3D(PytorchLayerTest):
    @pytest.mark.precommit_ts_backend
    @pytest.mark.precommit_fx_backend
    def test_adaptive_avg_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size):
        self.input_tensor = input_tensor
        self.input_tensor = np.random.randn(*input_tensor).astype(np.float32)
        self._test(*self.create_model(output_size), ie_device, precision, ir_version)


@pytest.mark.parametrize('input_tensor', [np.random.randn(2, 8, 9, 10).astype(np.float32), np.random.randn(8, 9, 10).astype(np.float32)])
@pytest.mark.parametrize('output_size', ([7, 9], 7))
@pytest.mark.parametrize('input_shape', [[2, 8, 9, 10], [8, 9, 10]])
@pytest.mark.parametrize('output_size', [[7, 9], 7])
class TestAdaptiveAvgPool2D(PytorchLayerTest):

    def _prepare_input(self):
        return (self.input_tensor,)
        return (self.input_tensor, )

    def create_model(self, output_size):
        class aten_adaptive_avg_pool2d(torch.nn.Module):

@@ -64,17 +63,17 @@ class TestAdaptiveAvgPool2D(PytorchLayerTest):
    @pytest.mark.precommit
    @pytest.mark.precommit_ts_backend
    @pytest.mark.precommit_fx_backend
    def test_adaptive_avg_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size):
        self.input_tensor = input_tensor
    def test_adaptive_avg_pool2d(self, ie_device, precision, ir_version, input_shape, output_size):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(output_size), ie_device, precision, ir_version)


@pytest.mark.parametrize('input_tensor', [np.random.randn(8, 9, 10).astype(np.float32), np.random.randn(9, 10).astype(np.float32)] )
@pytest.mark.parametrize('output_size', ( 7, ))
@pytest.mark.parametrize('input_shape', [[8, 9, 10], [9, 10]])
@pytest.mark.parametrize('output_size', [7, ])
class TestAdaptiveAvgPool1D(PytorchLayerTest):

    def _prepare_input(self):
        return (self.input_tensor,)
        return (self.input_tensor, )

    def create_model(self, output_size):
        class aten_adaptive_avg_pool1d(torch.nn.Module):

@@ -94,8 +93,8 @@ class TestAdaptiveAvgPool1D(PytorchLayerTest):
    @pytest.mark.precommit
    @pytest.mark.precommit_ts_backend
    @pytest.mark.precommit_fx_backend
    def test_adaptive_avg_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size):
        self.input_tensor = input_tensor
    def test_adaptive_avg_pool1d(self, ie_device, precision, ir_version, input_shape, output_size):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(output_size), ie_device, precision, ir_version)
@@ -34,11 +34,9 @@ class TestAdaptiveMaxPool3D(PytorchLayerTest):

        return aten_adaptive_max_pool3d(output_size, return_indices), ref_net, "aten::adaptive_max_pool3d"

    @pytest.mark.parametrize('input_tensor', ([
        np.random.randn(2, 1, 1, 4, 4).astype(np.float32),
        np.random.randn(4, 1, 3, 32, 32).astype(np.float32),
        np.random.randn(1, 3, 32, 32).astype(np.float32)
    ]))
    @pytest.mark.parametrize('input_shape', [[2, 1, 1, 4, 4],
                                             [4, 1, 3, 32, 32],
                                             [1, 3, 32, 32]])
    @pytest.mark.parametrize('output_size', ([
        [2, 2, 2],
        [4, 4, 4],

@@ -53,8 +51,8 @@ class TestAdaptiveMaxPool3D(PytorchLayerTest):
    @pytest.mark.precommit_fx_backend
    @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                       reason='Ticket - 122715')
    def test_adaptive_max_pool3d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices):
        self.input_tensor = input_tensor
    def test_adaptive_max_pool3d(self, ie_device, precision, ir_version, input_shape, output_size, return_indices):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version)


@@ -81,11 +79,9 @@ class TestAdaptiveMaxPool2D(PytorchLayerTest):

        return aten_adaptive_max_pool2d(output_size, return_indices), ref_net, "aten::adaptive_max_pool2d"

    @pytest.mark.parametrize('input_tensor', ([
        np.random.randn(2, 1, 4, 4).astype(np.float32),
        np.random.randn(1, 3, 32, 32).astype(np.float32),
        np.random.randn(3, 32, 32).astype(np.float32)
    ]))
    @pytest.mark.parametrize('input_shape', [[2, 1, 4, 4],
                                             [1, 3, 32, 32],
                                             [3, 32, 32]])
    @pytest.mark.parametrize('output_size', ([
        [2, 2],
        [4, 4],

@@ -100,8 +96,8 @@ class TestAdaptiveMaxPool2D(PytorchLayerTest):
    @pytest.mark.precommit_fx_backend
    @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                       reason='Ticket - 122715')
    def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices):
        self.input_tensor = input_tensor
    def test_adaptive_max_pool2d(self, ie_device, precision, ir_version, input_shape, output_size, return_indices):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version)


@@ -128,11 +124,11 @@ class TestAdaptiveMaxPool1D(PytorchLayerTest):

        return aten_adaptive_max_pool1d(output_size, return_indices), ref_net, "aten::adaptive_max_pool1d"

    @pytest.mark.parametrize('input_tensor', ([
        np.random.randn(1, 4, 4).astype(np.float32),
        np.random.randn(3, 32, 32).astype(np.float32),
        np.random.randn(16, 8).astype(np.float32),
    ]))
    @pytest.mark.parametrize('input_shape', [
        [1, 4, 4],
        [3, 32, 32],
        [16, 8]
    ])
    @pytest.mark.parametrize('output_size', ([
        2,
        4,

@@ -147,6 +143,6 @@ class TestAdaptiveMaxPool1D(PytorchLayerTest):
    @pytest.mark.precommit_fx_backend
    @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                       reason='Ticket - 122715')
    def test_adaptive_max_pool1d(self, ie_device, precision, ir_version, input_tensor, output_size, return_indices):
        self.input_tensor = input_tensor
    def test_adaptive_max_pool1d(self, ie_device, precision, ir_version, input_shape, output_size, return_indices):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(output_size, return_indices), ie_device, precision, ir_version)
@@ -9,10 +9,11 @@ from pytorch_layer_test_class import PytorchLayerTest


@pytest.mark.parametrize('alpha', (-0.5, 0, 0.5, 1, 2))
@pytest.mark.parametrize('input_rhs', (np.random.randn(2, 5, 3, 4).astype(np.float32),
                                       np.random.randn(1, 5, 3, 4).astype(np.float32),
                                       np.random.randn(1).astype(np.float32)))
@pytest.mark.parametrize('input_shape_rhs', [
    [2, 5, 3, 4],
    [1, 5, 3, 4],
    [1]
])
class TestAdd(PytorchLayerTest):

    def _prepare_input(self):

@@ -41,8 +42,8 @@ class TestAdd(PytorchLayerTest):
    @pytest.mark.precommit_ts_backend
    @pytest.mark.precommit_fx_backend
    @pytest.mark.parametrize("op_type", ["add", "add_"])
    def test_add(self, ie_device, precision, ir_version, alpha, input_rhs, op_type):
        self.input_rhs = input_rhs
    def test_add(self, ie_device, precision, ir_version, alpha, input_shape_rhs, op_type):
        self.input_rhs = np.random.randn(*input_shape_rhs).astype(np.float32)
        self._test(*self.create_model(alpha, op_type), ie_device, precision, ir_version, use_convert_model=True)
@@ -24,10 +24,10 @@ class aten_all(torch.nn.Module):

    def forward(self, input_tensor):
        return torch.all(
            input_tensor,
            input_tensor,
            dim = self.dim
        ) if self.keepdim is None else torch.all(
            input_tensor,
            input_tensor,
            dim = self.dim,
            keepdim = self.keepdim
        )

@@ -36,32 +36,35 @@ class TestAll(PytorchLayerTest):
    def _prepare_input(self):
        return (self.input_tensor,)

    @pytest.mark.parametrize("input_tensor", [
        np.eye(5,5),
        np.zeros((5, 5)),
        np.zeros((9,8)) + 1,
        np.random.randint(0, 2, (5, 9, 7)),
        np.random.randint(0, 2, (10, 13, 11)),
        np.random.randint(0, 2, (8, 7, 6, 5, 4)),
        np.random.randint(0, 2, (11, 11), dtype=np.uint8),
        np.random.randint(0, 2, (7, 7), dtype=np.uint8),
    @pytest.mark.parametrize("input_shape, d_type", [
        (np.eye(5,5), np.int64),
        (np.zeros((5, 5)), np.int64),
        (np.zeros((9,8)) + 1, np.int64),
        ([5, 9, 7], np.int64),
        ([10, 13, 11], np.int64),
        ([8, 7, 6, 5, 4], np.int64),
        ([11, 11], np.uint8),
        ([7, 7], np.uint8)
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_all_noparams(self, input_tensor, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
        self._test(aten_all_noparam(), None, "aten::all",
    def test_all_noparams(self, input_shape, d_type, ie_device, precision, ir_version):
        if type(input_shape) is list:
            self.input_tensor = np.random.randint(0, 2, input_shape, dtype=d_type)
        else:
            self.input_tensor = input_shape
        self._test(aten_all_noparam(), None, "aten::all",
                   ie_device, precision, ir_version, trace_model=True, freeze_model=False)

    @pytest.mark.parametrize("input_tensor", [
        np.eye(5,5),
        np.zeros((5, 5)),
        np.zeros((9,8)) + 1,
        np.random.randint(0, 2, (5, 9, 7)),
        np.random.randint(0, 2, (10, 13, 11)),
        np.random.randint(0, 2, (8, 7, 6, 5, 4)),
        np.random.randint(0, 2, (11, 11), dtype=np.uint8),
        np.random.randint(0, 2, (7, 7), dtype=np.uint8),

    @pytest.mark.parametrize("input_shape, d_type", [
        (np.eye(5,5), np.int64),
        (np.zeros((5, 5)), np.int64),
        (np.zeros((9,8)) + 1, np.int64),
        ([5, 9, 7], np.int64),
        ([10, 13, 11], np.int64),
        ([8, 7, 6, 5, 4], np.int64),
        ([11, 11], np.uint8),
        ([7, 7], np.uint8)
    ])
    @pytest.mark.parametrize("keepdim", [
        True,

@@ -72,8 +75,11 @@ class TestAll(PytorchLayerTest):
    @pytest.mark.precommit
    @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                       reason='Ticket - 122715')
    def test_all(self, input_tensor, keepdim, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
        for dim in range(len(input_tensor.shape)):
            self._test(aten_all(dim, keepdim), None, "aten::all",
    def test_all(self, input_shape, d_type, keepdim, ie_device, precision, ir_version):
        if type(input_shape) is list:
            self.input_tensor = np.random.randint(0, 2, input_shape, dtype=d_type)
        else:
            self.input_tensor = input_shape
        for dim in range(len(self.input_tensor.shape)):
            self._test(aten_all(dim, keepdim), None, "aten::all",
                       ie_device, precision, ir_version, trace_model=True, freeze_model=False)
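test_all above mixes concrete arrays (np.eye, np.zeros) with shape lists in one parametrize, so the test body dispatches on the parameter's type before drawing data. The same dispatch in isolation (the materialize helper name is mine, not from the PR):

    import numpy as np

    def materialize(input_shape, d_type):
        # A bare list is a shape: draw random values in {0, 1} at run time.
        if type(input_shape) is list:
            return np.random.randint(0, 2, input_shape, dtype=d_type)
        # Anything else (np.eye(...), np.zeros(...), ...) is already concrete data.
        return input_shape

    print(materialize([5, 9, 7], np.int64).shape)  # (5, 9, 7)
    print(materialize(np.eye(5, 5), np.int64)[0])  # first row of the identity matrix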
@@ -22,24 +22,24 @@ class TestArgSort(PytorchLayerTest):

        def forward(self, input_tensor):
            if self.stable is not None:
                return torch.argsort(input_tensor,
                                     dim = self.dim,
                                     descending = self.descending,
                return torch.argsort(input_tensor,
                                     dim = self.dim,
                                     descending = self.descending,
                                     stable = self.stable
                )
            else:
                return torch.argsort(input_tensor,
                                     dim = self.dim,
                return torch.argsort(input_tensor,
                                     dim = self.dim,
                                     descending = self.descending
                )
                )
        ref_net = None

        return aten_argsort(dim, descending, stable), ref_net, "aten::argsort"

    @pytest.mark.parametrize("tensor_stable_pair", [
        (np.random.rand(1, 4), False),
        (np.random.rand(4, 4), False),
        (np.random.rand(4, 4, 4), False),
        ([1, 4], False),
        ([4, 4], False),
        ([4, 4, 4], False),
        (np.array([1, 2, 4, 6, 5, 8, 7]), False),
        (np.array([6, 5, 4, 2, 3, 0, 1]), False),
        (np.array([1, 1, 1, 2, 1, 3, 1, 4, 2, 5, 1, 2, 4, 4, 0]), True),

@@ -49,20 +49,20 @@ class TestArgSort(PytorchLayerTest):
        (np.array([[9, 8, 8], [8, 7, 7], [7, 5, 6],
                   [8, 8, 9], [7, 7, 8], [6, 5, 7],
                   [8, 9, 8], [7, 8, 7], [5, 6, 7]]), True),
        (np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                   [[5, 2, 4], [4, 9, 0], [7, 7, 9]],
        (np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                   [[5, 2, 4], [4, 9, 0], [7, 7, 9]],
                   [[5, 2, 4], [4, 9, 0], [7, 7, 9]]]), True),
        (np.array([[[3, 2, 2], [1, 2, 1], [3, 2, 2]],
                   [[1, 2, 1], [4, 3, 4], [3, 2, 2]],
        (np.array([[[3, 2, 2], [1, 2, 1], [3, 2, 2]],
                   [[1, 2, 1], [4, 3, 4], [3, 2, 2]],
                   [[3, 2, 2], [1, 2, 1], [7, 9, 9]]]), True),
        (np.array([[[2, 1, 3], [3, 2, 1], [1, 2, 3]],
                   [[2, 0, 2], [1, 2, 1], [3, 2, 8]],
        (np.array([[[2, 1, 3], [3, 2, 1], [1, 2, 3]],
                   [[2, 0, 2], [1, 2, 1], [3, 2, 8]],
                   [[3, 2, 2], [3, 2, 1], [1, 2, 3]],
                   [[2, 1, 3], [3, 2, 1], [1, 2, 3]],
                   [[2, 0, 2], [1, 2, 1], [3, 2, 8]],
                   [[2, 1, 3], [3, 2, 1], [1, 2, 3]],
                   [[2, 0, 2], [1, 2, 1], [3, 2, 8]],
                   [[3, 2, 2], [3, 2, 1], [1, 2, 3]],
                   [[2, 1, 3], [3, 2, 1], [1, 2, 3]],
                   [[2, 0, 2], [1, 2, 1], [3, 2, 8]],
                   [[2, 1, 3], [3, 2, 1], [1, 2, 3]],
                   [[2, 0, 2], [1, 2, 1], [3, 2, 8]],
                   [[3, 2, 2], [3, 2, 1], [1, 2, 3]]]), True)
    ])
    @pytest.mark.parametrize("descending", [

@@ -72,7 +72,11 @@ class TestArgSort(PytorchLayerTest):
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_argsort(self, tensor_stable_pair, descending, ie_device, precision, ir_version):
        self.input_tensor, stable = tensor_stable_pair
        input_shape, stable = tensor_stable_pair
        if type(input_shape) is list:
            self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        else:
            self.input_tensor = input_shape
        dims = len(self.input_tensor.shape)
        for dim in range(-dims, dims):
            stable_values = [True] if stable else [True, False, None]
@@ -41,41 +41,59 @@ class TestFloorDivide(PytorchLayerTest):

        return aten_floor_divide(), ref_net, "aten::floor_divide"

    @pytest.mark.parametrize('input_tensor', ([
        np.random.randn(5).astype(np.float32),
        np.random.randn(5, 5, 1).astype(np.float32),
        np.random.randn(1, 1, 5, 5).astype(np.float32),
    @pytest.mark.parametrize('input_tensor',
                             ([
                                 [5], [5, 5, 1], [1, 1, 5, 5],
    ]))
    @pytest.mark.parametrize('other_tensor', ([
        np.array([[0.5]]).astype(np.float32),
        np.random.randn(5).astype(np.float32),
        np.random.randn(5, 1).astype(np.float32),
        np.random.randn(1, 5).astype(np.float32),
    @pytest.mark.parametrize('other_tensor',
                             ([
                                 np.array([[0.5]]).astype(np.float32), [5], [5, 1], [1, 5]
    ]))
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.xfail(condition=platform.system() == 'Darwin' and platform.machine() == 'arm64',
                       reason='Ticket - 122715')
    def test_floor_divide(self, input_tensor, other_tensor, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
        self.other_tensor = other_tensor
        if type(input_tensor) is list:
            self.input_tensor = np.random.randn(*input_tensor).astype(np.float32)
        else:
            self.input_tensor = input_tensor
        if type(other_tensor) is list:
            self.other_tensor = np.random.randn(*other_tensor).astype(np.float32)
        else:
            self.other_tensor = other_tensor
        self._test(*self.create_model(), ie_device, precision, ir_version, trace_model=True, use_convert_model=True)

    @pytest.mark.parametrize('input_tensor', ([
        np.random.randint(low=0, high=10, size=5).astype(np.float32),
        np.random.randint(low=1, high=10, size=(5, 5, 1)).astype(np.float32),
        np.random.randint(low=1, high=10, size=(1, 1, 5, 5)).astype(np.float32),
    ]))
    @pytest.mark.parametrize('other_tensor', ([
        np.array([[2]]).astype(np.float32),
        np.random.randint(low=1, high=10, size=5).astype(np.float32),
        np.random.randint(low=1, high=10, size=(5, 1)).astype(np.float32),
        np.random.randint(low=1, high=10, size=(1, 5)).astype(np.float32),
    ]))
    @pytest.mark.parametrize('input_data',
                             [
                                 { "tensor": [5], "low": 0, "high": 10 },
                                 { "tensor": [5, 5, 1], "low": 1, "high": 10 },
                                 { "tensor": [1, 1, 5, 5], "low": 1, "high": 10 }
                             ])
    @pytest.mark.parametrize('other_data',
                             [
                                 { "tensor": np.array([[2]]).astype(np.float32) },
                                 { "tensor": [5], "low": 1, "high": 10 },
                                 { "tensor": [5, 1], "low": 1, "high": 10 },
                                 { "tensor": [5, 1], "low": 1, "high": 10 }
                             ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_floor_divide_int(self, input_tensor, other_tensor, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
        self.other_tensor = other_tensor
    def test_floor_divide_int(self, input_data, other_data, ie_device, precision, ir_version):
        input_tensor = input_data["tensor"]
        if type(input_tensor) is list:
            self.input_tensor = np.random.randint(low=input_data["low"],
                                                  high=input_data["high"],
                                                  size=input_tensor).astype(np.float32)
        else:
            self.input_tensor = input_tensor

        other_tensor = other_data["tensor"]
        if type(other_tensor) is list:
            self.other_tensor = np.random.randint(low=other_data["low"],
                                                  high=other_data["high"],
                                                  size=other_tensor).astype(np.float32)
        else:
            self.other_tensor = other_tensor
        self.create_model = self.create_model_int
        self._test(*self.create_model(), ie_device, precision, ir_version)
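test_floor_divide_int above goes a step further: each parametrize entry is a dict carrying either a ready tensor or a shape plus the randint bounds to draw with, so integer test data keeps its intended value range while still being generated at run time. The decoding step in isolation (helper name is mine):

    import numpy as np

    def materialize(data):
        tensor = data["tensor"]
        if type(tensor) is list:  # a shape: draw integers within the given bounds
            return np.random.randint(low=data["low"], high=data["high"],
                                     size=tensor).astype(np.float32)
        return tensor  # already a concrete array

    lhs = materialize({"tensor": [5, 5, 1], "low": 1, "high": 10})
    rhs = materialize({"tensor": np.array([[2]]).astype(np.float32)})
    print(np.floor_divide(lhs, rhs).shape)  # (5, 5, 1) after broadcasting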
@@ -32,10 +32,10 @@ class TestIndexPut_SingleIndices(PytorchLayerTest):
        "input_data",
        (
            {
                "input_tensor": np.random.randn(5).astype(np.float32),
                "input_shape": [5],
                "values": np.array(11).astype(np.float32)},
            {
                "input_tensor": np.random.randn(3, 3).astype(np.float32),
                "input_shape": [3, 3],
                "values": np.array([10, 11, 12]).astype(np.float32),
            },
        ),

@@ -54,7 +54,7 @@ class TestIndexPut_SingleIndices(PytorchLayerTest):
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_index_put_single_indices(self, ie_device, precision, ir_version, input_data, indices, accumulate):
        self.input_tensor = input_data["input_tensor"]
        self.input_tensor = np.random.randn(*input_data["input_shape"]).astype(np.float32)
        self.values = input_data["values"]
        self._test(*self.create_model(indices, accumulate), ie_device, precision, ir_version)

@@ -83,11 +83,11 @@ class TestIndexPut_ManyIndices(PytorchLayerTest):
        "input_data",
        (
            {
                "input_tensor": np.random.randn(3, 3).astype(np.float32),
                "input_shape": [3, 3],
                "values": np.array(12).astype(np.float32)
            },
            {
                "input_tensor": np.random.randn(3, 3, 3).astype(np.float32),
                "input_shape": [3, 3, 3],
                "values": np.array([10, 11, 12]).astype(np.float32),
            },
        ),

@@ -107,7 +107,7 @@ class TestIndexPut_ManyIndices(PytorchLayerTest):
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_index_put_many_indices(self, ie_device, precision, ir_version, input_data, indices, accumulate):
        self.input_tensor = input_data["input_tensor"]
        self.input_tensor = np.random.randn(*input_data["input_shape"]).astype(np.float32)
        self.values = input_data["values"]
        self._test(*self.create_model(indices, accumulate), ie_device, precision, ir_version)

@@ -135,11 +135,11 @@ class TestNonZero_IndexPut(PytorchLayerTest):
        "input_data",
        (
            {
                "input_tensor": np.random.randn(3).astype(np.float32),
                "input_shape": [3],
                "values": np.array(11).astype(np.float32),
            },
            {
                "input_tensor": np.random.randn(3, 3).astype(np.float32),
                "input_shape": [3, 3],
                "values": np.array([10, 11, 12]).astype(np.float32),
            },
        ),

@@ -147,19 +147,22 @@ class TestNonZero_IndexPut(PytorchLayerTest):
    @pytest.mark.parametrize(
        "indices",
        (
            (np.random.randint(low=0, high=2, size=(1,)), np.random.randint(low=0, high=2, size=(1,))),
            (np.random.randint(low=0, high=2, size=(2,)), np.random.randint(low=0, high=2, size=(2,))),
            (np.array([0, 1, 0]), np.array([1, 1, 0])),
            (np.ones(shape=(3,)), np.ones(shape=(3,))),
            (np.ones(shape=(3,)), np.zeros(shape=(3,))),
            [[1, ], [1, ]],
            [[2, ], [2, ]],
            [np.array([0, 1, 0]), np.array([1, 1, 0])],
            [np.ones(shape=(3,)), np.ones(shape=(3,))],
            [np.ones(shape=(3,)), np.zeros(shape=(3,))],
        ),
    )
    @pytest.mark.parametrize("accumulate", (False, True))
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_nonzero_index_put_(self, ie_device, precision, ir_version, input_data, indices, accumulate):
        self.input_tensor = input_data["input_tensor"]
        self.input_tensor = np.random.randn(*input_data["input_shape"]).astype(np.float32)
        self.values = input_data["values"]
        for i in range(len(indices)):
            if type(indices[i]) is list:
                indices[i] = np.random.randint(0, 2, indices[i])
        self.indices_0 = indices[0]
        self.indices_1 = indices[1]
        self._test(*self.create_model(accumulate), ie_device, precision, ir_version, trace_model=True, use_convert_model=True)

@@ -167,7 +170,7 @@ class TestNonZero_IndexPut(PytorchLayerTest):
class TestMask_IndexPut(PytorchLayerTest):
    def _prepare_input(self):
        return (np.random.randn(100, 5).astype(np.float32),np.random.randn(100, 5).astype(np.float32))


    def create_model(self):
        class aten_index_put_mask(torch.nn.Module):
            def forward(self, x, y):
@@ -8,8 +8,10 @@ import torch
from pytorch_layer_test_class import PytorchLayerTest


@pytest.mark.parametrize('input_tensor', (np.random.randn(2, 1, 3), np.random.randn(3, 7),
                                          np.random.randn(1, 1, 4, 4)))
@pytest.mark.parametrize('input_tensor',
                         [
                             [2, 1, 3], [3, 7], [1, 1, 4, 4]
                         ])
class TestLen(PytorchLayerTest):

    def _prepare_input(self):

@@ -40,13 +42,13 @@ class TestLen(PytorchLayerTest):
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_len(self, ie_device, precision, ir_version, input_tensor):
        self.input_tensor = input_tensor
        self.input_tensor = np.random.randn(*input_tensor).astype(np.float32)
        self._test(*self.create_model(), ie_device, precision, ir_version)

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_len_int_list(self, ie_device, precision, ir_version, input_tensor):
        self.input_tensor = input_tensor
        self.input_tensor = np.random.randn(*input_tensor).astype(np.float32)
        self._test(*self.create_model_int_list(),
                   ie_device, precision, ir_version, use_convert_model=True)
@@ -23,8 +23,8 @@ class TestNarrow(PytorchLayerTest):

        return aten_narrow(), ref_net, "aten::narrow"

    @pytest.mark.parametrize("input_tensor", [
        np.random.randn(3, 3), np.random.randn(3, 4, 5)
    @pytest.mark.parametrize("input_shape", [
        [3, 3], [3, 4, 5]
    ])
    @pytest.mark.parametrize("dim", [
        np.array(0).astype(np.int32), np.array(1).astype(np.int32), np.array(-1).astype(np.int32)

@@ -37,8 +37,8 @@ class TestNarrow(PytorchLayerTest):
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_narrow(self, input_tensor, dim, start, length, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
    def test_narrow(self, input_shape, dim, start, length, ie_device, precision, ir_version):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self.dim = dim
        self.start = start
        self.length = length
@@ -8,12 +8,12 @@ from pytorch_layer_test_class import PytorchLayerTest


@pytest.mark.parametrize(
    "input_rhs",
    (
        np.random.randn(2, 5, 3, 4).astype(np.float32),
        np.random.randn(1, 5, 3, 4).astype(np.float32),
        np.random.randn(1).astype(np.float32),
    ),
    "input_shape_rhs",
    [
        [2, 5, 3, 4],
        [1, 5, 3, 4],
        [1]
    ]
)
class TestRemainder(PytorchLayerTest):
    def _prepare_input(self):

@@ -30,8 +30,8 @@ class TestRemainder(PytorchLayerTest):

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_remainder(self, ie_device, precision, ir_version, input_rhs):
        self.input_rhs = input_rhs
    def test_remainder(self, ie_device, precision, ir_version, input_shape_rhs):
        self.input_rhs = np.random.randn(*input_shape_rhs).astype(np.float32)
        self._test(*self.create_model(), ie_device, precision, ir_version, use_convert_model=True)
@@ -12,9 +12,8 @@ import torch
    {'repeats': 2, 'dim': 2},
    {'repeats': [2, 3], 'dim': 1},
    {'repeats': [3, 2, 1], 'dim': 3},
    {'repeats': [3, 2, 1], 'dim': 3},
    {'repeats': 2, 'dim': None},
    {'repeats': [random.randint(1, 5) for _ in range(36)], 'dim': None}))
    {'repeats': [36], 'dim': None}))
class TestRepeatInterleaveConstRepeats(PytorchLayerTest):

    def _prepare_input(self):

@@ -39,6 +38,9 @@ class TestRepeatInterleaveConstRepeats(PytorchLayerTest):
    @pytest.mark.precommit
    def test_repeat_interleave_const_repeats(self, ie_device, precision, ir_version, input_data):
        repeats = input_data['repeats']
        if type(repeats) is list and len(repeats) == 1:
            repeats = [random.randint(1, 5) for _ in range(repeats[0])]

        dim = input_data['dim']
        self._test(*self.create_model_const_repeat(repeats, dim),
                   ie_device, precision, ir_version)
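In test_repeat_interleave above, the 36 random per-element repeat counts used to be drawn at import time inside the parametrize tuple. The new encoding stores only the length, [36], and expands it inside the test, so every worker collects the same parameter while the actual counts stay random. The expansion in isolation:

    import random

    repeats = [36]  # single-element list encodes "expand me at run time"
    if type(repeats) is list and len(repeats) == 1:
        repeats = [random.randint(1, 5) for _ in range(repeats[0])]
    print(len(repeats), min(repeats), max(repeats))  # 36 values, each in [1, 5]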
@@ -39,7 +39,9 @@ class TestROIAlign(PytorchLayerTest):
        return (torchvision_roi_align(output_size, spatial_scale, sampling_ratio, aligned),
                ref_net, "torchvision::roi_align")

    @pytest.mark.parametrize('input_tensor', (np.random.randn(4, 5, 6, 7).astype(np.float32),))
    @pytest.mark.parametrize('input_shape', [
        [4, 5, 6, 7],
    ])
    @pytest.mark.parametrize('boxes', (np.array([[1, 2, 2, 3, 3]]).astype(np.float32),
                                       np.array([[0, 1, 2, 5, 4],
                                                 [2, 1, 2, 5, 4],

@@ -50,9 +52,9 @@ class TestROIAlign(PytorchLayerTest):
    @pytest.mark.parametrize('aligned', (True, False))
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_roi_align(self, ie_device, precision, ir_version, input_tensor, boxes, output_size,
    def test_roi_align(self, ie_device, precision, ir_version, input_shape, boxes, output_size,
                       spatial_scale, sampling_ratio, aligned):
        self.input_tensor = input_tensor
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self.boxes = boxes
        self._test(*self.create_model(output_size, spatial_scale, sampling_ratio, aligned),
                   ie_device, precision, ir_version, trace_model=True)
@@ -34,24 +34,34 @@ class TestRsub(PytorchLayerTest):

        return model(), ref_net, "aten::rsub"

    @pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 4).astype(np.float32),
                                             np.array(5).astype(np.float32),
                                             np.random.randn(1)),])

    @pytest.mark.parametrize('input_data',
                             [
                                 [[2, 3, 4], np.array(5).astype(np.float32), [1]]
                             ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_rsub_f(self, ie_device, precision, ir_version, input_data):
        self.input_data = input_data
    def test_rsub(self, ie_device, precision, ir_version, input_data):
        self.input_data = []
        for input in input_data:
            if type(input) is list:
                self.input_data.append(np.random.randn(*input).astype(np.float32))
            else:
                self.input_data.append(input)
        self._test(*self.create_model(second_type="float"), ie_device, precision, ir_version, use_convert_model=True)

    @pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 4).astype(np.float32),
                                             np.array(5).astype(int),
                                             np.random.randn(1)),])

    @pytest.mark.parametrize('input_data',
                             [
                                 [[2, 3, 4], np.array(5).astype(int), [1]]
                             ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_rsub_i(self, ie_device, precision, ir_version, input_data):
        self.input_data = input_data
    def test_rsub(self, ie_device, precision, ir_version, input_data):
        self.input_data = []
        for input in input_data:
            if type(input) is list:
                self.input_data.append(np.random.randn(*input).astype(np.float32))
            else:
                self.input_data.append(input)
        self._test(*self.create_model(second_type="int"), ie_device, precision, ir_version, use_convert_model=True)
@@ -36,11 +36,11 @@ class TestSortConstants(PytorchLayerTest):
        ref_net = None
        return aten_sort(dim, descending, stable), ref_net, "aten::sort"

    @pytest.mark.parametrize("input_tensor", [
        np.random.rand(16),
        np.random.rand(1, 4),
        np.random.rand(4, 4),
        np.random.rand(4, 4, 4),
    @pytest.mark.parametrize("input_shape", [
        [16],
        [1, 4],
        [4, 4],
        [4, 4, 4],
        np.array([1, 2, 4, 6, 5, 8, 7]),
        np.array([6, 5, 4, 2, 3, 0, 1]),
        np.array([1, 1, 1, 2, 1, 3, 1, 4, 2, 5, 1, 2, 4, 4, 0]),

@@ -78,9 +78,13 @@ class TestSortConstants(PytorchLayerTest):
    ])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_sort(self, input_tensor, descending, stable, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
        dims = len(input_tensor.shape)
    def test_sort(self, input_shape, descending, stable, ie_device, precision, ir_version):
        self.input_tensor = []
        if type(input_shape) is list:
            self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        else:
            self.input_tensor = input_shape
        dims = len(self.input_tensor.shape)
        for dim in range(-dims, dims):
            self._test(*self.create_model(dim, descending, stable),
                       ie_device, precision, ir_version)
@@ -17,7 +17,7 @@ class TestStack2D(PytorchLayerTest):
        class aten_stack(torch.nn.Module):
            def __init__(self, dim):
                super(aten_stack, self).__init__()
                self.dim = dim
                self.dim = dim

            def forward(self, x, y):
                inputs = [x, y]

@@ -27,18 +27,22 @@ class TestStack2D(PytorchLayerTest):

        return aten_stack(dim), ref_net, "aten::stack"

    @pytest.mark.parametrize("input_tensor", ([
        [np.random.rand(1, 3, 3), np.random.rand(1, 3, 3)],
        [np.random.rand(4, 4, 2), np.random.rand(4, 4, 2)],
        [np.random.rand(8, 1, 1, 9), np.random.rand(8, 1, 1, 9)]
    ]))
    @pytest.mark.parametrize("input_shape",
                             [
                                 [1, 3, 3],
                                 [4, 4, 2],
                                 [8, 1, 1, 9]
                             ])
    @pytest.mark.parametrize("dim", ([
        0, 1, 2,
    ]))
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_stack2D(self, input_tensor, dim, ie_device, precision, ir_version):
        self.input_tensors = input_tensor
    def test_stack2D(self, input_shape, dim, ie_device, precision, ir_version):
        self.input_tensors = [
            np.random.randn(*input_shape).astype(np.float32),
            np.random.randn(*input_shape).astype(np.float32),
        ]
        self._test(*self.create_model(dim), ie_device, precision, ir_version)


@@ -52,7 +56,7 @@ class TestStack3D(PytorchLayerTest):
        class aten_stack(torch.nn.Module):
            def __init__(self, dim):
                super(aten_stack, self).__init__()
                self.dim = dim
                self.dim = dim

            def forward(self, x, y, z):
                inputs = [x, y, z]

@@ -62,16 +66,21 @@ class TestStack3D(PytorchLayerTest):

        return aten_stack(dim), ref_net, "aten::stack"

    @pytest.mark.parametrize("input_tensor", ([
        [np.random.rand(1, 3, 3), np.random.rand(1, 3, 3), np.random.rand(1, 3, 3)],
        [np.random.rand(4, 4, 2), np.random.rand(4, 4, 2), np.random.rand(4, 4, 2)],
        [np.random.rand(8, 1, 1, 9), np.random.rand(8, 1, 1, 9), np.random.rand(8, 1, 1, 9)]
    ]))
    @pytest.mark.parametrize("input_shape",
                             [
                                 [1, 3, 3],
                                 [4, 4, 2],
                                 [8, 1, 1, 9]
                             ])
    @pytest.mark.parametrize("dim", ([
        0, 1, 2,
    ]))
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_stack3D(self, input_tensor, dim, ie_device, precision, ir_version):
        self.input_tensors = input_tensor
    def test_stack3D(self, input_shape, dim, ie_device, precision, ir_version):
        self.input_tensors = [
            np.random.randn(*input_shape).astype(np.float32),
            np.random.randn(*input_shape).astype(np.float32),
            np.random.randn(*input_shape).astype(np.float32)
        ]
        self._test(*self.create_model(dim), ie_device, precision, ir_version)
@@ -37,19 +37,22 @@ class TestSub(PytorchLayerTest):

        return aten_sub(inplace), ref_net, op_name

    @pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 4).astype(np.float32),
                                             np.random.randn(2, 3, 4).astype(np.float32),
                                             np.random.randn(1)),
                                            (np.random.randn(4, 2, 3).astype(np.float32),
                                             np.random.randn(1, 2, 3).astype(np.float32),
                                             np.random.randn(1)), ])
    @pytest.mark.parametrize('input_shapes',
                             [
                                 [
                                     [2, 3, 4], [2, 3, 4], [1]
                                 ],
                                 [
                                     [4, 2, 3], [1, 2, 3], [1]
                                 ]
                             ])
    @pytest.mark.parametrize("inplace", [True, False])
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_sub(self, ie_device, precision, ir_version, input_data, inplace):
        self.input_data = input_data
    def test_sub(self, ie_device, precision, ir_version, input_shapes, inplace):
        self.input_data = []
        for input_shape in input_shapes:
            self.input_data.append(np.random.randn(*input_shape).astype(np.float32))
        self._test(*self.create_model(inplace), ie_device, precision, ir_version, use_convert_model=True)
@@ -32,9 +32,9 @@ class TestTopK(PytorchLayerTest):

        return aten_topk(k, dim, largest, sort), ref_net, "aten::topk"

    @pytest.mark.parametrize(("input_tensor"), [
        np.random.rand(7, 5, 5, 4),
        np.random.rand(5, 6, 6, 7, 8),
    @pytest.mark.parametrize(("input_shape"), [
        [7, 5, 5, 4],
        [5, 6, 6, 7, 8]
    ])

    @pytest.mark.parametrize(("k"), [

@@ -62,6 +62,6 @@ class TestTopK(PytorchLayerTest):
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == 'true', reason="Ticket - 115085")
    def test_topK(self, input_tensor, k, dim, largest, sort, ie_device, precision, ir_version):
        self.input_tensor = input_tensor
    def test_topK(self, input_shape, k, dim, largest, sort, ie_device, precision, ir_version):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(k, dim, largest, sort), ie_device, precision, ir_version)
@@ -7,6 +7,44 @@ import torch.nn.functional as F

from pytorch_layer_test_class import PytorchLayerTest

OPS = {
    "aten::rsqrt": torch.rsqrt,
    "aten::sqrt": torch.sqrt,
    "aten::exp": torch.exp,
    "aten::exp_": torch.exp_,
    "aten::relu": torch.relu,
    "aten::relu_": torch.relu_,
    "aten::ceil": torch.ceil,
    "aten::ceil_": torch.ceil_,
    "aten::floor": torch.floor,
    "aten::floor_": torch.floor_,
    "aten::sigmoid": torch.sigmoid,
    "aten::sigmoid_": torch.sigmoid_,
    "aten::cos": torch.cos,
    "aten::cos_": torch.cos_,
    "aten::sin": torch.sin,
    "aten::sin_": torch.sin_,
    "aten::tan": torch.tan,
    "aten::tan_": torch.tan_,
    "aten::cosh": torch.cosh,
    "aten::cosh_": torch.cosh_,
    "aten::sinh": torch.sinh,
    "aten::sinh_": torch.sinh_,
    "aten::tanh": torch.tanh,
    "aten::tanh_": torch.tanh_,
    "aten::acos": torch.acos,
    "aten::acos_": torch.acos_,
    "aten::asin": torch.asin,
    "aten::asin_": torch.asin_,
    "aten::atan": torch.atan,
    "aten::atan_": torch.atan_,
    "aten::acosh": torch.acosh,
    "aten::acosh_": torch.acosh_,
    "aten::asinh": torch.asinh,
    "aten::asinh_": torch.asinh_,
    "aten::atanh": torch.atanh,
    "aten::atanh_": torch.atanh_
}

class unary_op_net(torch.nn.Module):
    def __init__(self, op, dtype):

@@ -29,60 +67,62 @@ class TestUnaryOp(PytorchLayerTest):
    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.int8, torch.uint8, torch.int32, torch.int64])
    @pytest.mark.parametrize("op,op_type", [
        (torch.rsqrt, "aten::rsqrt"),
        (torch.sqrt, "aten::sqrt"),
        (torch.exp, "aten::exp"),
        (torch.relu, "aten::relu"),
        (torch.relu_, "aten::relu_"),
        (torch.ceil, "aten::ceil"),
        (torch.ceil_, "aten::ceil_"),
        (torch.floor, "aten::floor"),
        (torch.floor_, "aten::floor_"),
        (torch.sigmoid, "aten::sigmoid"),
    @pytest.mark.parametrize("op_type",
                             [
                                 "aten::rsqrt",
                                 "aten::sqrt",
                                 "aten::exp",
                                 "aten::relu",
                                 "aten::relu_",
                                 "aten::ceil",
                                 "aten::ceil_",
                                 "aten::floor",
                                 "aten::floor_",
                                 "aten::sigmoid",
        # trigonometry
        (torch.cos, "aten::cos"),
        (torch.sin, "aten::sin"),
        (torch.tan, "aten::tan"),
        (torch.cosh, "aten::cosh"),
        (torch.sinh, "aten::sinh"),
        (torch.tanh, "aten::tanh"),
        (torch.acos, "aten::acos"),
        (torch.asin, "aten::asin"),
        (torch.atan, "aten::atan"),
        (torch.acosh, "aten::acosh"),
        (torch.asinh, "aten::asinh"),
        (torch.atanh, "aten::atanh"),
                                 "aten::cos",
                                 "aten::sin",
                                 "aten::tan",
                                 "aten::cosh",
                                 "aten::sinh",
                                 "aten::tanh",
                                 "aten::acos",
                                 "aten::asin",
                                 "aten::atan",
                                 "aten::acosh",
                                 "aten::asinh",
                                 "aten::atanh"
                             ])
    def test_unary_op(self, op, op_type, dtype, ie_device, precision, ir_version):
    def test_unary_op(self, op_type, dtype, ie_device, precision, ir_version):
        self.dtype = dtype
        self._test(unary_op_net(op, dtype), None, op_type,
        self._test(unary_op_net(OPS[op_type], dtype), None, op_type,
                   ie_device, precision, ir_version)

    @pytest.mark.nightly
    @pytest.mark.precommit
    @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
    @pytest.mark.parametrize("op,op_type", [
    @pytest.mark.parametrize("op_type",
                             [
        # some pytorch inplace ops do not support int
        (torch.exp_, "aten::exp_"),
        (torch.sigmoid_, "aten::sigmoid_"),
                                 "aten::exp_",
                                 "aten::sigmoid_",
        # trigonometry
        (torch.cos_, "aten::cos_"),
        (torch.sin_, "aten::sin_"),
        (torch.tan_, "aten::tan_"),
        (torch.cosh_, "aten::cosh_"),
        (torch.sinh_, "aten::sinh_"),
        (torch.tanh_, "aten::tanh_"),
        (torch.acos_, "aten::acos_"),
        (torch.asin_, "aten::asin_"),
        (torch.atan_, "aten::atan_"),
        (torch.acosh_, "aten::acosh_"),
        (torch.asinh_, "aten::asinh_"),
        (torch.atanh_, "aten::atanh_"),
                                 "aten::cos_",
                                 "aten::sin_",
                                 "aten::tan_",
                                 "aten::cosh_",
                                 "aten::sinh_",
                                 "aten::tanh_",
                                 "aten::acos_",
                                 "aten::asin_",
                                 "aten::atan_",
                                 "aten::acosh_",
                                 "aten::asinh_",
                                 "aten::atanh_"
                             ])
    def test_unary_op_float(self, op, op_type, dtype, ie_device, precision, ir_version):
    def test_unary_op_float(self, op_type, dtype, ie_device, precision, ir_version):
        self.dtype = dtype
        self._test(unary_op_net(op, dtype), None, op_type,
        self._test(unary_op_net(OPS[op_type], dtype), None, op_type,
                   ie_device, precision, ir_version)
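test_unary_ops above replaces (callable, name) tuples with string op names that index the module-level OPS table. String parameters give short, stable test IDs, which matters once parallel workers have to agree on the collected test list, and the torch callable is only resolved inside the test. The lookup in isolation, with a trimmed-down table:

    import torch

    OPS = {"aten::relu": torch.relu, "aten::sigmoid": torch.sigmoid}  # trimmed

    def run(op_type, x):
        return OPS[op_type](x)  # resolve the torch callable at run time

    print(run("aten::relu", torch.tensor([-1.0, 2.0])))  # tensor([0., 2.])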
@@ -11,9 +11,10 @@ from pytorch_layer_test_class import PytorchLayerTest
@pytest.mark.parametrize('dimension', (0, 1, 2))
@pytest.mark.parametrize('size', (1, 2))
@pytest.mark.parametrize('step', (1, 2, 3, 4))
@pytest.mark.parametrize('input_tensor', (np.random.randn(2, 2, 5).astype(np.float32),
                                          np.random.randn(3, 3, 3, 3).astype(np.float32),
                                          np.random.randn(2, 3, 4, 5).astype(np.float32)))
@pytest.mark.parametrize('input_shape',
                         [
                             [2, 2, 5], [3, 3, 3, 3], [2, 3, 4, 5]
                         ])
class TestUnfold(PytorchLayerTest):

    def _prepare_input(self):

@@ -37,7 +38,7 @@ class TestUnfold(PytorchLayerTest):

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_unfold(self, ie_device, precision, ir_version, dimension, size, step, input_tensor):
        self.input_tensor = input_tensor
    def test_unfold(self, ie_device, precision, ir_version, dimension, size, step, input_shape):
        self.input_tensor = np.random.randn(*input_shape).astype(np.float32)
        self._test(*self.create_model(dimension, size, step),
                   ie_device, precision, ir_version)
@@ -8,8 +8,15 @@ import torch
from pytorch_layer_test_class import PytorchLayerTest


@pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 2), np.array(2), np.array(6)),
                                        (np.random.randn(4), np.array(2), np.array(2))])
@pytest.mark.parametrize('input_shapes',
                         [
                             [
                                 [2, 3, 2], np.array(2), np.array(6)
                             ],
                             [
                                 [4], np.array(2), np.array(2)
                             ]
                         ])
class TestViewListConstruct(PytorchLayerTest):

    def _prepare_input(self):

@@ -27,11 +34,21 @@ class TestViewListConstruct(PytorchLayerTest):

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_view_list_construct(self, ie_device, precision, ir_version, input_data):
        self.input_data = input_data
    def test_view_list_construct(self, ie_device, precision, ir_version, input_shapes):
        self.input_data = []
        for input_shape in input_shapes:
            if type(input_shape) is list:
                self.input_data.append(np.random.randn(*input_shape).astype(np.float32))
            else:
                self.input_data.append(input_shape)
        self._test(*self.create_model(), ie_device, precision, ir_version)

@pytest.mark.parametrize('input_data', [(np.random.randn(4), np.array(2))])
@pytest.mark.parametrize('input_shapes',
                         [
                             [
                                 [4], np.array(2)
                             ]
                         ])
class TestViewDtype(PytorchLayerTest):

    def _prepare_input(self):

@@ -49,12 +66,22 @@ class TestViewDtype(PytorchLayerTest):

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_view_dtype(self, ie_device, precision, ir_version, input_data):
        self.input_data = input_data
    def test_view_dtype(self, ie_device, precision, ir_version, input_shapes):
        self.input_data = []
        for input_shape in input_shapes:
            if type(input_shape) is list:
                self.input_data.append(np.random.randn(*input_shape).astype(np.float32))
            else:
                self.input_data.append(input_shape)
        self._test(*self.create_model(), ie_device, precision, ir_version)


@pytest.mark.parametrize('input_data', [(np.random.randn(4), np.random.randn(2, 2))])
@pytest.mark.parametrize('input_shapes',
                         [
                             [
                                 [4], [2, 2]
                             ]
                         ])
class TestViewSize(PytorchLayerTest):

    def _prepare_input(self):

@@ -72,13 +99,27 @@ class TestViewSize(PytorchLayerTest):

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_view_size(self, ie_device, precision, ir_version, input_data):
        self.input_data = input_data
    def test_view_size(self, ie_device, precision, ir_version, input_shapes):
        self.input_data = []
        for input_shape in input_shapes:
            if type(input_shape) is list:
                self.input_data.append(np.random.randn(*input_shape).astype(np.float32))
            else:
                self.input_data.append(input_shape)
        self._test(*self.create_model(), ie_device, precision, ir_version)

@pytest.mark.parametrize('input_data', [(np.random.randn(2, 3, 2), 2, 6),
                                        (np.random.randn(4), 2, 2),
                                        (np.random.randn(4), 2, 2.1)])
@pytest.mark.parametrize('input_shapes',
                         [
                             [
                                 [2, 3, 2], 2, 6
                             ],
                             [
                                 [4], 2, 2
                             ],
                             [
                                 [4], 2, 2.1
                             ]
                         ])
class TestView(PytorchLayerTest):

    def _prepare_input(self):

@@ -101,6 +142,11 @@ class TestView(PytorchLayerTest):

    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_view(self, ie_device, precision, ir_version, input_data):
        self.input_data = input_data
    def test_view(self, ie_device, precision, ir_version, input_shapes):
        self.input_data = []
        for input_shape in input_shapes:
            if type(input_shape) is list:
                self.input_data.append(np.random.randn(*input_shape).astype(np.float32))
            else:
                self.input_data.append(input_shape)
        self._test(*self.create_model(), ie_device, precision, ir_version)