[PT FE] Fix failing translation of aten::index_put_ (#16140)

* Initial commit

* Fix for reading processed list

* Format code

* Cleanup

* cleanup

* Cleanup

* cleanup test

* Add comment

* Add rt_info

* fix type

* Update src/frontends/pytorch/src/transforms/aten_index_put_replacer.cpp

Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>

---------

Co-authored-by: Andrei Kochin <andrei.kochin@intel.com>
Co-authored-by: Maxim Vafin <maxim.vafin@intel.com>
This commit is contained in:
Mateusz Mikolajczyk
2023-03-09 21:14:58 +01:00
committed by GitHub
parent 654f3d988f
commit 31489931cf
6 changed files with 301 additions and 144 deletions

View File

@@ -3,6 +3,7 @@
import itertools
import warnings
from copy import deepcopy
import numpy as np
from common.constants import test_device, test_precision
@@ -51,7 +52,7 @@ class PytorchLayerTest:
model = torch.jit.script(model)
else:
torch_inputs = [torch.from_numpy(inp) for inp in inputs]
model = torch.jit.trace(model, torch_inputs)
model = torch.jit.trace(model, deepcopy(torch_inputs))
if kwargs.get('freeze_model', True):
model = torch.jit.freeze(model)
graph = model.inlined_graph
@@ -91,14 +92,14 @@ class PytorchLayerTest:
# OV infer:
core = Core()
compiled = core.compile_model(om, ie_device)
infer_res = compiled(inputs)
infer_res = compiled(deepcopy(inputs))
if hasattr(self, 'skip_framework') and self.skip_framework:
warnings.warn('Framework is skipped')
return
# Framework infer:
fw_res = model(*torch_inps)
fw_res = model(*deepcopy(torch_inps))
if not isinstance(fw_res, (tuple)):
fw_res = (fw_res,)

View File

@@ -14,9 +14,7 @@ class TestIndexPut_SingleIndices(PytorchLayerTest):
return (self.input_tensor, self.values)
def create_model(self, indices, accumulate):
class aten_index_put_(torch.nn.Module):
def __init__(self, indices, accumulate):
super().__init__()
self.indices = indices
@@ -30,31 +28,43 @@ class TestIndexPut_SingleIndices(PytorchLayerTest):
return aten_index_put_(indices, accumulate), ref_net, "aten::index_put_"
@pytest.mark.parametrize('input_data', ({'input_tensor': np.random.randn(5).astype(np.float32),
'values': np.array(11).astype(np.float32)},
{'input_tensor': np.random.randn(3, 3).astype(np.float32),
'values': np.array([10, 11, 12]).astype(np.float32)}))
@pytest.mark.parametrize('indices', (torch.tensor([0], dtype=torch.long),
torch.tensor([-1, -2], dtype=torch.long),
torch.tensor([0, -1, -2], dtype=torch.long),
torch.tensor([1, 2], dtype=torch.long),
torch.tensor([0, 1, 2], dtype=torch.long)))
@pytest.mark.parametrize('accumulate', (True, False))
@pytest.mark.parametrize(
"input_data",
(
{
"input_tensor": np.random.randn(5).astype(np.float32),
"values": np.array(11).astype(np.float32)},
{
"input_tensor": np.random.randn(3, 3).astype(np.float32),
"values": np.array([10, 11, 12]).astype(np.float32),
},
),
)
@pytest.mark.parametrize(
"indices",
(
torch.tensor([0], dtype=torch.long),
torch.tensor([-1, -2], dtype=torch.long),
torch.tensor([0, -1, -2], dtype=torch.long),
torch.tensor([1, 2], dtype=torch.long),
torch.tensor([0, 1, 2], dtype=torch.long),
),
)
@pytest.mark.parametrize("accumulate", (True, False))
@pytest.mark.nightly
@pytest.mark.precommit
def test_index_put_single_indices(self, ie_device, precision, ir_version, input_data, indices, accumulate):
self.input_tensor = input_data['input_tensor']
self.values = input_data['values']
self.input_tensor = input_data["input_tensor"]
self.values = input_data["values"]
self._test(*self.create_model(indices, accumulate), ie_device, precision, ir_version)
class TestIndexPut_ManyIndices(PytorchLayerTest):
def _prepare_input(self):
return (self.input_tensor, self.values)
def create_model(self, indices, accumulate):
class aten_index_put_(torch.nn.Module):
def __init__(self, indices, accumulate):
super().__init__()
self.indices_first = indices[0]
@@ -69,26 +79,87 @@ class TestIndexPut_ManyIndices(PytorchLayerTest):
return aten_index_put_(indices, accumulate), ref_net, "aten::index_put_"
@pytest.mark.parametrize('input_data', ({'input_tensor': np.random.randn(3, 3).astype(np.float32),
'values': np.array(12).astype(np.float32)},
{'input_tensor': np.random.randn(3, 3, 3).astype(np.float32),
'values': np.array([10, 11, 12]).astype(np.float32)},))
@pytest.mark.parametrize('indices', ((torch.tensor([0], dtype=torch.long),
torch.tensor([2], dtype=torch.long)),
(torch.tensor([1, 2], dtype=torch.long),
torch.tensor([0, 1], dtype=torch.long)),
(torch.tensor([0, 1], dtype=torch.long),
torch.tensor([0, 1], dtype=torch.long)),
(torch.tensor([0], dtype=torch.long),
torch.tensor([-2], dtype=torch.long)),
(torch.tensor([-1, -2], dtype=torch.long),
torch.tensor([0, 1], dtype=torch.long)),
(torch.tensor([0, -1], dtype=torch.long),
torch.tensor([0, -1], dtype=torch.long))))
@pytest.mark.parametrize('accumulate', (True, False))
@pytest.mark.parametrize(
"input_data",
(
{
"input_tensor": np.random.randn(3, 3).astype(np.float32),
"values": np.array(12).astype(np.float32)
},
{
"input_tensor": np.random.randn(3, 3, 3).astype(np.float32),
"values": np.array([10, 11, 12]).astype(np.float32),
},
),
)
@pytest.mark.parametrize(
"indices",
(
(torch.tensor([0], dtype=torch.long), torch.tensor([2], dtype=torch.long)),
(torch.tensor([1, 2], dtype=torch.long), torch.tensor([0, 1], dtype=torch.long)),
(torch.tensor([0, 1], dtype=torch.long), torch.tensor([0, 1], dtype=torch.long)),
(torch.tensor([0], dtype=torch.long), torch.tensor([-2], dtype=torch.long)),
(torch.tensor([-1, -2], dtype=torch.long), torch.tensor([0, 1], dtype=torch.long)),
(torch.tensor([0, -1], dtype=torch.long), torch.tensor([0, -1], dtype=torch.long)),
),
)
@pytest.mark.parametrize("accumulate", (True, False))
@pytest.mark.nightly
@pytest.mark.precommit
def test_index_put_many_indices(self, ie_device, precision, ir_version, input_data, indices, accumulate):
self.input_tensor = input_data['input_tensor']
self.values = input_data['values']
self._test(*self.create_model(indices, accumulate), ie_device, precision, ir_version)
self.input_tensor = input_data["input_tensor"]
self.values = input_data["values"]
self._test(*self.create_model(indices, accumulate), ie_device, precision, ir_version)
class TestNonZero_IndexPut(PytorchLayerTest):
    """Exercises aten::index_put_ when the index tensor is produced by nonzero()."""

    def _prepare_input(self):
        """Return the input tuple in the order the module's forward() consumes it."""
        return (self.input_tensor, self.values, self.indices_0, self.indices_1)

    def create_model(self, accumulate):
        """Build (model, ref_net, op_name) for a module that computes its write
        indices at runtime via an equality mask + nonzero before index_put_."""

        class aten_index_put_(torch.nn.Module):
            def __init__(self, accumulate):
                super().__init__()
                self.accumulate = accumulate

            def forward(self, input_tensor, values, indices_0, indices_1):
                # Positions where both index tensors agree become the write targets,
                # so the index shape is data-dependent.
                matched = (indices_0 == indices_1).nonzero(as_tuple=True)[0]
                input_tensor.index_put_((matched,), values, self.accumulate)
                return input_tensor

        ref_net = None
        return aten_index_put_(accumulate), ref_net, "aten::index_put_"

    @pytest.mark.parametrize(
        "input_data",
        (
            {"input_tensor": np.random.randn(3).astype(np.float32),
             "values": np.array(11).astype(np.float32)},
            {"input_tensor": np.random.randn(3, 3).astype(np.float32),
             "values": np.array([10, 11, 12]).astype(np.float32)},
        ),
    )
    @pytest.mark.parametrize(
        "indices",
        (
            (np.random.randint(low=0, high=2, size=(1,)), np.random.randint(low=0, high=2, size=(1,))),
            (np.random.randint(low=0, high=2, size=(2,)), np.random.randint(low=0, high=2, size=(2,))),
            (np.array([0, 1, 0]), np.array([1, 1, 0])),
            (np.ones(shape=(3,)), np.ones(shape=(3,))),
            (np.ones(shape=(3,)), np.zeros(shape=(3,))),
        ),
    )
    @pytest.mark.parametrize("accumulate", (False, True))
    @pytest.mark.nightly
    @pytest.mark.precommit
    def test_nonzero_index_put_(self, ie_device, precision, ir_version, input_data, indices, accumulate):
        """Run the nonzero+index_put_ model through the harness (trace_model=True,
        presumably because scripting data-dependent index shapes is problematic)."""
        self.input_tensor = input_data["input_tensor"]
        self.values = input_data["values"]
        self.indices_0, self.indices_1 = indices
        self._test(*self.create_model(accumulate), ie_device, precision, ir_version, trace_model=True)