[PT FE]: fix issues with squeeze producing dynamic rank (#18748)
* [PT FE]: fix issues with squeeze producing dynamic rank
* Update test_squeeze.py
* Update test_squeeze.py
* Update test_slice.py
* Apply suggestions from code review
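For context: PyTorch `squeeze` silently ignores a requested dimension whose size is not 1, while OpenVINO `Squeeze` rejects it, and the previous mask-based emulation of that behavior produced an output with dynamic rank. A minimal PyTorch-only sketch of the semantics being matched (shapes are illustrative):

```python
import torch

x = torch.zeros(2, 1, 3)

# Squeezing a size-1 dimension removes it.
print(torch.squeeze(x, 1).shape)  # torch.Size([2, 3])

# Squeezing a dimension whose size is not 1 is a silent no-op in PyTorch, not an error.
print(torch.squeeze(x, 0).shape)  # torch.Size([2, 1, 3])

# Without a dim argument, every size-1 dimension is removed.
print(torch.squeeze(x).shape)     # torch.Size([2, 3])
```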
@@ -5,16 +5,6 @@
 #include "openvino/op/squeeze.hpp"
 
 #include "openvino/frontend/pytorch/node_context.hpp"
-#include "openvino/op/constant.hpp"
-#include "openvino/op/equal.hpp"
-#include "openvino/op/gather.hpp"
-#include "openvino/op/logical_and.hpp"
-#include "openvino/op/logical_not.hpp"
-#include "openvino/op/non_zero.hpp"
-#include "openvino/op/range.hpp"
-#include "openvino/op/reduce_logical_or.hpp"
-#include "openvino/op/reshape.hpp"
-#include "openvino/op/shape_of.hpp"
 #include "utils.hpp"
 
 namespace ov {
@@ -26,44 +16,11 @@ using namespace ov::op;
 
 OutputVector translate_squeeze(const NodeContext& context) {
     num_inputs_check(context, 1, 2);
-    auto input = context.get_input(0);
+    auto x = context.get_input(0);
     if (context.input_is_none(1)) {
-        return {context.mark_node(std::make_shared<v0::Squeeze>(input))};
+        return {context.mark_node(std::make_shared<v0::Squeeze>(x))};
     }
-    // Cannot provide dimensions to ov v0::Squeeze directly due to mismatch in behavior between OV and PyTorch:
-    // If provided dimension cannot be squeezed, OV raises exception, PyTorch returns dimension unmodified.
-    auto normalized_axis_input = normalize_axis(context, context.get_input(1), input);
-    auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
-    auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1}));
-    auto shape_rank = get_shape_rank(context, input, true, element::i32);
-    auto input_shape = std::get<0>(shape_rank);
-    auto input_rank = std::get<1>(shape_rank);
-    // Create boolean mask containing True on 1, else 0.
-    auto squeezable_dimensions_mask = context.mark_node(std::make_shared<v1::Equal>(input_shape, const_1));
-    auto input_shape_indices =
-        context.mark_node(std::make_shared<v4::Range>(const_0, input_rank, const_1, element::i32));
-    // Add additional dimension to axis indices, allowing to use broadcast in equal to create boolean mask,
-    // where True indicates that input dimension was selected to be squeezed.
-    auto dim_reshape_shape = context.mark_node(v0::Constant::create(element::i32, Shape{2}, {-1, 1}));
-    auto reshaped_normalized_axis_input =
-        context.mark_node(std::make_shared<v1::Reshape>(normalized_axis_input, dim_reshape_shape, false));
-    auto selected_mask_to_squeeze =
-        context.mark_node(std::make_shared<v1::Equal>(input_shape_indices, reshaped_normalized_axis_input));
-    selected_mask_to_squeeze =
-        context.mark_node(std::make_shared<v1::ReduceLogicalOr>(selected_mask_to_squeeze, const_0));
-    // Create mask indicating elements that are both selected to be squeezed, and are squeezable (have 1 dimension).
-    auto dimension_mask_to_squeeze = context.mark_node(
-        std::make_shared<v1::LogicalAnd>(selected_mask_to_squeeze, squeezable_dimensions_mask, "none"));
-    // From input_shape, gather only those that shouldn't be squeezed, either because they weren't selected or were
-    // unsqueezable.
-    auto dimension_mask_to_preserve = context.mark_node(std::make_shared<v1::LogicalNot>(dimension_mask_to_squeeze));
-    auto dimension_idxs_to_preserve = context.mark_node(std::make_shared<v3::NonZero>(dimension_mask_to_preserve));
-    dimension_idxs_to_preserve = context.mark_node(std::make_shared<v0::Squeeze>(dimension_idxs_to_preserve));
-    auto dimensions_to_preserve =
-        context.mark_node(std::make_shared<v8::Gather>(input_shape, dimension_idxs_to_preserve, const_0));
-    // Use reshape to remove dimensions that were selected and were squeezable.
-    auto reshape = context.mark_node(std::make_shared<v1::Reshape>(input, dimensions_to_preserve, false));
-    return {context.mark_node(reshape)};
+    return {context.mark_node(std::make_shared<v0::Squeeze>(x, context.get_input(1)))};
 };
 
 } // namespace op
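Note on the removed decomposition above: it rebuilt the output shape with NonZero over a runtime mask and then reshaped to it, so the number of surviving dimensions is only known from concrete sizes and the result rank becomes dynamic. A rough NumPy sketch of that shape arithmetic (illustrative only, not OpenVINO API):

```python
import numpy as np

def emulated_old_squeeze_shape(input_shape, dims_to_squeeze):
    # Mirror of the removed mask logic: keep a dimension unless it was
    # both selected for squeezing and actually has size 1.
    shape = np.array(input_shape)
    selected = np.isin(np.arange(shape.size), np.array(dims_to_squeeze) % shape.size)
    squeezable = shape == 1
    keep = ~(selected & squeezable)
    return shape[keep]  # length of this array is the output rank

# The output rank depends on the concrete sizes, so with a dynamic input
# shape the rank cannot be inferred statically.
print(emulated_old_squeeze_shape((2, 1, 3), [1]))  # [2 3]   -> rank 2
print(emulated_old_squeeze_shape((2, 5, 3), [1]))  # [2 5 3] -> rank 3
```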
@@ -84,8 +84,8 @@ class TestSliceAndSqueeze(PytorchLayerTest):
                 super().__init__()
 
             def forward(self, x):
-                a = torch.squeeze(x, -1)
-                return a[:, None, :, :]
+                a = torch.squeeze(x, 1)
+                return a[:, None, :]
 
         ref_net = None
 
@@ -29,9 +29,16 @@ class TestSqueeze(PytorchLayerTest):
 
         return aten_squeeze(dim), ref_net, "aten::squeeze"
 
-    @pytest.mark.parametrize("dim", [-2, -1, 0, None, [-1, -2], 1, 2])
+    @pytest.mark.parametrize("dim,dynamic_shapes", [(-2, True), (0, True), (None, False)])
     @pytest.mark.nightly
     @pytest.mark.precommit
-    def test_squeeze(self, dim, ie_device, precision, ir_version):
-        self._test(*self.create_model(dim), ie_device, precision, ir_version)
+    def test_squeeze(self, dim, dynamic_shapes, ie_device, precision, ir_version):
+        self._test(*self.create_model(dim), ie_device, precision, ir_version, dynamic_shapes=dynamic_shapes)
+
+    @pytest.mark.xfail(reason='OpenVINO Squeeze does not support a dimension that is not equal to 1.')
+    @pytest.mark.parametrize("dim", [-1, 2])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_squeeze_non_1(self, dim, ie_device, precision, ir_version):
+        # Dynamic shapes introduce dynamic rank, which is not supported by the Squeeze operation.
+        self._test(*self.create_model(dim), ie_device, precision, ir_version, dynamic_shapes=False)