Review opset1 squeeze for shape inference aspects (#13716)

* Review squeeze operator for
- label preservation and propagation
- partial value preservation and propagation
- interval shape propagation

* Review static shape inference tests

* Add template shape inference for squeeze

* Update include for OV_EXPECT_THROW

* Correct has_static_axes initialization

* remove test_squeeze from xfail 44968

* Fix inference parameter op with no data
- add additional test to check if axes are parameter without data
- revert xfail marks for squeeze tests

* Fix inference parameter op with no data
- add additional test to check if axes are parameter without data
- revert xfail marks for squeeze tests

* Refactor axes acquire logic
This commit is contained in:
Pawel Raasz 2022-11-04 11:42:05 +01:00 committed by GitHub
parent 28a118be39
commit 0646d49d35
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 517 additions and 402 deletions

View File

@ -96,34 +96,6 @@ void shape_infer(const ov::opset1::Reshape* op,
input_shape);
}
template <class T>
void shape_infer(const ov::opset1::Squeeze* op,
                 const std::vector<T>& input_shapes,
                 std::vector<T>& output_shapes,
                 const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
    NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1);

    // Axes values must be available (constant or in constant_data); this overload
    // supports only fully static inference.
    std::vector<int64_t> squeeze_axes;
    const bool axes_known = get_data_as_int64<T>(1, op, squeeze_axes, constant_data);
    NODE_VALIDATION_CHECK(op, axes_known, "Shape inference lacks input data");

    const auto& in_shape = input_shapes[0];
    OPENVINO_ASSERT(in_shape.is_static());

    auto& out_shape = output_shapes[0];
    out_shape = T{};

    // Convert negative axes to positive indices (throws on out-of-range values).
    ov::normalize_axes(op, in_shape.rank().get_length(), squeeze_axes);

    // Keep every dimension not listed in axes; a listed dimension must equal 1.
    for (uint64_t dim_idx = 0; dim_idx < in_shape.size(); ++dim_idx) {
        const auto axes_end = squeeze_axes.end();
        if (std::find(squeeze_axes.begin(), axes_end, dim_idx) != axes_end) {
            NODE_VALIDATION_CHECK(op,
                                  in_shape[dim_idx] == 1,
                                  "provided axis value is invalid. Only axes of size 1 may be removed.");
        } else {
            out_shape.push_back(in_shape[dim_idx]);
        }
    }
}
template <class T>
inline void dynamic_shape(T& output_shape) {
OPENVINO_UNREACHABLE("This code should be executed only for PartialShape class");

View File

@ -0,0 +1,98 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/op/squeeze.hpp"
#include "utils.hpp"
namespace ov {
namespace op {
namespace v0 {
/**
* \brief Do Squeeze shape inference.
*
* \tparam T Type of input/output shapes.
*
* \param op Squeeze operator pointer.
* \param input_shapes Squeeze input shapes.
* \param output_shapes Output shapes result of squeeze shape inference.
* \param constant_data Map of constant data.
*/
template <class T>
void shape_infer(const Squeeze* op,
const std::vector<T>& input_shapes,
std::vector<T>& output_shapes,
const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>& constant_data = {}) {
// Dimension type of the shape class T (e.g. Dimension for PartialShape).
using DimType = typename std::iterator_traits<typename T::iterator>::value_type;
NODE_VALIDATION_CHECK(op, output_shapes.size() == 1);
const auto number_of_inputs = input_shapes.size();
const auto& arg_shape = input_shapes[0];
auto& output_shape = output_shapes[0];
// Holds normalized, de-duplicated axes to remove; stays null when axes values
// are not obtainable, which forces a dynamic output below.
std::unique_ptr<std::set<int64_t>> unique_axes;
if (number_of_inputs == 1) {
// No axes input: empty set means "remove all dimensions equal to 1".
unique_axes.reset(new std::set<int64_t>());
} else if (number_of_inputs == 2) {
const auto& axes_shape = input_shapes[1];
NODE_VALIDATION_CHECK(op,
axes_shape.is_dynamic() || is_rank_compatible_any_of(axes_shape.rank(), {0, 1}),
"Second input (axes) should not be of rank higher than 1. Got: ",
axes_shape.rank().get_length());
std::vector<int64_t> axes;
// Axes can be applied only when data rank is known and axes values are
// available (as constant or via constant_data).
if (arg_shape.rank().is_static() && axes_shape.is_static() &&
get_data_as_int64<T>(1, op, axes, constant_data)) {
// Convert negative axes to positive indices (throws on out-of-range values).
normalize_axes(op, arg_shape.rank().get_length(), axes);
unique_axes.reset(new std::set<int64_t>(axes.cbegin(), axes.cend()));
}
} else {
// Invalid number of inputs, empty error message for backward compatibility.
NODE_VALIDATION_CHECK(op, false);
}
if (arg_shape.rank().is_static() && (unique_axes != nullptr)) {
std::vector<DimType> out_dims;
out_dims.reserve(arg_shape.rank().get_length());
if (unique_axes->empty()) {
// According to specification, if only the first input is provided or axes are empty,
// remove all dimensions equal to 1.
std::copy_if(arg_shape.cbegin(), arg_shape.cend(), back_inserter(out_dims), [](const DimType& dim) {
return !dim.compatible(1);
});
} else {
int64_t idx = 0;
auto rm_axis_iter = unique_axes->cbegin();
auto rm_axis_end = unique_axes->cend();
// Returns true if dimension is not squeezable on axis from input axes.
// unique_axes is sorted, so one forward pass over it matches increasing idx.
const auto not_squeezable_at_axis = [&op, &rm_axis_iter, &rm_axis_end, &idx](const DimType& dim) {
if ((rm_axis_iter != rm_axis_end) && (*rm_axis_iter == idx++)) {
NODE_VALIDATION_CHECK(op,
dim.compatible(1),
"provided axis value is invalid. Only axes of size 1 may be removed.");
++rm_axis_iter;
return false;
} else {
return true;
}
};
std::copy_if(arg_shape.cbegin(), arg_shape.cend(), back_inserter(out_dims), not_squeezable_at_axis);
}
// When arg shape has a static rank but is dynamic and the output dimensions list
// is empty, make the output dynamic.
output_shape = arg_shape.is_dynamic() && out_dims.empty() ? PartialShape::dynamic() : T(out_dims);
} else {
output_shape = PartialShape::dynamic();
}
}
} // namespace v0
} // namespace op
} // namespace ov

View File

@ -10,11 +10,10 @@
#include <set>
#include "itt.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/runtime/reference/copy.hpp"
#include "ngraph/validation_util.hpp"
#include "squeeze_shape_inference.hpp"
using namespace std;
using namespace ngraph;
@ -33,90 +32,12 @@ op::Squeeze::Squeeze(const Output<Node>& data) : Op({data}) {
void op::Squeeze::validate_and_infer_types() {
OV_OP_SCOPE(v0_Squeeze_validate_and_infer_types);
auto data = input_value(0);
bool data_has_dynamic_rank = data.get_partial_shape().rank().is_dynamic();
bool data_has_dynamic_shape = data.get_partial_shape().is_dynamic();
auto data_partial_shape = data.get_partial_shape();
std::shared_ptr<op::v0::Constant> axes_constant;
if (get_input_size() == 1) {
// Handling the case when Squeeze op is created with a single input - data.
// This way the following code (validation, shape inference) can be used in both cases.
axes_constant = make_shared<op::v0::Constant>(element::i64, ov::Shape{0}, vector<int64_t>{});
} else {
auto axes_node = input_value(1).get_node_shared_ptr();
auto axes_pshape = get_input_partial_shape(1);
axes_constant = get_constant_from_source(axes_node);
const auto input_shapes = get_node_input_partial_shapes(*this);
auto output_shapes = std::vector<ov::PartialShape>(1);
shape_infer(this, input_shapes, output_shapes);
NODE_VALIDATION_CHECK(this,
axes_pshape.rank().compatible(0) || axes_pshape.rank().compatible(1),
"Second input (axes) should not be of rank higher than 1. Got: ",
axes_pshape.rank().get_length());
}
bool axes_is_empty_constant = (axes_constant && axes_constant->get_data_ptr() != nullptr)
? axes_constant->cast_vector<int64_t>().empty()
: false;
if (data_has_dynamic_rank || !axes_constant || !axes_constant->get_data_ptr() ||
(data_has_dynamic_shape && axes_is_empty_constant)) {
// If data has a static rank despite being dynamic, it's possible none
// of the dimensions will be equal to 1. If so, the input shape can be
// propagated at this point to the output shape.
if (!data_has_dynamic_rank && axes_is_empty_constant) {
bool no_squeezable_dimension_present = true;
uint64_t data_rank = data_partial_shape.rank().get_length();
for (uint64_t idx = 0; idx < data_rank; ++idx) {
if (data_partial_shape[idx].compatible(1)) {
no_squeezable_dimension_present = false;
break;
}
}
if (no_squeezable_dimension_present) {
set_output_type(0, get_input_element_type(0), data_partial_shape);
return;
}
}
set_output_type(0, get_input_element_type(0), ov::PartialShape::dynamic());
return;
}
uint64_t data_rank = data_partial_shape.rank().get_length();
// Get value of axes from Constant
auto axes = normalize_axes(this->description(), axes_constant->cast_vector<int64_t>(), data_rank);
// Prepare set of unique axes marked to be removed from input data.
vector<bool> axes_to_squeeze(data_rank, false);
if (axes_is_empty_constant) {
auto data_shape = data.get_shape();
// Default behaviour is to remove all single dimension axes.
for (uint64_t idx = 0; idx < data_rank; ++idx) {
if (data_shape.at(idx) == 1) {
axes_to_squeeze.at(idx) = true;
}
}
} else {
set<size_t, greater<size_t>> unique_axes(begin(axes), end(axes));
for (uint64_t axis : unique_axes) {
if (!data_has_dynamic_shape) {
auto data_shape = data.get_shape();
NODE_VALIDATION_CHECK(this,
(data_shape.at(axis) == 1),
"provided axis value is invalid. Only axes of size 1 may be removed.");
}
axes_to_squeeze.at(axis) = true;
}
}
vector<Dimension> output_data_shape;
for (uint64_t idx = 0; idx < data_rank; ++idx) {
if (!axes_to_squeeze.at(idx)) {
output_data_shape.push_back(data_partial_shape[idx]);
}
}
set_output_type(0, get_input_element_type(0), ov::PartialShape(output_data_shape));
set_output_type(0, get_input_element_type(0), output_shapes[0]);
}
bool ngraph::op::v0::Squeeze::visit_attributes(AttributeVisitor& visitor) {
@ -136,87 +57,33 @@ shared_ptr<Node> op::Squeeze::clone_with_new_inputs(const OutputVector& new_args
}
}
namespace squeeze {
namespace {
template <element::Type_t ET>
// Evaluates Squeeze with an axes input of element type ET: computes the squeezed
// shape and byte-copies the input data (squeeze never changes element values).
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out) {
const auto data_rank = arg0->get_partial_shape().rank().get_length();
const auto axes_num = shape_size(arg1->get_shape());
auto out_shape = arg0->get_shape();
if (axes_num == 0) {
// Empty axes tensor: default behaviour, drop every dimension equal to 1.
out_shape.erase(remove(out_shape.begin(), out_shape.end(), 1), out_shape.end());
} else {
auto norm_axes =
normalize_axes("",
std::vector<int64_t>(arg1->get_data_ptr<ET>(), arg1->get_data_ptr<ET>() + axes_num),
data_rank);
// Descending order (greater<>) so erasing by index does not shift the
// positions of the axes that are still to be removed.
set<size_t, greater<size_t>> ordered_axes(norm_axes.begin(), norm_axes.end());
for (const auto& axis : ordered_axes) {
if (out_shape[axis] != 1) {
throw ngraph_error("Squeeze dimension is not equal to 1");
}
out_shape.erase(out_shape.begin() + axis);
}
}
out->set_shape(out_shape);
runtime::reference::copy(arg0->get_data_ptr<char>(),
out->get_data_ptr<char>(),
shape_size(out_shape) * out->get_element_type().size());
return true;
}
bool evaluate(const HostTensorPtr& arg0, const HostTensorPtr& out) {
    // Single-input squeeze: drop every dimension equal to 1.
    auto squeezed_shape = arg0->get_shape();
    const auto ones_begin = std::remove(squeezed_shape.begin(), squeezed_shape.end(), 1);
    squeezed_shape.erase(ones_begin, squeezed_shape.end());
    out->set_shape(squeezed_shape);

    // Squeeze only reshapes; the data buffer is copied byte-for-byte.
    runtime::reference::copy(arg0->get_data_ptr<char>(),
                             out->get_data_ptr<char>(),
                             shape_size(squeezed_shape) * out->get_element_type().size());
    return true;
}
// Dispatches evaluation on the element type of the axes tensor (arg1).
// Returns false for unsupported axes element types.
bool evaluate_squeeze(const HostTensorPtr& arg0, const HostTensorPtr& arg1, const HostTensorPtr& out) {
auto element_type = arg1->get_element_type();
bool rc = true;
switch (element_type) {
NGRAPH_TYPE_CASE(evaluate_squeeze, i8, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, i16, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, i32, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, i64, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, u8, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, u16, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, u32, arg0, arg1, out);
NGRAPH_TYPE_CASE(evaluate_squeeze, u64, arg0, arg1, out);
default:
rc = false;
break;
}
return rc;
}
// Single-input overload: no axes tensor, forwards to the default-squeeze evaluate.
bool evaluate_squeeze(const HostTensorPtr& arg0, const HostTensorPtr& out) {
return evaluate(arg0, out);
}
} // namespace
} // namespace squeeze
bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
OV_OP_SCOPE(v0_Squeeze_evaluate);
NGRAPH_CHECK(validate_host_tensor_vector(inputs, inputs.size()));
NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1));
if (inputs.size() == 1) {
return squeeze::evaluate_squeeze(inputs[0], outputs[0]);
}
if (has_evaluate()) {
auto output_shapes = std::vector<PartialShape>{outputs[0]->get_partial_shape()};
auto input_shapes = std::vector<PartialShape>{inputs[0]->get_partial_shape()};
auto constant_data = std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>>();
return squeeze::evaluate_squeeze(inputs[0], inputs[1], outputs[0]);
if (inputs.size() == 2) {
input_shapes.push_back(inputs[1]->get_partial_shape());
constant_data.emplace(1, inputs[1]);
}
shape_infer(this, input_shapes, output_shapes, constant_data);
auto out_shape = output_shapes[0].get_shape();
outputs[0]->set_shape(out_shape);
ngraph::runtime::reference::copy(inputs[0]->get_data_ptr<char>(),
outputs[0]->get_data_ptr<char>(),
shape_size(out_shape) * outputs[0]->get_element_type().size());
return true;
}
return false;
}
bool op::v0::Squeeze::has_evaluate() const {

View File

@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/test_assertions.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

View File

@ -2,6 +2,7 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/test_assertions.hpp"
#include "dimension_tracker.hpp"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"

View File

@ -4,187 +4,266 @@
#include <dimension_tracker.hpp>
#include "gtest/gtest.h"
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "ngraph/ngraph.hpp"
#include "sequnce_generator.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, squeeze) {
    // Explicit axes pointing at size-1 dimensions are removed.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 1, 4, 1, 8});
    auto axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{2}, vector<int64_t>{0, 2});
    const auto squeeze = make_shared<op::Squeeze>(data, axes_node);

    ASSERT_EQ(squeeze->get_element_type(), element::f32);
    ASSERT_EQ(squeeze->get_shape(), (Shape{4, 4, 1, 8}));

    // Empty axes tensor: every size-1 dimension is removed.
    axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
    const auto squeeze_default_axes = make_shared<op::Squeeze>(data, axes_node);

    ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32);
    ASSERT_EQ(squeeze_default_axes->get_shape(), (Shape{4, 4, 8}));
}
TEST(type_prop, squeeze_unsqueezable_no_axes) {
    // No dimension can be 1, so the interval shape passes through unchanged.
    const auto in_shape = PartialShape{Dimension(2, 5), Dimension(3, 4), 6};
    const auto data = make_shared<op::Parameter>(element::f32, in_shape);
    const auto squeeze = make_shared<op::Squeeze>(data);

    ASSERT_EQ(squeeze->get_element_type(), element::f32);
    EXPECT_TRUE(squeeze->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(2, 5), Dimension(3, 4), 6}));
}
TEST(type_prop, squeeze_no_axes) {
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 1, 4, 1, 8});

    // Single-input squeeze removes all size-1 dimensions.
    const auto squeeze = make_shared<op::Squeeze>(data);
    ASSERT_EQ(squeeze->get_element_type(), element::f32);
    ASSERT_EQ(squeeze->get_shape(), (Shape{4, 4, 8}));

    // An empty axes constant behaves the same as no axes input.
    const auto empty_axes = make_shared<ngraph::op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
    const auto squeeze_default_axes = make_shared<op::Squeeze>(data, empty_axes);
    ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32);
    ASSERT_EQ(squeeze_default_axes->get_shape(), (Shape{4, 4, 8}));
}
TEST(type_prop, squeeze_dynamic_static_rank) {
    const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(6));

    // Known axes on a static-rank input: rank shrinks by the number of axes.
    auto axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{2}, vector<int64_t>{0, 2});
    const auto squeeze = make_shared<op::Squeeze>(data, axes_node);
    ASSERT_EQ(squeeze->get_element_type(), element::f32);
    EXPECT_TRUE(squeeze->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4)));

    // Empty axes on fully dynamic dimensions: output rank cannot be deduced.
    axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
    const auto squeeze_default_axes = make_shared<op::Squeeze>(data, axes_node);
    ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32);
    EXPECT_TRUE(squeeze_default_axes->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
TEST(type_prop, squeeze_dynamic_dynamic_rank) {
    // Dynamic-rank input: output is always fully dynamic, with or without axes.
    const auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());

    auto axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{2}, vector<int64_t>{0, 2});
    const auto squeeze = make_shared<op::Squeeze>(data, axes_node);
    ASSERT_EQ(squeeze->get_element_type(), element::f32);
    EXPECT_TRUE(squeeze->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));

    axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
    const auto squeeze_default_axes = make_shared<op::Squeeze>(data, axes_node);
    ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32);
    EXPECT_TRUE(squeeze_default_axes->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
TEST(type_prop, squeeze_axes_dynamic) {
    // Axes come from a dynamic Parameter, so their values are unknown at
    // type-prop time and the output shape must be fully dynamic.
    const auto data = make_shared<op::Parameter>(element::f32, Shape{1, 4, 1, 4, 1, 8});
    const auto dynamic_axes = make_shared<ngraph::op::Parameter>(element::u64, PartialShape::dynamic());
    const auto squeeze = make_shared<op::Squeeze>(data, dynamic_axes);

    ASSERT_EQ(squeeze->get_element_type(), element::f32);
    ASSERT_TRUE(squeeze->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
}
using namespace testing;
TEST(type_prop, squeeze_axes_invalid_value) {
auto param = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{2}, vector<int64_t>{0, 2});
auto axes_node = make_shared<op::Constant>(element::u64, Shape{2}, vector<int64_t>{0, 2});
try {
auto squeeze = make_shared<op::Squeeze>(param, axes_node);
FAIL() << "Squeeze axis invalid value not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "provided axis value is invalid. Only axes of size 1 may be removed.");
} catch (...) {
FAIL() << "Deduced type check failed for unexpected reason";
}
OV_EXPECT_THROW(auto s = make_shared<op::Squeeze>(param, axes_node),
NodeValidationFailure,
HasSubstr("provided axis value is invalid. Only axes of size 1 may be removed."));
}
TEST(type_prop, squeeze_axes_invalid_rank) {
auto param = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto axes_node = make_shared<ngraph::op::Constant>(element::i32, Shape{2, 1}, vector<int32_t>{0, 2});
auto axes_node = make_shared<op::Constant>(element::i32, Shape{2, 1}, vector<int32_t>{0, 2});
try {
auto squeeze = make_shared<op::Squeeze>(param, axes_node);
FAIL() << "Squeeze axis invalid rank not detected";
} catch (const NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Second input (axes) should not be of rank higher than 1.");
} catch (...) {
FAIL() << "Deduced type check failed for unexpected reason";
}
}
TEST(type_prop, squeeze_negative_axes) {
auto param = make_shared<op::Parameter>(element::f32, Shape{1, 4, 1, 4, 1, 8});
auto axes_node = make_shared<ngraph::op::Constant>(element::i64, Shape{2}, vector<int64_t>{-6, -4});
auto squeeze = make_shared<op::Squeeze>(param, axes_node);
ASSERT_EQ(squeeze->get_element_type(), element::f32);
ASSERT_EQ(squeeze->get_shape(), (Shape{4, 4, 1, 8}));
axes_node = make_shared<ngraph::op::Constant>(element::u64, Shape{0}, vector<int64_t>{});
auto squeeze_default_axes = make_shared<op::Squeeze>(param, axes_node);
ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32);
ASSERT_EQ(squeeze_default_axes->get_shape(), (Shape{4, 4, 8}));
OV_EXPECT_THROW(auto s = make_shared<op::Squeeze>(param, axes_node),
NodeValidationFailure,
HasSubstr("Second input (axes) should not be of rank higher than 1."));
}
TEST(type_prop, squeeze_incorrect_negative_axes) {
auto param = make_shared<op::Parameter>(element::f32, Shape{1, 4, 1, 4, 1, 8});
auto axes_node = make_shared<ngraph::op::Constant>(element::i64, Shape{2}, vector<int64_t>{-6, -10});
auto axes_node = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{-6, -10});
try {
auto squeeze = make_shared<op::Squeeze>(param, axes_node);
FAIL() << "Squeeze axis invalid value not detected";
} catch (ngraph_error& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Parameter axis -10 out of the tensor rank range");
} catch (...) {
FAIL() << "Deduced type check failed for unexpected reason";
OV_EXPECT_THROW(auto s = make_shared<op::Squeeze>(param, axes_node),
ov::Exception,
HasSubstr("Parameter axis -10 out of the tensor rank range"));
}
// Test parameter: (input shape, squeeze axes, expected output shape).
using TypePropTestParam = std::tuple<PartialShape, std::vector<int64_t>, PartialShape>;
// Parameterized fixture for Squeeze type-prop tests; p_shape/exp_shape/param
// come from UnSqueezeFixture (declared elsewhere).
class SqueezeTest : public WithParamInterface<TypePropTestParam>, public UnSqueezeFixture {
protected:
void SetUp() override {
std::tie(p_shape, axes, exp_shape) = GetParam();
UnSqueezeFixture::SetUp();
}
// Builds (input labels, expected output labels): input dimensions get labels
// 1..rank, and labels on squeezed axes are dropped from the expected set.
std::pair<std::vector<size_t>, std::vector<size_t>> make_in_exp_labels() const {
std::vector<size_t> in_labels;
std::generate_n(std::back_inserter(in_labels), p_shape.size(), ov::SeqGen<size_t>(1));
std::set<int64_t> axes_to_remove;
if (axes.empty()) {
// Empty axes: default squeeze removes every dimension equal to 1
// (all dimensions when the expected rank is dynamic).
for (auto dim = p_shape.begin(); dim != p_shape.end(); ++dim) {
if (*dim == 1 || exp_shape.rank().is_dynamic()) {
axes_to_remove.insert(std::distance(p_shape.begin(), dim));
}
}
} else {
// Normalize negative axes against the input rank.
for (const auto& axis : axes) {
axes_to_remove.insert(axis < 0 ? axis + p_shape.size() : axis);
}
}
auto rm_iter = axes_to_remove.begin();
size_t rm_idx = 0;
auto exp_labels = in_labels;
// axes_to_remove is sorted, so one forward pass removes labels at those indices.
exp_labels.erase(std::remove_if(exp_labels.begin(),
exp_labels.end(),
[&](size_t& label) {
if ((rm_iter != axes_to_remove.end()) && (*rm_iter == rm_idx++)) {
return ++rm_iter, true;
} else {
return false;
}
}),
exp_labels.end());
return {in_labels, exp_labels};
}
std::vector<int64_t> axes;
};
// (input shape, axes, expected output) cases with explicit, non-empty axes.
const auto static_partial_shapes_test_values =
Values(std::make_tuple(PartialShape{1}, std::vector<int64_t>{0}, PartialShape{}),
std::make_tuple(PartialShape{1, 2}, std::vector<int64_t>{0}, PartialShape{2}),
std::make_tuple(PartialShape{1, 2}, std::vector<int64_t>{-2}, PartialShape{2}),
std::make_tuple(PartialShape{1, 2, 1}, std::vector<int64_t>{0}, PartialShape{2, 1}),
std::make_tuple(PartialShape{1, 2}, std::vector<int64_t>{-2, -2}, PartialShape{2}),
std::make_tuple(PartialShape{1, 4, 1, 4, 1, 8}, std::vector<int64_t>{0, 2}, PartialShape{4, 4, 1, 8}),
std::make_tuple(PartialShape{1, 4, 1, 4, 1, 8}, std::vector<int64_t>{-6, -4}, PartialShape{4, 4, 1, 8}));
// Cases with empty axes (default squeeze); dynamic dimensions that may be 1
// make the expected output rank dynamic.
const auto empty_axes_test_values =
Values(std::make_tuple(PartialShape{1, 4, 1, 4, 1, 8}, std::vector<int64_t>{}, PartialShape{4, 4, 8}),
std::make_tuple(PartialShape{Dimension(2, 5), Dimension(3, 4), 6},
std::vector<int64_t>{},
PartialShape{Dimension(2, 5), Dimension(3, 4), 6}),
std::make_tuple(PartialShape::dynamic(6), std::vector<int64_t>{}, PartialShape::dynamic()),
std::make_tuple(PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()},
std::vector<int64_t>{},
PartialShape::dynamic()),
std::make_tuple(PartialShape::dynamic(), std::vector<int64_t>{}, PartialShape::dynamic()));
// Dynamic-input cases: static rank shrinks by axes count; dynamic rank stays dynamic.
INSTANTIATE_TEST_SUITE_P(
type_prop_shrink_dynamic_shape,
SqueezeTest,
Values(std::make_tuple(PartialShape::dynamic(6), std::vector<int64_t>{0, 2}, PartialShape::dynamic(4)),
std::make_tuple(PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()},
std::vector<int64_t>{0, 2},
PartialShape{1}),
std::make_tuple(PartialShape::dynamic(), std::vector<int64_t>{0, 2}, PartialShape::dynamic())),
PrintToStringParamName());
// Static-shape cases with explicit axes.
INSTANTIATE_TEST_SUITE_P(type_prop_shrink_shape,
SqueezeTest,
static_partial_shapes_test_values,
PrintToStringParamName());
// Default-squeeze cases (empty axes input).
INSTANTIATE_TEST_SUITE_P(type_prop_shrink_shape_default_axes,
SqueezeTest,
empty_axes_test_values,
PrintToStringParamName());
TEST_P(SqueezeTest, partial_shape_dimension_propagation_const_axis_i32) {
    // Axes supplied as an i32 constant; partial dimensions must propagate.
    const auto axes_const = std::make_shared<op::Constant>(element::i32, Shape{axes.size()}, axes);
    const auto squeeze_op = std::make_shared<op::v0::Squeeze>(param, axes_const);

    EXPECT_EQ(squeeze_op->get_element_type(), element::f32);
    EXPECT_EQ(squeeze_op->get_output_partial_shape(0), exp_shape);
}
TEST(type_prop, squeeze_scalar_axes) {
auto param = make_shared<op::Parameter>(element::f32, Shape{1, 4, 1, 4, 1, 8});
auto axes_node = make_shared<ngraph::op::Constant>(element::i64, Shape{}, vector<int64_t>{2});
auto squeeze = make_shared<op::Squeeze>(param, axes_node);
TEST_P(SqueezeTest, partial_shape_dimension_propagation_parameter_axes_no_data) {
const auto axes_node = std::make_shared<op::Parameter>(element::u64, PartialShape{Shape{axes.size()}});
const auto squeeze = std::make_shared<op::v0::Squeeze>(param, axes_node);
ASSERT_EQ(squeeze->get_element_type(), element::f32);
ASSERT_EQ(squeeze->get_shape(), (Shape{1, 4, 4, 1, 8}));
int squeeze_index = 0;
axes_node = make_shared<ngraph::op::Constant>(element::i64, Shape{}, squeeze_index);
squeeze = make_shared<op::Squeeze>(param, axes_node);
ASSERT_EQ(squeeze->get_element_type(), element::f32);
ASSERT_EQ(squeeze->get_shape(), (Shape{4, 1, 4, 1, 8}));
EXPECT_EQ(squeeze->get_element_type(), element::f32);
EXPECT_EQ(squeeze->get_output_partial_shape(0), PartialShape::dynamic());
}
TEST(type_prop, squeeze_dynamic_value_and_label_propagation) {
Dimension marked_0 = Dimension(3);
ov::DimensionTracker::set_label(marked_0, 10);
PartialShape target_0 = PartialShape{marked_0, 4};
TEST_P(SqueezeTest, partial_shape_dimension_propagation_dynamic_axes) {
const auto axes_node = std::make_shared<op::Parameter>(element::u64, PartialShape::dynamic());
const auto squeeze = std::make_shared<op::v0::Squeeze>(param, axes_node);
auto param = std::make_shared<op::Parameter>(element::f32, Shape{1});
auto param_0 = std::make_shared<op::Parameter>(element::f32, target_0);
auto shape_0 = std::make_shared<op::ShapeOf>(param_0);
EXPECT_EQ(squeeze->get_element_type(), element::f32);
EXPECT_EQ(squeeze->get_output_partial_shape(0), PartialShape::dynamic());
}
const auto& et = element::i64;
std::vector<int64_t> zero{0};
const auto indices = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
// Checks that dimension labels survive squeeze on the remaining dimensions.
TEST_P(SqueezeTest, labels_propagation) {
if (p_shape.rank().is_dynamic()) {
GTEST_SKIP() << "No dimension to set label";
}
std::vector<size_t> in_labels, exp_labels;
std::tie(in_labels, exp_labels) = make_in_exp_labels();
// Label the input dimensions, then rebuild the parameter with the labeled shape.
set_shape_labels(p_shape, in_labels);
param = make_shared<op::Parameter>(element::f32, p_shape);
const auto axes_node = std::make_shared<op::Constant>(element::i32, Shape{axes.size()}, axes);
const auto squeeze = std::make_shared<op::v0::Squeeze>(param, axes_node);
EXPECT_EQ(get_shape_labels(squeeze->get_output_partial_shape(0)), exp_labels);
}
// Same fixture reused for fully static shapes (to_shape() conversions below).
using SqueezeShapeTests = SqueezeTest;
INSTANTIATE_TEST_SUITE_P(type_prop_shrink_shape_no_axes,
SqueezeShapeTests,
static_partial_shapes_test_values,
PrintToStringParamName());
// Static Shape in, axes as i64 constant; output must be the exact static shape.
TEST_P(SqueezeShapeTests, shape_dimension_propagation_const_axis_i64) {
param = std::make_shared<op::Parameter>(element::f64, p_shape.to_shape());
const auto axes_node = std::make_shared<op::Constant>(element::i64, Shape{axes.size()}, axes);
const auto squeeze = std::make_shared<op::v0::Squeeze>(param, axes_node);
EXPECT_EQ(squeeze->get_element_type(), element::f64);
EXPECT_EQ(squeeze->get_output_partial_shape(0), exp_shape.to_shape());
}
// Single-input Squeeze (no axes node at all) over the empty-axes cases.
using SqueezeNoAxesTest = SqueezeTest;
INSTANTIATE_TEST_SUITE_P(type_prop_shrink_shape_no_axes,
SqueezeNoAxesTest,
empty_axes_test_values,
PrintToStringParamName());
TEST_P(SqueezeNoAxesTest, partial_shape_dimension_propagation_no_axes) {
const auto squeeze = std::make_shared<op::v0::Squeeze>(param);
EXPECT_EQ(squeeze->get_element_type(), element::f32);
EXPECT_EQ(squeeze->get_output_partial_shape(0), exp_shape);
}
// Single-axis cases exercised with a scalar (rank-0) axes constant.
using SqueezeScalarAxisTest = SqueezeTest;
INSTANTIATE_TEST_SUITE_P(
type_prop_shrink_shape_no_axes,
SqueezeScalarAxisTest,
Values(std::make_tuple(PartialShape{1, 2}, std::vector<int64_t>{0}, PartialShape{2}),
std::make_tuple(PartialShape{3, 1, 2}, std::vector<int64_t>{1}, PartialShape{3, 2}),
std::make_tuple(PartialShape{3, 1, 2, 1, 1, 5}, std::vector<int64_t>{4}, PartialShape{3, 1, 2, 1, 5})),
PrintToStringParamName());
// Scalar axes constant built from a one-element vector.
TEST_P(SqueezeScalarAxisTest, axis_value_as_vector) {
const auto axes_node = std::make_shared<op::Constant>(element::i32, Shape{}, axes);
const auto squeeze = std::make_shared<op::v0::Squeeze>(param, axes_node);
EXPECT_EQ(squeeze->get_element_type(), element::f32);
EXPECT_EQ(squeeze->get_output_partial_shape(0), exp_shape);
}
// Scalar axes constant built from a single integer value.
TEST_P(SqueezeScalarAxisTest, axis_value_as_integer) {
const auto axes_node = std::make_shared<op::Constant>(element::i32, Shape{}, axes.front());
const auto squeeze = std::make_shared<op::v0::Squeeze>(param, axes_node);
EXPECT_EQ(squeeze->get_element_type(), element::f32);
EXPECT_EQ(squeeze->get_output_partial_shape(0), exp_shape);
}
// Bound/value propagation cases: (input shape of the traced parameter,
// expected first dimension after the ShapeOf->Gather->Unsqueeze->Squeeze chain).
using SqueezeBoundTest = UnSqueezeBoundTest;
INSTANTIATE_TEST_SUITE_P(
type_prop_bounds_propagate,
SqueezeBoundTest,
Values(std::make_tuple(PartialShape::dynamic(6), PartialShape::dynamic(1)),
std::make_tuple(PartialShape{Dimension(-1)}, PartialShape{Dimension(-1)}),
std::make_tuple(PartialShape{Dimension::dynamic(), 8}, PartialShape{Dimension::dynamic()}),
std::make_tuple(PartialShape{Dimension(4, 8), Dimension::dynamic()}, PartialShape{Dimension(4, 8)}),
std::make_tuple(PartialShape{Dimension(20, -1), Dimension::dynamic()}, PartialShape::dynamic(1)),
std::make_tuple(PartialShape{Dimension(-1, 5), Dimension::dynamic()}, PartialShape{Dimension(-1, 5)}),
std::make_tuple(PartialShape{15}, PartialShape{15}),
std::make_tuple(PartialShape{2, 6}, PartialShape{2})),
PrintToStringParamName());
/**
* \brief Check label and dynamic value propagation.
*
* Test use evaluate label, lower/upper.
*/
TEST_P(SqueezeBoundTest, propagate_label_and_dynamic_value) {
PartialShape labeled_shape = PartialShape{p_shape};
std::generate_n(std::back_inserter(in_labels), labeled_shape.size(), ov::SeqGen<size_t>(1));
set_shape_labels(labeled_shape, in_labels);
constexpr auto et = element::i64;
const auto labeled_param = std::make_shared<op::Parameter>(et, labeled_shape);
const auto labeled_shape_of = std::make_shared<op::ShapeOf>(labeled_param);
const auto zero = std::vector<int64_t>{0};
const auto axis = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
const auto gather = std::make_shared<op::v7::Gather>(shape_0, indices, axis);
const auto indices = std::make_shared<op::v0::Constant>(et, Shape{}, zero);
const auto gather = std::make_shared<op::v7::Gather>(labeled_shape_of, indices, axis);
const auto axis_1 = std::make_shared<op::v0::Constant>(et, Shape{2}, std::vector<int64_t>{0, 1});
const auto unsqueeze = std::make_shared<op::v0::Unsqueeze>(gather, axis_1);
const auto squeeze = std::make_shared<op::v0::Squeeze>(unsqueeze, axis);
auto bc = std::make_shared<op::v1::Broadcast>(param, squeeze);
ASSERT_EQ(bc->get_shape(), (Shape{3}));
const auto bc = std::make_shared<op::v1::Broadcast>(param, squeeze);
const auto& output_shape = bc->get_output_partial_shape(0);
ASSERT_EQ(ov::DimensionTracker::get_label(output_shape[0]), 10);
EXPECT_EQ(bc->get_output_partial_shape(0), exp_shape);
const auto labels = get_shape_labels(bc->get_output_partial_shape(0));
EXPECT_THAT(labels, ElementsAre(in_labels.front()));
}

View File

@ -79,24 +79,13 @@ TEST(type_prop, unsqueeze_empty_axes) {
FAIL() << "Deduced type check failed for unexpected reason";
}
}
class UnsqueezeTestCommon : public Test {
protected:
void SetUp() override {
param = std::make_shared<op::Parameter>(element::f32, p_shape);
}
PartialShape p_shape, exp_shape;
std::shared_ptr<op::Parameter> param;
};
using TypePropTestParam = std::tuple<PartialShape, std::vector<int64_t>, PartialShape>;
class UnsqueezeTest : public WithParamInterface<TypePropTestParam>, public UnsqueezeTestCommon {
class UnsqueezeTest : public WithParamInterface<TypePropTestParam>, public UnSqueezeFixture {
protected:
void SetUp() override {
std::tie(p_shape, axes, exp_shape) = GetParam();
UnsqueezeTestCommon::SetUp();
UnSqueezeFixture::SetUp();
}
std::pair<std::vector<size_t>, std::vector<size_t>> make_in_exp_labels() const {
@ -257,17 +246,7 @@ TEST_P(UnsqueezeTest, labels_propagation) {
EXPECT_EQ(get_shape_labels(unsqueeze->get_output_partial_shape(0)), exp_labels);
}
// Parameter pack for bound-propagation tests: (input shape, expected shape).
using BoundTestParam = std::tuple<PartialShape, PartialShape>;
// Fixture for Unsqueeze bound/label propagation tests; unpacks the test
// parameters and then builds the Parameter node via the common SetUp.
class UnsqueezeBoundTest : public WithParamInterface<BoundTestParam>, public UnsqueezeTestCommon {
protected:
void SetUp() override {
std::tie(p_shape, exp_shape) = GetParam();
UnsqueezeTestCommon::SetUp();
}
// Labels applied to the input shape by individual test cases.
std::vector<size_t> in_labels;
};
using UnsqueezeBoundTest = UnSqueezeBoundTest;
INSTANTIATE_TEST_SUITE_P(
type_prop_bounds_propagate,

View File

@ -61,6 +61,7 @@
#include "space_to_batch_shape_inference.hpp"
#include "space_to_depth_shape_inference.hpp"
#include "split_shape_inference.hpp"
#include "squeeze_shape_inference.hpp"
#include "static_shape.hpp"
#include "strided_slice_shape_inference.hpp"
#include "tile_shape_inference.hpp"

View File

@ -31,34 +31,6 @@ TEST(StaticShapeInferenceTest, ReshapeTest) {
ASSERT_EQ(static_output_shapes[0], StaticShape({3, 150}));
}
// Static shape inference for Squeeze with constant axes {-3, 0}.
TEST(StaticShapeInferenceTest, SqueezeTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto pattern = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{-3, 0});
auto reduce =
std::make_shared<op::v0::Squeeze>(data, pattern);
// NOTE(review): the Parameter is rank 4 but the static input shape is rank 5 —
// presumably only the static shapes feed the inference here; confirm against
// the shape_inference contract.
std::vector<StaticShape> static_input_shapes = {StaticShape{1, 6, 1, 7, 1}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
shape_inference(reduce.get(), static_input_shapes, static_output_shapes);
// Axes {-3, 0} normalize to {2, 0}; both dims are 1 and get removed, while the
// trailing 1 at axis 4 is not listed and therefore kept.
ASSERT_EQ(static_output_shapes[0], StaticShape({6, 7, 1}));
}
// Static shape inference for Unsqueeze with constant axes {-3, 0}.
TEST(StaticShapeInferenceTest, UnsqueezeTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});
auto pattern = std::make_shared<ov::op::v0::Constant>(element::i32, Shape{2}, std::vector<int32_t>{-3, 0});
auto reduce =
std::make_shared<op::v0::Unsqueeze>(data, pattern);
// NOTE(review): the Parameter is rank 4 but the static input shape is rank 5 —
// presumably only the static shapes feed the inference here; confirm.
std::vector<StaticShape> static_input_shapes = {StaticShape{2, 3, 4, 5, 6}, StaticShape{2}},
static_output_shapes = {StaticShape{}};
shape_inference(reduce.get(), static_input_shapes, static_output_shapes);
// New size-1 dims are inserted at output axes 0 and 4 (from -3 on rank 7).
ASSERT_EQ(static_output_shapes[0], StaticShape({1, 2, 3, 4, 1, 5, 6}));
}
TEST(StaticShapeInferenceTest, ShapeOf5DTest) {
auto data = std::make_shared<ov::op::v0::Parameter>(element::f32, PartialShape{-1, -1, -1, -1});

View File

@ -0,0 +1,119 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "common_test_utils/test_assertions.hpp"
#include "gmock/gmock.h"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/squeeze.hpp"
#include "squeeze_shape_inference.hpp"
#include "utils.hpp"
using namespace ov;
using namespace ov::intel_cpu;
using namespace testing;
// Fixture for Squeeze static shape inference failure tests; prepares a single
// empty output-shape slot, input shapes are filled per test.
class SqueezeStaticShapeInferenceAssertTest : public OpStaticShapeInferenceTest<op::v0::Squeeze> {
protected:
void SetUp() override {
// Squeeze produces exactly one output.
output_shapes = ShapeVector(1);
}
};
// Axes come from a Parameter, so shape inference has no constant data to read
// and must fail with the missing-constant check.
TEST_F(SqueezeStaticShapeInferenceAssertTest, no_axes) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f64, PartialShape{-1, -1});
    const auto axes_param = std::make_shared<op::v0::Parameter>(element::i64, PartialShape{1});
    const auto squeeze = make_op(data, axes_param);

    input_shapes = ShapeVector{{5, 6}, axes_param->get_shape()};

    OV_EXPECT_THROW(shape_inference(squeeze.get(), input_shapes, output_shapes),
                    NodeValidationFailure,
                    HasSubstr("Check 'constant != nullptr'"));
}
// Even with fully static input shapes, inference must fail when axes values
// are supplied by a Parameter and no constant data map entry is given.
TEST_F(SqueezeStaticShapeInferenceAssertTest, parameter_static_shape_axes_no_data) {
    const auto data = std::make_shared<op::v0::Parameter>(element::f64, Shape{2, 1, 3, 1});
    const auto axes_param = std::make_shared<op::v0::Parameter>(element::i64, Shape{2});
    const auto squeeze = make_op(data, axes_param);

    input_shapes = ShapeVector{data->get_shape(), axes_param->get_shape()};

    OV_EXPECT_THROW(shape_inference(squeeze.get(), input_shapes, output_shapes),
                    NodeValidationFailure,
                    HasSubstr("Check 'constant != nullptr'"));
}
// Parameter pack: (data+axes input shapes, squeeze axes values, expected shape).
using TestParams = std::tuple<ShapeVector, // Input shapes
std::vector<int64_t>, // Squeeze axes
StaticShape // Expected shape
>;
// Parameterized fixture for positive Squeeze static shape inference tests.
class SqueezeStaticShapeInferenceTest : public SqueezeStaticShapeInferenceAssertTest,
public WithParamInterface<TestParams> {
protected:
void SetUp() override {
SqueezeStaticShapeInferenceAssertTest::SetUp();
std::tie(input_shapes, axes, exp_shape) = GetParam();
output_shapes = ShapeVector(1);
// Data parameter is built from the first (data) input shape of the case.
arg = std::make_shared<op::v0::Parameter>(element::f32, input_shapes.front().get_shape());
}
// Axes values to squeeze; consumed by the TEST_P bodies to build the axes input.
std::vector<int64_t> axes;
std::shared_ptr<op::v0::Parameter> arg;
};
// 1-D inputs: squeezing the only (size-1) dimension yields a scalar shape.
INSTANTIATE_TEST_SUITE_P(1d_shapes,
SqueezeStaticShapeInferenceTest,
Values(make_tuple(ShapeVector{{1}, {1}}, std::vector<int64_t>{-1}, StaticShape({})),
make_tuple(ShapeVector{{1}, {1}}, std::vector<int64_t>{0}, StaticShape({}))),
PrintToStringParamName());
// Multi-dimensional inputs: positive/negative axes in any order, zero-sized
// dims preserved, and empty axes list squeezing all size-1 dims.
INSTANTIATE_TEST_SUITE_P(
multi_dim_shapes,
SqueezeStaticShapeInferenceTest,
Values(make_tuple(ShapeVector{{1, 2, 3, 1}, {2}}, std::vector<int64_t>{0, 3}, StaticShape({2, 3})),
make_tuple(ShapeVector{{2, 1, 1, 4}, {2}}, std::vector<int64_t>{2, 1}, StaticShape({2, 4})),
make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{0, 2, 4}, StaticShape({3, 2})),
make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{4, 2, 0}, StaticShape({3, 2})),
make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{2, 0, 4}, StaticShape({3, 2})),
make_tuple(ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {4}},
std::vector<int64_t>{1, -1, 3, -2},
StaticShape({10, 0, 3})),
make_tuple(ShapeVector{{10, 1, 0, 1, 3, 1, 1}, {}}, std::vector<int64_t>{}, StaticShape({10, 0, 3})),
make_tuple(ShapeVector{{2, 1, 7, 8, 3}, {1}}, std::vector<int64_t>{1}, StaticShape({2, 7, 8, 3}))),
PrintToStringParamName());
// Repeated axes (including mixed positive/negative aliases of the same dim)
// must be squeezed only once.
INSTANTIATE_TEST_SUITE_P(
multi_dim_shapes_repeated_axis,
SqueezeStaticShapeInferenceTest,
Values(make_tuple(ShapeVector{{2, 1, 3}, {2}}, std::vector<int64_t>{1, 1}, StaticShape({2, 3})),
make_tuple(ShapeVector{{3, 1, 2, 1}, {3}}, std::vector<int64_t>{1, -1, 1}, StaticShape({3, 2})),
make_tuple(ShapeVector{{3, 1, 2, 1}, {3}}, std::vector<int64_t>{1, -1, 1, -1}, StaticShape({3, 2})),
make_tuple(ShapeVector{{1, 3, 1, 2, 1}, {3}}, std::vector<int64_t>{2, -1, 2, -1, 0}, StaticShape({3, 2})),
make_tuple(ShapeVector{{2, 6, 7, 8, 1}, {2}}, std::vector<int64_t>{-1, -1}, StaticShape({2, 6, 7, 8}))),
PrintToStringParamName());
// Axes are provided as a Constant node, so inference works without a
// constant-data map.
TEST_P(SqueezeStaticShapeInferenceTest, shape_inference_empty_const_map) {
    const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, Shape{axes.size()}, axes);
    const auto squeeze = make_op(arg, axes_const);

    shape_inference(squeeze.get(), input_shapes, output_shapes);

    ASSERT_EQ(output_shapes.front(), exp_shape);
}
// Axes input is a Parameter (no data in the graph); the values are supplied
// through the constant-data map instead.
TEST_P(SqueezeStaticShapeInferenceTest, shape_inference_with_const_map) {
    const auto axes_node = std::make_shared<op::v0::Parameter>(element::i64, Shape{1});
    const auto op = make_op(arg, axes_node);

    const auto axes_const = std::make_shared<op::v0::Constant>(element::i64, ov::Shape{axes.size()}, axes);
    const auto axes_tensor = std::make_shared<ngraph::runtime::HostTensor>(axes_const);
    // Hold the map by value: the previous `const std::map<...>&` bound a
    // reference to a temporary initializer-list map, relying on lifetime
    // extension for no benefit.
    const std::map<size_t, std::shared_ptr<ngraph::runtime::HostTensor>> constant_data = {{1, axes_tensor}};

    shape_inference(op.get(), input_shapes, output_shapes, constant_data);

    ASSERT_EQ(output_shapes.front(), exp_shape);
}

View File

@ -11,6 +11,7 @@
#include "primitive_type_base.h"
#include "reshape_inst.h"
#include "shape_nodes.hpp"
#include "squeeze_shape_inference.hpp"
#include "unsqueeze_shape_inference.hpp"
namespace cldnn {

View File

@ -4,8 +4,6 @@
#pragma once
#pragma once
#include <gtest/gtest.h>
#include <string>
@ -15,6 +13,7 @@
#include <ie_blob.h>
#include <ie_common.h>
#include <ie_preprocess.hpp>
#include "openvino/util/pp.hpp"
inline bool strContains(const std::string & str, const std::string & substr) {
return str.find(substr) != std::string::npos;
@ -74,6 +73,16 @@ inline bool strDoesnotContain(const std::string & str, const std::string & subst
} \
}
// Checks that `statement` throws `exception` whose what() message satisfies
// gmock `exception_what_matcher`; fails on no throw or a different exception.
// Wrapped in do { } while (0) so the expansion is a single statement and is
// safe inside unbraced if/else (CERT PRE10-C).
#define OV_EXPECT_THROW(statement, exception, exception_what_matcher)          \
    do {                                                                       \
        try {                                                                  \
            GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement);         \
            FAIL() << "Expected exception " << OV_PP_TOSTRING(exception);      \
        } catch (const exception& ex) {                                        \
            EXPECT_THAT(ex.what(), exception_what_matcher);                    \
        } catch (...) {                                                        \
            FAIL() << "Unknown exception";                                     \
        }                                                                      \
    } while (0)
inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) {
ASSERT_EQ(lhs.get(), rhs.get());
//TODO: add blob specific comparison for general case

View File

@ -6,20 +6,10 @@
#include "gmock/gmock.h"
#include "openvino/core/partial_shape.hpp"
#include "openvino/util/pp.hpp"
#include "openvino/op/parameter.hpp"
#define EXPECT_HAS_SUBSTRING(haystack, needle) EXPECT_PRED_FORMAT2(testing::IsSubstring, needle, haystack)
// Asserts `statement` throws `exception` with what() matching the gmock
// `exception_what_matcher`; fails on no throw or any other exception type.
#define OV_EXPECT_THROW(statement, exception, exception_what_matcher) \
try { \
statement; \
FAIL() << "Expected exception " << OV_PP_TOSTRING(exception); \
} catch (const exception& ex) { \
EXPECT_THAT(ex.what(), exception_what_matcher); \
} catch (...) { \
FAIL() << "Unknown exception"; \
}
struct PrintToDummyParamName {
template <class ParamType>
std::string operator()(const ::testing::TestParamInfo<ParamType>& info) const {
@ -30,3 +20,29 @@ struct PrintToDummyParamName {
std::vector<size_t> get_shape_labels(const ov::PartialShape& p_shape);
void set_shape_labels(ov::PartialShape& p_shape, const std::vector<size_t>& labels);
/**
* \brief Test fixture for Unsqueeze/Squeeze type_prop tests.
*
* Derived fixtures fill p_shape (e.g. from GetParam()) before this SetUp runs.
*/
class UnSqueezeFixture : public testing::Test {
protected:
// Builds the f32 data Parameter from the shape under test.
void SetUp() override {
param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, p_shape);
}
// p_shape: input partial shape under test; exp_shape: expected output shape.
ov::PartialShape p_shape, exp_shape;
std::shared_ptr<ov::op::v0::Parameter> param;
};
// Parameter pack for bound-propagation tests: (input shape, expected shape).
using BoundTestParam = std::tuple<ov::PartialShape, ov::PartialShape>;
/** \brief Test fixture for Unsqueeze/Squeeze type_prop bound tests. */
class UnSqueezeBoundTest : public testing::WithParamInterface<BoundTestParam>, public UnSqueezeFixture {
protected:
// Unpacks the case parameters, then builds the Parameter via the base SetUp.
void SetUp() override {
std::tie(p_shape, exp_shape) = GetParam();
UnSqueezeFixture::SetUp();
}
// Labels applied to the input shape by individual test cases.
std::vector<size_t> in_labels;
};