diff --git a/src/core/tests/conditional_compilation/ngraph_cc_collect.cpp b/src/core/tests/conditional_compilation/ngraph_cc_collect.cpp
index c3d6b19dd49..2495f9201f8 100644
--- a/src/core/tests/conditional_compilation/ngraph_cc_collect.cpp
+++ b/src/core/tests/conditional_compilation/ngraph_cc_collect.cpp
@@ -2,12 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
+#include
-#include "gtest/gtest.h"
+#include "openvino/op/abs.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset.hpp"
 #ifdef SELECTIVE_BUILD_ANALYZER
 # define SELECTIVE_BUILD_ANALYZER_ON
diff --git a/src/core/tests/conditional_compilation/ngraph_cc_off.cpp b/src/core/tests/conditional_compilation/ngraph_cc_off.cpp
index 17aadecb761..b57b6a2bfff 100644
--- a/src/core/tests/conditional_compilation/ngraph_cc_off.cpp
+++ b/src/core/tests/conditional_compilation/ngraph_cc_off.cpp
@@ -2,12 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
+#include
-#include "gtest/gtest.h"
+#include "openvino/op/abs.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset.hpp"
 #ifdef SELECTIVE_BUILD_ANALYZER
 # define SELECTIVE_BUILD_ANALYZER_ON
diff --git a/src/core/tests/conditional_compilation/ngraph_cc_on.cpp b/src/core/tests/conditional_compilation/ngraph_cc_on.cpp
index 5d6ccbcc1f8..0be632ed916 100644
--- a/src/core/tests/conditional_compilation/ngraph_cc_on.cpp
+++ b/src/core/tests/conditional_compilation/ngraph_cc_on.cpp
@@ -2,12 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
-#include
-#include
-#include
+#include
-#include "gtest/gtest.h"
+#include "openvino/op/abs.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset.hpp"
 #ifdef SELECTIVE_BUILD_ANALYZER
 # define SELECTIVE_BUILD_ANALYZER_ON
@@ -33,7 +32,7 @@ TEST(conditional_compilation, disabled_op_scope) {
     EXPECT_EQ(n, 42);
     // Simple Scope1 is disabled and throws exception
-    ASSERT_THROW(OV_OP_SCOPE(Scope1), ngraph::ngraph_error);
+    ASSERT_THROW(OV_OP_SCOPE(Scope1), ov::Exception);
 #undef ov_op_Scope0
 }
diff --git a/src/core/tests/frontend/decoder_transformation_extension.cpp b/src/core/tests/frontend/decoder_transformation_extension.cpp
index ba0cc168efa..b0f2c6fad8e 100644
--- a/src/core/tests/frontend/decoder_transformation_extension.cpp
+++ b/src/core/tests/frontend/decoder_transformation_extension.cpp
@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include
+#include
-#include "gtest/gtest.h"
 #include "openvino/frontend/extension/decoder_transformation.hpp"
+#include "openvino/pass/manager.hpp"
 using namespace ov::frontend;
diff --git a/src/core/tests/frontend/frontend_manager.cpp b/src/core/tests/frontend/frontend_manager.cpp
index bec98268851..1e62a2a39e3 100644
--- a/src/core/tests/frontend/frontend_manager.cpp
+++ b/src/core/tests/frontend/frontend_manager.cpp
@@ -2,15 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include
+#include
+
 #include
-#include
-#include
 #include "common_test_utils/file_utils.hpp"
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "ngraph/file_util.hpp"
-#include "ngraph/util.hpp"
+#include "openvino/frontend/exception.hpp"
+#include "openvino/frontend/manager.hpp"
 #include "openvino/util/file_util.hpp"
 using namespace ov::frontend;
@@ -156,9 +155,9 @@ TEST(FrontEndManagerTest, testDefaultInputModel) {
     ASSERT_ANY_THROW(im->cut_and_add_new_output(nullptr, ""));
     ASSERT_ANY_THROW(im->add_output(nullptr));
     ASSERT_ANY_THROW(im->remove_output(nullptr));
-    ASSERT_ANY_THROW(im->set_partial_shape(nullptr, ngraph::Shape{}));
+    ASSERT_ANY_THROW(im->set_partial_shape(nullptr, ov::Shape{}));
     ASSERT_ANY_THROW(im->get_partial_shape(nullptr));
-    ASSERT_ANY_THROW(im->set_element_type(nullptr, ngraph::element::Type{}));
+    ASSERT_ANY_THROW(im->set_element_type(nullptr, ov::element::Type{}));
     ASSERT_ANY_THROW(im->set_tensor_value(nullptr, nullptr));
     ASSERT_ANY_THROW(im->set_tensor_partial_value(nullptr, nullptr, nullptr));
 }
diff --git a/src/core/tests/frontend/mock_frontend.cpp b/src/core/tests/frontend/mock_frontend.cpp
index 64294380511..a70c4d54e1a 100644
--- a/src/core/tests/frontend/mock_frontend.cpp
+++ b/src/core/tests/frontend/mock_frontend.cpp
@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph/visibility.hpp"
 #include "openvino/frontend/exception.hpp"
 #include "openvino/frontend/manager.hpp"
 #include "openvino/frontend/visibility.hpp"
@@ -10,7 +9,6 @@
 #define MOCK_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS
-using namespace ngraph;
 using namespace ov::frontend;
 class InputModelMock : public InputModel {
@@ -102,16 +100,16 @@ public:
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
     }
-    void set_partial_shape(const Place::Ptr& place, const PartialShape& shape) override {
+    void set_partial_shape(const Place::Ptr& place, const ov::PartialShape& shape) override {
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
     }
-    PartialShape get_partial_shape(const Place::Ptr& place) const override {
+    ov::PartialShape get_partial_shape(const Place::Ptr& place) const override {
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
         return {};
     }
-    void set_element_type(const Place::Ptr& place, const element::Type& type) override {
+    void set_element_type(const Place::Ptr& place, const ov::element::Type& type) override {
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
     }
@@ -185,13 +183,14 @@ public:
     std::shared_ptr convert(const InputModel::Ptr& model) const override {
         FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception");
-        auto shape = Shape{1, 2, 300, 300};
+        auto shape = ov::Shape{1, 2, 300, 300};
         auto param = std::make_shared(ov::element::f32, shape);
         std::vector data(ov::shape_size(shape), 1.f);
         auto constant = ov::opset8::Constant::create(ov::element::f32, shape, data);
         auto op = std::make_shared(param, constant);
         auto res = std::make_shared(op);
-        auto ov_model = std::make_shared(ResultVector({res}), ParameterVector({param}), "mock1_model");
+        auto ov_model =
+            std::make_shared(ov::ResultVector({res}), ov::ParameterVector({param}), "mock1_model");
         ov_model->get_rt_info()["mock_test"] = std::string(1024, 't');
         return ov_model;
     }
diff --git a/src/core/tests/frontend/progress_reporter.cpp b/src/core/tests/frontend/progress_reporter.cpp
index aca7eb30c60..93b2f18d7b5 100644
--- a/src/core/tests/frontend/progress_reporter.cpp
+++ b/src/core/tests/frontend/progress_reporter.cpp
@@ -4,7 +4,8 @@
 #include "openvino/frontend/extension/progress_reporter.hpp"
-#include "gtest/gtest.h"
+#include
+
 #include "openvino/frontend/exception.hpp"
 using namespace ov::frontend;
diff --git a/src/core/tests/pass/serialization/cleanup.cpp b/src/core/tests/pass/serialization/cleanup.cpp
index 8ab1f60ab46..a2e19cfc5fa 100644
--- a/src/core/tests/pass/serialization/cleanup.cpp
+++ b/src/core/tests/pass/serialization/cleanup.cpp
@@ -29,7 +29,7 @@ protected:
 };
 namespace {
-std::shared_ptr CreateTestFunction(const std::string& name, const ngraph::PartialShape& ps) {
+std::shared_ptr create_test_model(const std::string& name, const ov::PartialShape& ps) {
     const auto param = std::make_shared(ov::element::f16, ps);
     const auto convert = std::make_shared(param, ov::element::f32);
     const auto result = std::make_shared(convert);
@@ -38,7 +38,7 @@ std::shared_ptr CreateTestFunction(const std::string& name, co
 }  // namespace
 TEST_F(SerializationCleanupTest, SerializationShouldWork) {
-    const auto f = CreateTestFunction("StaticFunction", ngraph::PartialShape{2, 2});
+    const auto f = create_test_model("StaticFunction", ov::PartialShape{2, 2});
     ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(f);
@@ -48,7 +48,7 @@ TEST_F(SerializationCleanupTest, SerializationShouldWork) {
 }
 TEST_F(SerializationCleanupTest, SerializationShouldWorkWithDynamicFunction) {
-    const auto f = CreateTestFunction("DynamicFunction", ngraph::PartialShape{ngraph::Dimension()});
+    const auto f = create_test_model("DynamicFunction", ov::PartialShape{ov::Dimension()});
     ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(f);
diff --git a/src/core/tests/pass/serialization/const_compression.cpp b/src/core/tests/pass/serialization/const_compression.cpp
index f5aa1354b13..134e4e15854 100644
--- a/src/core/tests/pass/serialization/const_compression.cpp
+++ b/src/core/tests/pass/serialization/const_compression.cpp
@@ -44,9 +44,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsI32) {
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -61,9 +61,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsI64) {
     auto A = ov::opset8::Constant::create(ov::element::i64, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::i64, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -78,9 +78,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsFP16) {
     auto A = ov::opset8::Constant::create(ov::element::f16, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::f16, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -95,9 +95,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsFP32) {
     auto A = ov::opset8::Constant::create(ov::element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -113,9 +113,9 @@ TEST_F(SerializationConstantCompressionTest, NonIdenticalConstantsI64) {
     auto A = ov::opset8::Constant::create(ov::element::i64, shape, {2, 2});
     auto B = ov::opset8::Constant::create(ov::element::i64, shape, {0, 128});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -132,9 +132,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsTimesTwo) {
     auto C = ov::opset8::Constant::create(ov::element::i32, shape, {0, 3, 1, 2, 5, 6, 25, 3});
     auto D = ov::opset8::Constant::create(ov::element::i32, shape, {0, 3, 1, 2, 5, 6, 25, 3});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B, C, D}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B, C, D}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -153,9 +153,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsTimesTwoMultipleO
     auto E = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto F = ov::opset8::Constant::create(ov::element::i32, shape, {0, 3, 1, 2, 5, 6, 25, 3});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B, C, D, E, F}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B, C, D, E, F}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -170,9 +170,9 @@ TEST_F(SerializationConstantCompressionTest, NonIdenticalConstants) {
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::i32, shape, {2, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -187,9 +187,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsDifferentTypesI32
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 0, 2, 0, 3, 0, 4, 0});
     auto B = ov::opset8::Constant::create(ov::element::i64, ov::Shape({1, 2, 2}), {1, 2, 3, 4});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -204,9 +204,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsDifferentTypesI32
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2});
     auto B = ov::opset8::Constant::create(ov::element::i8, ov::Shape({1, 2, 4}), {1, 0, 0, 0, 2, 0, 0, 0});
-    auto ngraph_a = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
+    auto model = std::make_shared(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
diff --git a/src/core/tests/pass/serialization/custom_ops.cpp b/src/core/tests/pass/serialization/custom_ops.cpp
index 15d1eb2b018..13bcb14ead2 100644
--- a/src/core/tests/pass/serialization/custom_ops.cpp
+++ b/src/core/tests/pass/serialization/custom_ops.cpp
@@ -3,12 +3,13 @@
 //
 #include
-#include
 #include "common_test_utils/common_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
-#include "ngraph/pass/serialize.hpp"
+#include "common_test_utils/graph_comparator.hpp"
+#include "ie_iextension.h"
+#include "openvino/pass/manager.hpp"
+#include "openvino/pass/serialize.hpp"
 #include "openvino/runtime/core.hpp"
 class CustomOpsSerializationTest : public ::testing::Test {
diff --git a/src/core/tests/pass/serialization/rt_info_serialization.cpp b/src/core/tests/pass/serialization/rt_info_serialization.cpp
index 1988af25886..78c25323a8c 100644
--- a/src/core/tests/pass/serialization/rt_info_serialization.cpp
+++ b/src/core/tests/pass/serialization/rt_info_serialization.cpp
@@ -4,12 +4,12 @@
 #include
-#include
-
-#include "common_test_utils/ngraph_test_utils.hpp"
-#include "ngraph/pass/serialize.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "common_test_utils/file_utils.hpp"
+#include "common_test_utils/test_common.hpp"
 #include "openvino/frontend/manager.hpp"
 #include "openvino/opsets/opset8.hpp"
+#include "openvino/pass/manager.hpp"
 #include "transformations/rt_info/attributes.hpp"
 class RTInfoSerializationTest : public ov::test::TestsCommon {
diff --git a/src/core/tests/pass/serialization/serialize.cpp b/src/core/tests/pass/serialization/serialize.cpp
index a1e4b078686..791c46790dd 100644
--- a/src/core/tests/pass/serialization/serialize.cpp
+++ b/src/core/tests/pass/serialization/serialize.cpp
@@ -11,8 +11,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "common_test_utils/graph_comparator.hpp"
 #include "common_test_utils/test_common.hpp"
-#include "ngraph/pass/manager.hpp"
-#include "ngraph/pass/serialize.hpp"
 #include "openvino/util/file_util.hpp"
 #include "read_ir.hpp"
diff --git a/src/core/tests/pass/serialization/tensor_iterator.cpp b/src/core/tests/pass/serialization/tensor_iterator.cpp
index f929b1fdee9..d122ede1b16 100644
--- a/src/core/tests/pass/serialization/tensor_iterator.cpp
+++ b/src/core/tests/pass/serialization/tensor_iterator.cpp
@@ -7,7 +7,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "common_test_utils/data_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/graph_comparator.hpp"
 #include "gtest/gtest.h"
 #include "ie_blob.h"
 #include "ie_core.hpp"
diff --git a/src/core/tests/pass/serialization/tensor_names.cpp b/src/core/tests/pass/serialization/tensor_names.cpp
index 10c39b0c198..bc3541cf90e 100644
--- a/src/core/tests/pass/serialization/tensor_names.cpp
+++ b/src/core/tests/pass/serialization/tensor_names.cpp
@@ -29,10 +29,9 @@ protected:
 };
 TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) {
-    std::shared_ptr function;
+    std::shared_ptr model;
     {
-        auto parameter =
-            std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
+        auto parameter = std::make_shared(ov::element::Type_t::f32, ov::Shape{1, 3, 10, 10});
         parameter->set_friendly_name("parameter");
         parameter->get_output_tensor(0).set_names({"input"});
         auto relu_prev = std::make_shared(parameter);
@@ -41,18 +40,18 @@ TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) {
         auto relu = std::make_shared(relu_prev);
         relu->set_friendly_name("relu");
         relu->get_output_tensor(0).set_names({"relu,t", "identity"});
-        const ngraph::ResultVector results{std::make_shared(relu)};
+        const ov::ResultVector results{std::make_shared(relu)};
         results[0]->set_friendly_name("out");
-        ngraph::ParameterVector params{parameter};
-        function = std::make_shared(results, params, "TensorNames");
+        ov::ParameterVector params{parameter};
+        model = std::make_shared(results, params, "TensorNames");
     }
-    ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(function);
+    ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(model);
     auto result = ov::test::readModel(m_out_xml_path, m_out_bin_path);
     const auto fc = FunctionsComparator::with_default()
                         .enable(FunctionsComparator::ATTRIBUTES)
                         .enable(FunctionsComparator::CONST_VALUES);
-    const auto res = fc.compare(result, function);
+    const auto res = fc.compare(result, model);
     EXPECT_TRUE(res.valid) << res.message;
 }
diff --git a/src/core/tests/type_prop/abs.cpp b/src/core/tests/type_prop/abs.cpp
index 911422cd2de..164b5548fe0 100644
--- a/src/core/tests/type_prop/abs.cpp
+++ b/src/core/tests/type_prop/abs.cpp
@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_abs, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/acos.cpp b/src/core/tests/type_prop/acos.cpp
index dc7e94d7537..77b4aa60c61 100644
--- a/src/core/tests/type_prop/acos.cpp
+++ b/src/core/tests/type_prop/acos.cpp
@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_acos, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/acosh.cpp b/src/core/tests/type_prop/acosh.cpp
index 118233b1767..14cf67e944e 100644
--- a/src/core/tests/type_prop/acosh.cpp
+++ b/src/core/tests/type_prop/acosh.cpp
@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_acosh, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/adaptive_avg_pool.cpp b/src/core/tests/type_prop/adaptive_avg_pool.cpp
index 9c203d1eecd..3426dde22c2 100644
--- a/src/core/tests/type_prop/adaptive_avg_pool.cpp
+++ b/src/core/tests/type_prop/adaptive_avg_pool.cpp
@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/opsets/opset10.hpp"
 using namespace std;
diff --git a/src/core/tests/type_prop/adaptive_max_pool.cpp b/src/core/tests/type_prop/adaptive_max_pool.cpp
index cee7f5c4fa2..376a05ecd8b 100644
--- a/src/core/tests/type_prop/adaptive_max_pool.cpp
+++ b/src/core/tests/type_prop/adaptive_max_pool.cpp
@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/opsets/opset10.hpp"
 using namespace std;
diff --git a/src/core/tests/type_prop/add.cpp b/src/core/tests/type_prop/add.cpp
index be26357aa3f..2592ee9add5 100644
--- a/src/core/tests/type_prop/add.cpp
+++ b/src/core/tests/type_prop/add.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/add.hpp"
+
 #include "arithmetic_ops.hpp"
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_add, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/arithmetic_ops.hpp b/src/core/tests/type_prop/arithmetic_ops.hpp
index 77ecc449bd9..482d28e4c1f 100644
--- a/src/core/tests/type_prop/arithmetic_ops.hpp
+++ b/src/core/tests/type_prop/arithmetic_ops.hpp
@@ -13,15 +13,14 @@
 // limitations under the License.
 //*****************************************************************************
+#include
+
 #include
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
 #include "openvino/core/dimension_tracker.hpp"
 #include "openvino/op/util/attr_types.hpp"
-using namespace ngraph;
 using namespace testing;
 template
@@ -30,443 +29,555 @@ class ArithmeticOperator : public testing::Test {};
 TYPED_TEST_SUITE_P(ArithmeticOperator);
 TYPED_TEST_P(ArithmeticOperator, default_constructor) {
-    auto A = std::make_shared(element::f32, PartialShape{-1, 4, 1, 6, Dimension(1, 6), Dimension(2, 6)});
-    auto B = std::make_shared(element::f32, PartialShape{-1, 1, 5, 6, Dimension(5, 8), Dimension(5, 8)});
+    auto A = std::make_shared(
+        ov::element::f32,
+        ov::PartialShape{-1, 4, 1, 6, ov::Dimension(1, 6), ov::Dimension(2, 6)});
+    auto B = std::make_shared(
+        ov::element::f32,
+        ov::PartialShape{-1, 1, 5, 6, ov::Dimension(5, 8), ov::Dimension(5, 8)});
     const auto op = std::make_shared();
     op->set_argument(0, A);
     op->set_argument(1, B);
-    auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE);
+    auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NONE);
     op->set_autob(autob);
-    EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE);
-    ASSERT_THROW(op->validate_and_infer_types(), NodeValidationFailure);
+    EXPECT_EQ(op->get_autob(), ov::op::AutoBroadcastType::NONE);
+    ASSERT_THROW(op->validate_and_infer_types(), ov::NodeValidationFailure);
-    autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY);
+    autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY);
     op->set_autob(autob);
-    EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+    EXPECT_EQ(op->get_autob(), ov::op::AutoBroadcastType::NUMPY);
     op->validate_and_infer_types();
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{-1, 4, 5, 6, Dimension(5, 8), Dimension(5, 6)}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0),
+              (ov::PartialShape{-1, 4, 5, 6, ov::Dimension(5, 8), ov::Dimension(5, 6)}));
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_2D) {
-    auto A = std::make_shared(element::f32, Shape{2, 2});
-    auto B = std::make_shared(element::f32, Shape{2, 2});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 2}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 2}));
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_4D) {
-    auto A = std::make_shared(element::f32, Shape{2, 2, 3, 3});
-    auto B = std::make_shared(element::f32, Shape{2, 2, 3, 3});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2, 3, 3});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2, 3, 3});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 2, 3, 3}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 2, 3, 3}));
 }
 TYPED_TEST_P(ArithmeticOperator, default_autobroadcast) {
-    auto A = std::make_shared(element::f32, Shape{2, 2});
-    auto B = std::make_shared(element::f32, Shape{2, 2});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 2}));
-    EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 2}));
+    EXPECT_EQ(op->get_autob(), ov::op::AutoBroadcastType::NUMPY);
 }
 TYPED_TEST_P(ArithmeticOperator, no_autobroadcast) {
-    auto A = std::make_shared(element::f32, Shape{2, 2});
-    auto B = std::make_shared(element::f32, Shape{2, 2});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 2});
-    const auto op = std::make_shared(A, B, op::AutoBroadcastType::NONE);
+    const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::NONE);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 2}));
-    EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NONE);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 2}));
+    EXPECT_EQ(op->get_autob(), ov::op::AutoBroadcastType::NONE);
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_scalar_numpy_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-    auto B = std::make_shared(element::f32, Shape{1});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{1});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_1D_numpy_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-    auto B = std::make_shared(element::f32, Shape{5});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{5});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_2D_x_4D_numpy_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{4, 5});
-    auto B = std::make_shared(element::f32, Shape{2, 3, 4, 5});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{4, 5});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_3D_x_4D_numpy_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{1, 4, 5});
-    auto B = std::make_shared(element::f32, Shape{2, 3, 1, 1});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{1, 4, 5});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 3, 1, 1});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_4D_x_3D_numpy_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{8, 1, 6, 1});
-    auto B = std::make_shared(element::f32, Shape{7, 1, 5});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 1});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{7, 1, 5});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5}));
-    EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{8, 7, 6, 5}));
+    EXPECT_EQ(op->get_autob(), ov::op::AutoBroadcastType::NUMPY);
 }
 TYPED_TEST_P(ArithmeticOperator, static_shape_pdpd_doc_examples) {
     {
-        auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-        auto B = std::make_shared(element::f32, Shape{3, 4});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{3, 4});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
     {
-        auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-        auto B = std::make_shared(element::f32, Shape{3, 1});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{3, 1});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
     {
-        auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-        auto B = std::make_shared(element::f32, Shape{});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
     {
-        auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-        auto B = std::make_shared(element::f32, Shape{5});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{5});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 3);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 3);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
     {
-        auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-        auto B = std::make_shared(element::f32, Shape{1, 3});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{1, 3});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 0);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
     {
-        auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-        auto B = std::make_shared(element::f32, Shape{3, 1, 5});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{3, 1, 5});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{2, 3, 4, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3, 4, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
 }
 TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_4D_pdpd_broadcast) {
     {
-        auto A = std::make_shared(element::f32, Shape{8, 1, 6, 5});
-        auto B = std::make_shared(element::f32, Shape{8, 1, 6, 5});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 5});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{8, 1, 6, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{8, 1, 6, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
     {
-        auto A = std::make_shared(element::f32, Shape{8, 7, 6, 5});
-        auto B = std::make_shared(element::f32, Shape{8, 1, 6, 5});
+        auto A = std::make_shared(ov::element::f32, ov::Shape{8, 7, 6, 5});
+        auto B = std::make_shared(ov::element::f32, ov::Shape{8, 1, 6, 5});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
-        EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5}));
-        EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
+        EXPECT_EQ(op->get_shape(), (ov::Shape{8, 7, 6, 5}));
+        EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
     }
 }
 TYPED_TEST_P(ArithmeticOperator, static_shape_inference_4D_x_3D_ax_default_pdpd_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{8, 7, 6, 5});
-    auto B = std::make_shared(element::f32, Shape{7, 1, 5});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{8, 7, 6, 5});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{7, 1, 5});
-    const auto op = std::make_shared(A, B, op::AutoBroadcastType::PDPD);
+    const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::PDPD);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_shape(), (Shape{8, 7, 6, 5}));
-    EXPECT_EQ(op->get_autob().m_type, op::AutoBroadcastType::PDPD);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_shape(), (ov::Shape{8, 7, 6, 5}));
+    EXPECT_EQ(op->get_autob().m_type, ov::op::AutoBroadcastType::PDPD);
 }
 TYPED_TEST_P(ArithmeticOperator, incompatible_element_types) {
-    auto A = std::make_shared(element::f32, Shape{2, 2, 3, 3});
-    auto B = std::make_shared(element::i32, Shape{2, 2, 3, 3});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 2, 3, 3});
+    auto B = std::make_shared(ov::element::i32, ov::Shape{2, 2, 3, 3});
-    ASSERT_THROW(const auto unused = std::make_shared(A, B), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, incompatible_boolean_type) {
-    auto A = std::make_shared(element::boolean, Shape{2, 2, 3, 3});
-    auto B = std::make_shared(element::boolean, Shape{2, 2, 3, 3});
+    auto A = std::make_shared(ov::element::boolean, ov::Shape{2, 2, 3, 3});
+    auto B = std::make_shared(ov::element::boolean, ov::Shape{2, 2, 3, 3});
-    ASSERT_THROW(const auto unused = std::make_shared(A, B), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_1D_x_1D_incompatible) {
-    auto A = std::make_shared(element::f32, Shape{3});
-    auto B = std::make_shared(element::f32, Shape{4});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{3});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{4});
-    ASSERT_THROW(const auto unused = std::make_shared(A, B), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_3D_x_3D_incompatible) {
-    auto A = std::make_shared(element::f32, Shape{3, 5, 6});
-    auto B = std::make_shared(element::f32, Shape{4, 10, 12});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{3, 5, 6});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{4, 10, 12});
-    ASSERT_THROW(const auto unused = std::make_shared(A, B), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_5D_x_5D_incompatible) {
-    auto A = std::make_shared(element::f32, Shape{389, 112, 12});
-    auto B = std::make_shared(element::f32, Shape{389, 112, 19});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{389, 112, 12});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{389, 112, 19});
-    ASSERT_THROW(const auto unused = std::make_shared(A, B), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_axis_less_than_negative_1_pdpd_incompatible) {
-    auto A = std::make_shared(element::f32, Shape{2, 3, 4, 5});
-    auto B = std::make_shared(element::f32, Shape{3, 1});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{3, 1});
-    const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, -2);
+    const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, -2);
-    ASSERT_THROW(const auto unused = std::make_shared(A, B, autob), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B, autob), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, shape_inference_dst_smaller_than_src_pdpd_broadcast) {
-    auto A = std::make_shared(element::f32, Shape{2, 3, 4, 1});
-    auto B = std::make_shared(element::f32, Shape{2, 3, 4, 5});
+    auto A = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 1});
+    auto B = std::make_shared(ov::element::f32, ov::Shape{2, 3, 4, 5});
-    const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+    const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD);
-    ASSERT_THROW(const auto unused = std::make_shared(A, B, autob), ngraph::NodeValidationFailure);
+    ASSERT_THROW(const auto unused = std::make_shared(A, B, autob), ov::NodeValidationFailure);
 }
 TYPED_TEST_P(ArithmeticOperator, fully_dynamic_shape_broadcast_numpy) {
-    auto param = std::make_shared(element::f32, PartialShape::dynamic());
-    const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NUMPY);
+    auto param = std::make_shared(ov::element::f32, ov::PartialShape::dynamic());
+    const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY);
     const auto op = std::make_shared(param, param, autob);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic());
 }
 TYPED_TEST_P(ArithmeticOperator, fully_dynamic_shape_broadcast_none) {
-    auto param = std::make_shared(element::f32, PartialShape::dynamic());
-    const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::NONE);
+    auto param = std::make_shared(ov::element::f32, ov::PartialShape::dynamic());
+    const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NONE);
     const auto op = std::make_shared(param, param, autob);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic());
 }
 TYPED_TEST_P(ArithmeticOperator, fully_dynamic_shape_broadcast_pdpd) {
-    auto param = std::make_shared(element::f32, PartialShape::dynamic());
-    const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD);
+    auto param = std::make_shared(ov::element::f32, ov::PartialShape::dynamic());
+    const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD);
     const auto op = std::make_shared(param, param, autob);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic());
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic());
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_3D) {
-    Dimension dynamic = Dimension::dynamic();
-    auto A = std::make_shared(element::f32, PartialShape{dynamic, dynamic, 6});
-    auto B = std::make_shared(element::f32, PartialShape{dynamic, dynamic, 6});
+    ov::Dimension dynamic = ov::Dimension::dynamic();
+    auto A = std::make_shared(ov::element::f32, ov::PartialShape{dynamic, dynamic, 6});
+    auto B = std::make_shared(ov::element::f32, ov::PartialShape{dynamic, dynamic, 6});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{dynamic, dynamic, 6}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), (ov::PartialShape{dynamic, dynamic, 6}));
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_5D) {
-    Dimension dynamic = Dimension::dynamic();
-    auto A = std::make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6});
-    auto B = std::make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6});
+    ov::Dimension dynamic = ov::Dimension::dynamic();
+    auto A =
+        std::make_shared(ov::element::f32, ov::PartialShape{dynamic, 4, dynamic, dynamic, 6});
+    auto B =
+        std::make_shared(ov::element::f32, ov::PartialShape{dynamic, 4, dynamic, dynamic, 6});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
-    EXPECT_EQ(op->get_output_partial_shape(0), (PartialShape{dynamic, 4, dynamic, dynamic, 6}));
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
+    EXPECT_EQ(op->get_output_partial_shape(0), (ov::PartialShape{dynamic, 4, dynamic, dynamic, 6}));
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_none) {
-    auto A = std::make_shared(
-        element::f32,
-        PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(6, -1), Dimension(-1, 6), -1, 8});
-    auto B = std::make_shared(
-        element::f32,
-        PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(6, -1), Dimension(-1, 6), -1, 8});
+    auto A = std::make_shared(
+        ov::element::f32,
+        ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), ov::Dimension(6, -1), ov::Dimension(-1, 6), -1, 8});
+    auto B = std::make_shared(
+        ov::element::f32,
+        ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), ov::Dimension(6, -1), ov::Dimension(-1, 6), -1, 8});
-    const auto op = std::make_shared(A, B, op::AutoBroadcastType::NONE);
+    const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::NONE);
-    EXPECT_EQ(op->get_element_type(), element::f32);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
     EXPECT_EQ(op->get_output_partial_shape(0),
-              (PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(6, -1), Dimension(-1, 6), -1, 8}));
+              (ov::PartialShape{ov::Dimension(1, 3),
+                                ov::Dimension(2, 7),
+                                ov::Dimension(6, -1),
+                                ov::Dimension(-1, 6),
+                                -1,
+                                8}));
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_equal_rank_broadcast_numpy) {
     // Equal rank
-    auto A = std::make_shared(
-        element::f32,
-        PartialShape{Dimension(1, 3), Dimension(1, 3), Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3});
-    auto B = std::make_shared(
-        element::f32,
-        PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3});
+    auto A = std::make_shared(ov::element::f32,
+                              ov::PartialShape{ov::Dimension(1, 3),
+                                               ov::Dimension(1, 3),
+                                               ov::Dimension(1, 3),
+                                               ov::Dimension(4, 8),
+                                               -1,
+                                               1,
+                                               -1,
+                                               1,
+                                               3});
+    auto B = std::make_shared(ov::element::f32,
+                              ov::PartialShape{ov::Dimension(1, 3),
+                                               ov::Dimension(2, 7),
+                                               -1,
+                                               1,
+                                               ov::Dimension(1, 3),
+                                               ov::Dimension(4, 8),
+                                               -1,
+                                               1,
+                                               3});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
     EXPECT_EQ(op->get_output_partial_shape(0),
-              (PartialShape{Dimension(1, 3), Dimension(2, 7), -1, Dimension(4, 8), -1, Dimension(4, 8), -1, 1, 3}));
+              (ov::PartialShape{ov::Dimension(1, 3),
+                                ov::Dimension(2, 7),
+                                -1,
+                                ov::Dimension(4, 8),
+                                -1,
+                                ov::Dimension(4, 8),
+                                -1,
+                                1,
+                                3}));
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_a_rank_smaller_broadcast_numpy) {
     // `A` rank smaller
-    auto A =
-        std::make_shared(element::f32, PartialShape{Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3});
-    auto B = std::make_shared(
-        element::f32,
-        PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3});
+    auto A = std::make_shared(
+        ov::element::f32,
+        ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(4, 8), -1, 1, -1, 1, 3});
+    auto B = std::make_shared(ov::element::f32,
+                              ov::PartialShape{ov::Dimension(1, 3),
+                                               ov::Dimension(2, 7),
+                                               -1,
+                                               1,
+                                               ov::Dimension(1, 3),
+                                               ov::Dimension(4, 8),
+                                               -1,
+                                               1,
+                                               3});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
     EXPECT_EQ(op->get_output_partial_shape(0),
-              (PartialShape{Dimension(1, 3), Dimension(2, 7), -1, Dimension(4, 8), -1, Dimension(4, 8), -1, 1, 3}));
+              (ov::PartialShape{ov::Dimension(1, 3),
+                                ov::Dimension(2, 7),
+                                -1,
+                                ov::Dimension(4, 8),
+                                -1,
+                                ov::Dimension(4, 8),
+                                -1,
+                                1,
+                                3}));
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_b_rank_smaller_broadcast_numpy) {
     // `B` rank smaller
-    auto A = std::make_shared(
-        element::f32,
-        PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3});
-    auto B =
-        std::make_shared(element::f32, PartialShape{Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3});
+    auto A = std::make_shared(ov::element::f32,
+                              ov::PartialShape{ov::Dimension(1, 3),
+                                               ov::Dimension(2, 7),
+                                               -1,
+                                               1,
+                                               ov::Dimension(1, 3),
+                                               ov::Dimension(4, 8),
+                                               -1,
+                                               1,
+                                               3});
+    auto B = std::make_shared(
+        ov::element::f32,
+        ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(4, 8), -1, 1, -1, 1, 3});
     const auto op = std::make_shared(A, B);
-    EXPECT_EQ(op->get_element_type(), element::f32);
+    EXPECT_EQ(op->get_element_type(), ov::element::f32);
     EXPECT_EQ(op->get_output_partial_shape(0),
-              (PartialShape{Dimension(1, 3), Dimension(2, 7), -1, Dimension(4, 8), -1, Dimension(4, 8), -1, 1, 3}));
+              (ov::PartialShape{ov::Dimension(1, 3),
+                                ov::Dimension(2, 7),
+                                -1,
+                                ov::Dimension(4, 8),
+                                -1,
+                                ov::Dimension(4, 8),
+                                -1,
+                                1,
+                                3}));
 }
 TYPED_TEST_P(ArithmeticOperator, dynamic_shape_intervals_broadcast_pdpd) {
     {
        // Equal rank
-        auto A = std::make_shared(
-            element::f32,
-            PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(1, 6), /* Dimension(6, -1), */ -1, 8});
-        auto B = std::make_shared(element::f32,
-                                  PartialShape{Dimension(1, 3), Dimension(2, 7), 1, /* 1, */ -1, 8});
+        auto A = std::make_shared(ov::element::f32,
+                                  ov::PartialShape{ov::Dimension(1, 3),
+                                                   ov::Dimension(2, 7),
+                                                   ov::Dimension(1, 6),
+                                                   /* Dimension(6, -1), */ -1,
+                                                   8});
+        auto B = std::make_shared(
+            ov::element::f32,
+            ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(2, 7), 1, /* 1, */ -1, 8});
-        const auto op = std::make_shared(A, B, op::AutoBroadcastType::PDPD);
+        const auto op = std::make_shared(A, B, ov::op::AutoBroadcastType::PDPD);
-        EXPECT_EQ(op->get_element_type(), element::f32);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
         EXPECT_EQ(op->get_output_partial_shape(0),
-                  (PartialShape{Dimension(1, 3), Dimension(2, 7), Dimension(1, 6), /* Dimension(6, -1), */ -1, 8}));
+                  (ov::PartialShape{ov::Dimension(1, 3),
+                                    ov::Dimension(2, 7),
+                                    ov::Dimension(1, 6),
+                                    /* Dimension(6, -1), */ -1,
+                                    8}));
     }
     {
        // `A` rank smaller
-        auto A = std::make_shared(
-            element::f32,
-            PartialShape{Dimension(1, 3), Dimension(1, 3), Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3});
-        auto B = std::make_shared(
-            element::f32,
-            PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3});
+        auto A = std::make_shared(ov::element::f32,
+                                  ov::PartialShape{ov::Dimension(1, 3),
+                                                   ov::Dimension(1, 3),
+                                                   ov::Dimension(1, 3),
+                                                   ov::Dimension(4, 8),
+                                                   -1,
+                                                   1,
+                                                   -1,
+                                                   1,
+                                                   3});
+        auto B = std::make_shared(ov::element::f32,
+                                  ov::PartialShape{ov::Dimension(1, 3),
+                                                   ov::Dimension(2, 7),
+                                                   -1,
+                                                   1,
+                                                   ov::Dimension(1, 3),
+                                                   ov::Dimension(4, 8),
+                                                   -1,
+                                                   1,
+                                                   3});
-        const auto autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 0);
+        const auto autob = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 0);
         const auto op = std::make_shared(A, B, autob);
-        EXPECT_EQ(op->get_element_type(), element::f32);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
         EXPECT_EQ(op->get_output_partial_shape(0),
-                  (PartialShape{Dimension(1, 3), Dimension(2, 7), -1, Dimension(4, 8), -1, Dimension(4, 8), -1, 1, 3}));
+                  (ov::PartialShape{ov::Dimension(1, 3),
+                                    ov::Dimension(2, 7),
+                                    -1,
+                                    ov::Dimension(4, 8),
+                                    -1,
+                                    ov::Dimension(4, 8),
+                                    -1,
+                                    1,
+                                    3}));
     }
     {
        // `B` rank smaller
-        auto A = std::make_shared(
-            element::f32,
-            PartialShape{Dimension(1, 3), Dimension(2, 7), -1, 1, Dimension(1, 3), Dimension(4, 8), -1, 1, 3});
-        auto B = std::make_shared(element::f32,
-                                  PartialShape{Dimension(1, 3), Dimension(4, 8), -1, 1, -1, 1, 3});
+        auto A = std::make_shared(ov::element::f32,
+                                  ov::PartialShape{ov::Dimension(1, 3),
+                                                   ov::Dimension(2, 7),
+                                                   -1,
+                                                   1,
+                                                   ov::Dimension(1, 3),
+                                                   ov::Dimension(4, 8),
+                                                   -1,
+                                                   1,
+                                                   3});
+        auto B = std::make_shared(
+            ov::element::f32,
+            ov::PartialShape{ov::Dimension(1, 3), ov::Dimension(4, 8), -1, 1, -1, 1, 3});
         const auto op = std::make_shared(A, B);
-        EXPECT_EQ(op->get_element_type(), element::f32);
+        EXPECT_EQ(op->get_element_type(), ov::element::f32);
         EXPECT_EQ(op->get_output_partial_shape(0),
-                  (PartialShape{Dimension(1, 3), Dimension(2, 7), -1, Dimension(4, 8), -1, Dimension(4, 8), -1, 1, 3}));
+                  (ov::PartialShape{ov::Dimension(1, 3),
+                                    ov::Dimension(2, 7),
+                                    -1,
+                                    ov::Dimension(4, 8),
+                                    -1,
+                                    ov::Dimension(4, 8),
+                                    -1,
+                                    1,
+                                    3}));
     }
 }
 TYPED_TEST_P(ArithmeticOperator, labels_a_dynamic_mixed_dims_broadcast_numpy) {
     // All dimensions of A have labels, B without labels
-    PartialShape pshape_A{Dimension(-1), Dimension(3), Dimension(1), Dimension(2, 128)};
-    PartialShape pshape_B{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1)};
+    ov::PartialShape pshape_A{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(1), ov::Dimension(2, 128)};
+    ov::PartialShape pshape_B{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1)};
-    PartialShape expected_shape = {-1, 3, Dimension(2, 224), Dimension(2, 128)};
+    ov::PartialShape expected_shape = {-1, 3, ov::Dimension(2, 224), ov::Dimension(2, 128)};
     set_shape_labels(pshape_A, {10, 11, 12, 13});
     set_shape_labels(expected_shape, {10, 11, 0, 13});
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
     const auto op = std::make_shared(param_A, param_B);
     const auto out_shape = op->get_output_partial_shape(0);
@@ -477,16 +588,16 @@ TYPED_TEST_P(ArithmeticOperator, labels_a_dynamic_mixed_dims_broadcast_numpy) {
 TYPED_TEST_P(ArithmeticOperator, labels_b_dynamic_mixed_dims_broadcast_numpy) {
     // All dimensions of B have labels, A without labels
-    PartialShape pshape_A{Dimension(-1), Dimension(3), Dimension(1), Dimension(2, 128)};
-    PartialShape pshape_B{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1)};
+    ov::PartialShape pshape_A{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(1), ov::Dimension(2, 128)};
+    ov::PartialShape pshape_B{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1)};
-    PartialShape expected_shape = {-1, 3, Dimension(2, 224), Dimension(2, 128)};
+    ov::PartialShape expected_shape = {-1, 3, ov::Dimension(2, 224), ov::Dimension(2, 128)};
     set_shape_labels(pshape_B, {20, 21, 22, 23});
     set_shape_labels(expected_shape, {20, 21, 22, 0});
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
     const auto op = std::make_shared(param_A, param_B);
     const auto out_shape = op->get_output_partial_shape(0);
@@ -497,17 +608,17 @@ TYPED_TEST_P(ArithmeticOperator, labels_b_dynamic_mixed_dims_broadcast_numpy) {
 TYPED_TEST_P(ArithmeticOperator, labels_different_interval_mixed_dims_broadcast_numpy) {
     // Both params have dimensions with different labels
-    PartialShape pshape_A{Dimension(-1), Dimension(3), Dimension(1), Dimension(2, 128)};
-    PartialShape pshape_B{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1)};
+    ov::PartialShape pshape_A{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(1), ov::Dimension(2, 128)};
+    ov::PartialShape pshape_B{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1)};
-    PartialShape expected_shape = {-1, 3, Dimension(2, 224), Dimension(2, 128)};
+    ov::PartialShape expected_shape = {-1, 3, ov::Dimension(2, 224), ov::Dimension(2, 128)};
     set_shape_labels(pshape_A, {10, 11, 12, 13});
     set_shape_labels(pshape_B, {20, 21, 22, 23});
     set_shape_labels(expected_shape, {0, 21, 22, 13});
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
     const auto op = std::make_shared(param_A, param_B);
     const auto out_shape = op->get_output_partial_shape(0);
@@ -518,18 +629,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_mixed_dims_broadcast_
 TYPED_TEST_P(ArithmeticOperator, labels_different_interval_b_and_fully_dyn_a_broadcast_numpy) {
     // Both params have dimension labels, output has label B
-    Dimension dim_0_A = Dimension(-1);
-    Dimension dim_0_B = Dimension(2, 4);
+    ov::Dimension dim_0_A = ov::Dimension(-1);
+    ov::Dimension dim_0_B = ov::Dimension(2, 4);
     ov::DimensionTracker::set_label(dim_0_A, 10);
     ov::DimensionTracker::set_label(dim_0_B, 20);
-    PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224};
-    PartialShape expected_shape = {Dimension(2, 4), 3, 224, 224};
+    ov::PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224};
+    ov::PartialShape expected_shape = {ov::Dimension(2, 4), 3, 224, 224};
     ov::TensorLabel expected_labels{20, 0, 0, 0};
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
     const auto op = std::make_shared(param_A, param_B);
     const auto out_shape = op->get_output_partial_shape(0);
@@ -540,18 +651,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_b_and_fully_dyn_a_bro
 TYPED_TEST_P(ArithmeticOperator, labels_different_interval_a_and_fully_dyn_b_broadcast_numpy) {
     // Both params have dimension labels, output has label A
-    Dimension dim_0_A = Dimension(2, 4);
-    Dimension dim_0_B = Dimension(-1);
+    ov::Dimension dim_0_A = ov::Dimension(2, 4);
+    ov::Dimension dim_0_B = ov::Dimension(-1);
     ov::DimensionTracker::set_label(dim_0_A, 10);
     ov::DimensionTracker::set_label(dim_0_B, 20);
-    PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224};
-    PartialShape expected_shape = {Dimension(2, 4), 3, 224, 224};
+    ov::PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224};
+    ov::PartialShape expected_shape = {ov::Dimension(2, 4), 3, 224, 224};
     ov::TensorLabel expected_labels{10, 0, 0, 0};
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
     const auto op = std::make_shared(param_A, param_B);
     const auto out_shape = op->get_output_partial_shape(0);
@@ -562,17 +673,17 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_a_and_fully_dyn_b_bro
 TYPED_TEST_P(ArithmeticOperator, labels_equal_interval_dims_without_one_broadcast_numpy) {
same labels - PartialShape pshape_A{Dimension(2, 4), Dimension(8, 16), Dimension(8, 16), Dimension(8, 16)}; - PartialShape pshape_B{Dimension(2, 4), Dimension(4, 12), Dimension(10, 12), Dimension(16, 24)}; + ov::PartialShape pshape_A{ov::Dimension(2, 4), ov::Dimension(8, 16), ov::Dimension(8, 16), ov::Dimension(8, 16)}; + ov::PartialShape pshape_B{ov::Dimension(2, 4), ov::Dimension(4, 12), ov::Dimension(10, 12), ov::Dimension(16, 24)}; - PartialShape expected_shape = {Dimension(2, 4), Dimension(8, 12), Dimension(10, 12), 16}; + ov::PartialShape expected_shape = {ov::Dimension(2, 4), ov::Dimension(8, 12), ov::Dimension(10, 12), 16}; set_shape_labels(pshape_A, {10, 11, 12, 13}); set_shape_labels(pshape_B, {10, 11, 12, 13}); set_shape_labels(expected_shape, {10, 11, 12, 13}); - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -583,17 +694,17 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_interval_dims_without_one_broadcas TYPED_TEST_P(ArithmeticOperator, labels_different_interval_dims_without_one_broadcast_numpy) { // Both params have dynamic interval dimension different labels - PartialShape pshape_A{Dimension(2, 4), Dimension(8, 16), Dimension(8, 16), Dimension(8, 16)}; - PartialShape pshape_B{Dimension(2, 4), Dimension(4, 12), Dimension(10, 12), Dimension(16, 24)}; + ov::PartialShape pshape_A{ov::Dimension(2, 4), ov::Dimension(8, 16), ov::Dimension(8, 16), ov::Dimension(8, 16)}; + ov::PartialShape pshape_B{ov::Dimension(2, 4), ov::Dimension(4, 12), ov::Dimension(10, 12), ov::Dimension(16, 24)}; - PartialShape expected_shape = {Dimension(2, 4), Dimension(8, 12), Dimension(10, 12), 16}; + ov::PartialShape expected_shape = {ov::Dimension(2, 4), ov::Dimension(8, 12), ov::Dimension(10, 12), 16}; ov::TensorLabel expected_labels{20, 21, 22, 23}; set_shape_labels(pshape_A, {10, 11, 12, 13}); set_shape_labels(pshape_B, {20, 21, 22, 23}); - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -607,21 +718,21 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_batch_without_one_equ auto table_of_equivalence = std::make_shared(); ov::DimensionTracker dim_tracker(table_of_equivalence); - Dimension dim_0_A = Dimension(2, 4); - Dimension dim_0_B = Dimension(2, 4); + ov::Dimension dim_0_A = ov::Dimension(2, 4); + ov::Dimension dim_0_B = ov::Dimension(2, 4); dim_tracker.set_up_for_tracking(dim_0_A, 10); dim_tracker.set_up_for_tracking(dim_0_B, 20); - PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; + ov::PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); - PartialShape expected_shape = {Dimension(2, 
4), 3, 224, 224}; + ov::PartialShape expected_shape = {ov::Dimension(2, 4), 3, 224, 224}; ov::TensorLabel expected_labels{20, 0, 0, 0}; auto eq_table = table_of_equivalence->get_equivalence_table(); @@ -634,18 +745,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_interval_batch_without_one_equ TYPED_TEST_P(ArithmeticOperator, labels_different_fully_dynamic_batch_broadcast_numpy) { // Both params have fully dynamic dimension and different labels - Dimension dim_0_A = Dimension(-1); - Dimension dim_0_B = Dimension(-1); + ov::Dimension dim_0_A = ov::Dimension(-1); + ov::Dimension dim_0_B = ov::Dimension(-1); ov::DimensionTracker::set_label(dim_0_A, 10); ov::DimensionTracker::set_label(dim_0_B, 20); - PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; - PartialShape expected_shape = {-1, 3, 224, 224}; + ov::PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; + ov::PartialShape expected_shape = {-1, 3, 224, 224}; ov::TensorLabel expected_labels{0, 0, 0, 0}; - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -656,18 +767,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_fully_dynamic_batch_broadcast_ TYPED_TEST_P(ArithmeticOperator, labels_equal_fully_dynamic_batch_broadcast_numpy) { // Both params have fully dynamic dimension and the same labels - Dimension dim_0_A = Dimension(-1); - Dimension dim_0_B = Dimension(-1); + ov::Dimension dim_0_A = ov::Dimension(-1); + ov::Dimension dim_0_B = ov::Dimension(-1); ov::DimensionTracker::set_label(dim_0_A, 10); ov::DimensionTracker::set_label(dim_0_B, 10); - PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; - PartialShape expected_shape = {-1, 3, 224, 224}; + ov::PartialShape pshape_A = {dim_0_A, 3, 224, 1}, pshape_B = {dim_0_B, 3, 1, 224}; + ov::PartialShape expected_shape = {-1, 3, 224, 224}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -677,15 +788,15 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_fully_dynamic_batch_broadcast_nump } TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_a_broadcast_numpy) { - Dimension b = -1; + ov::Dimension b = -1; ov::DimensionTracker::set_label(b, 10); - PartialShape A = {b, 3, 224, 224}, B = {1, 3, 1, 1}; - PartialShape expected_shape{b, 3, 224, 224}; + ov::PartialShape A = {b, 3, 224, 224}, B = {1, 3, 1, 1}; + ov::PartialShape expected_shape{b, 3, 224, 224}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(element::f64, A); - auto param_B = std::make_shared(element::f64, B); + auto param_A = std::make_shared(ov::element::f64, A); + auto param_B = std::make_shared(ov::element::f64, B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -695,15 +806,15 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_a_broadcast_numpy) { } TYPED_TEST_P(ArithmeticOperator, 
labels_dyn_batch_b_broadcast_numpy) { - Dimension b = -1; + ov::Dimension b = -1; ov::DimensionTracker::set_label(b, 10); - PartialShape B = {b, 3, 224, 224}, A = {1, 3, 1, 1}; - PartialShape expected_shape{b, 3, 224, 224}; + ov::PartialShape B = {b, 3, 224, 224}, A = {1, 3, 1, 1}; + ov::PartialShape expected_shape{b, 3, 224, 224}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(element::f64, A); - auto param_B = std::make_shared(element::f64, B); + auto param_A = std::make_shared(ov::element::f64, A); + auto param_B = std::make_shared(ov::element::f64, B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -713,17 +824,17 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_b_broadcast_numpy) { } TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_and_higher_rank_a_broadcast_numpy) { - Dimension b = -1; + ov::Dimension b = -1; ov::DimensionTracker::set_label(b, 10); - PartialShape pshape_A{b, -1, -1, -1}; - PartialShape pshape_B{3, 1, 1}; - PartialShape expected_shape{b, 3, -1, -1}; + ov::PartialShape pshape_A{b, -1, -1, -1}; + ov::PartialShape pshape_B{3, 1, 1}; + ov::PartialShape expected_shape{b, 3, -1, -1}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(element::f64, pshape_A); - auto param_B = std::make_shared(element::f64, pshape_B); + auto param_A = std::make_shared(ov::element::f64, pshape_A); + auto param_B = std::make_shared(ov::element::f64, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -733,17 +844,17 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_and_higher_rank_a_broadcast_nu } TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_and_higher_rank_b_broadcast_numpy) { - Dimension b = -1; + ov::Dimension b = -1; ov::DimensionTracker::set_label(b, 10); - PartialShape pshape_A{3, 1, 1}; - PartialShape pshape_B{b, -1, -1, -1}; - PartialShape expected_shape{b, 3, -1, -1}; + ov::PartialShape pshape_A{3, 1, 1}; + ov::PartialShape pshape_B{b, -1, -1, -1}; + ov::PartialShape expected_shape{b, 3, -1, -1}; ov::TensorLabel expected_labels{10, 0, 0, 0}; - auto param_A = std::make_shared(element::f64, pshape_A); - auto param_B = std::make_shared(element::f64, pshape_B); + auto param_A = std::make_shared(ov::element::f64, pshape_A); + auto param_B = std::make_shared(ov::element::f64, pshape_B); const auto op = std::make_shared(param_A, param_B); const auto out_shape = op->get_output_partial_shape(0); @@ -754,18 +865,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_dyn_batch_and_higher_rank_b_broadcast_nu TYPED_TEST_P(ArithmeticOperator, labels_different_static_shape_broadcast_numpy) { // Static shape, different labels - PartialShape pshape_A{Dimension(2), Dimension(1), Dimension(224), Dimension(1)}; - PartialShape pshape_B{Dimension(2), Dimension(1), Dimension(1), Dimension(128)}; - PartialShape expected_shape{2, 1, 224, 128}; + ov::PartialShape pshape_A{ov::Dimension(2), ov::Dimension(1), ov::Dimension(224), ov::Dimension(1)}; + ov::PartialShape pshape_B{ov::Dimension(2), ov::Dimension(1), ov::Dimension(1), ov::Dimension(128)}; + ov::PartialShape expected_shape{2, 1, 224, 128}; // Different labels set_shape_labels(pshape_A, {10, 11, 12, 13}); set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {20, 21, 12, 23}); - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); - const auto op = 
std::make_shared(param_A, param_B, op::AutoBroadcastType::NUMPY); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NUMPY); const auto out_shape = op->get_output_partial_shape(0); @@ -775,18 +886,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_static_shape_broadcast_numpy) TYPED_TEST_P(ArithmeticOperator, labels_equal_static_shape_broadcast_numpy) { // Static shape, the same labels - PartialShape pshape_A{2, 1, 224, 1}; - PartialShape pshape_B{2, 1, 1, 128}; - PartialShape expected_shape{2, 1, 224, 128}; + ov::PartialShape pshape_A{2, 1, 224, 1}; + ov::PartialShape pshape_B{2, 1, 1, 128}; + ov::PartialShape expected_shape{2, 1, 224, 128}; // Equal labels set_shape_labels(pshape_A, {30, 31, 32, 33}); set_shape_labels(pshape_B, {30, 31, 32, 33}); set_shape_labels(expected_shape, {30, 31, 32, 33}); - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NUMPY); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NUMPY); const auto out_shape = op->get_output_partial_shape(0); @@ -796,18 +907,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_static_shape_broadcast_numpy) { TYPED_TEST_P(ArithmeticOperator, labels_different_static_shape_broadcast_none) { // Static shape - PartialShape pshape_A{2, 3, 224, 128}; - PartialShape pshape_B{2, 3, 224, 128}; - PartialShape expected_shape{2, 3, 224, 128}; + ov::PartialShape pshape_A{2, 3, 224, 128}; + ov::PartialShape pshape_B{2, 3, 224, 128}; + ov::PartialShape expected_shape{2, 3, 224, 128}; // Different labels set_shape_labels(pshape_A, {10, 11, 12, 13}); set_shape_labels(pshape_B, {20, 21, 22, 23}); set_shape_labels(expected_shape, {20, 21, 22, 23}); - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE); auto out_shape = op->get_output_partial_shape(0); @@ -817,18 +928,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_static_shape_broadcast_none) { TYPED_TEST_P(ArithmeticOperator, labels_equal_static_shape_broadcast_none) { // Static shape - PartialShape pshape_A{2, 3, 224, 128}; - PartialShape pshape_B{2, 3, 224, 128}; - PartialShape expected_shape{2, 3, 224, 128}; + ov::PartialShape pshape_A{2, 3, 224, 128}; + ov::PartialShape pshape_B{2, 3, 224, 128}; + ov::PartialShape expected_shape{2, 3, 224, 128}; // Equal labels set_shape_labels(pshape_A, {30, 31, 32, 33}); set_shape_labels(pshape_B, {30, 31, 32, 33}); set_shape_labels(expected_shape, {30, 31, 32, 33}); - auto param_A = std::make_shared(element::f32, pshape_A); - auto param_B = std::make_shared(element::f32, pshape_B); - const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE); + auto param_A = std::make_shared(ov::element::f32, pshape_A); + auto param_B = std::make_shared(ov::element::f32, pshape_B); + const auto op = std::make_shared(param_A, param_B, 
ov::op::AutoBroadcastType::NONE);
 
     auto out_shape = op->get_output_partial_shape(0);
 
@@ -838,18 +949,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_equal_static_shape_broadcast_none) {
 
 TYPED_TEST_P(ArithmeticOperator, labels_different_dynamic_shape_broadcast_none) {
     // Dynamic shape
-    PartialShape pshape_A{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1, 128)};
-    PartialShape pshape_B{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1, 128)};
-    PartialShape expected_shape{-1, 3, Dimension(2, 224), Dimension(1, 128)};
+    ov::PartialShape pshape_A{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1, 128)};
+    ov::PartialShape pshape_B{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1, 128)};
+    ov::PartialShape expected_shape{-1, 3, ov::Dimension(2, 224), ov::Dimension(1, 128)};
 
     // Different labels
     set_shape_labels(pshape_A, {10, 11, 12, 13});
     set_shape_labels(pshape_B, {20, 21, 22, 23});
     set_shape_labels(expected_shape, {20, 21, 22, 23});
 
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
-    const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
+    const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE);
 
     const auto out_shape = op->get_output_partial_shape(0);
 
@@ -859,18 +970,18 @@ TYPED_TEST_P(ArithmeticOperator, labels_different_dynamic_shape_broadcast_none)
 
 TYPED_TEST_P(ArithmeticOperator, labels_equal_dynamic_shape_broadcast_none) {
     // Dynamic shape
-    PartialShape pshape_A{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1, 128)};
-    PartialShape pshape_B{Dimension(-1), Dimension(3), Dimension(2, 224), Dimension(1, 128)};
-    PartialShape expected_shape{-1, 3, Dimension(2, 224), Dimension(1, 128)};
+    ov::PartialShape pshape_A{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1, 128)};
+    ov::PartialShape pshape_B{ov::Dimension(-1), ov::Dimension(3), ov::Dimension(2, 224), ov::Dimension(1, 128)};
+    ov::PartialShape expected_shape{-1, 3, ov::Dimension(2, 224), ov::Dimension(1, 128)};
 
     // Equal labels
     set_shape_labels(pshape_A, {30, 31, 32, 33});
     set_shape_labels(pshape_B, {30, 31, 32, 33});
     set_shape_labels(expected_shape, {30, 31, 32, 33});
 
-    auto param_A = std::make_shared(element::f32, pshape_A);
-    auto param_B = std::make_shared(element::f32, pshape_B);
-    const auto op = std::make_shared(param_A, param_B, op::AutoBroadcastType::NONE);
+    auto param_A = std::make_shared(ov::element::f32, pshape_A);
+    auto param_B = std::make_shared(ov::element::f32, pshape_B);
+    const auto op = std::make_shared(param_A, param_B, ov::op::AutoBroadcastType::NONE);
 
     const auto out_shape = op->get_output_partial_shape(0);
 
diff --git a/src/core/tests/type_prop/asin.cpp b/src/core/tests/type_prop/asin.cpp
index 5c067020078..7ee29e86bcf 100644
--- a/src/core/tests/type_prop/asin.cpp
+++ b/src/core/tests/type_prop/asin.cpp
@@ -4,6 +4,6 @@
 
 #include "unary_ops.hpp"
 
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_asin, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/asinh.cpp b/src/core/tests/type_prop/asinh.cpp
index a4ed726ad16..52bf0fb6e8e 100644
--- a/src/core/tests/type_prop/asinh.cpp
+++ b/src/core/tests/type_prop/asinh.cpp
@@ -4,6 +4,6 @@
 
 #include "unary_ops.hpp"
 
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_asinh, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/assign.cpp b/src/core/tests/type_prop/assign.cpp
index 0f37e9dcc9a..b31820bad05 100644
--- a/src/core/tests/type_prop/assign.cpp
+++ b/src/core/tests/type_prop/assign.cpp
@@ -2,23 +2,24 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/assign.hpp"
+
+#include
+
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/op/util/variable.hpp"
-#include "ngraph/opsets/opset5.hpp"
-#include "ngraph/opsets/opset6.hpp"
+#include "openvino/core/model.hpp"
+#include "openvino/op/read_value.hpp"
+#include "openvino/op/util/variable.hpp"
 
 using namespace std;
-using namespace ngraph;
 
 TEST(type_prop, assign_variable_not_found) {
-    auto A = make_shared(element::f32, Shape{1, 2, 64, 64});
+    auto A = make_shared(ov::element::f32, ov::Shape{1, 2, 64, 64});
     try {
-        auto space_to_depth = make_shared(A, "variable_id");
+        auto space_to_depth = make_shared(A, "variable_id");
         // Should have thrown, so fail if it didn't
         FAIL() << "Should not find variable with variable_id";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Can't find variable with id = variable_id"));
     } catch (...) {
         FAIL() << "Deduced type check failed for unexpected reason";
@@ -26,45 +27,51 @@
 }
 
 TEST(type_prop, assign_deduce) {
-    auto input = make_shared(element::f32, Shape{1, 2, 64, 64});
-    auto read_value = make_shared(input, "variable_id");
-    auto assign = make_shared(read_value, "variable_id");
+    auto input = make_shared(ov::element::f32, ov::Shape{1, 2, 64, 64});
+    auto read_value = make_shared(input, "variable_id");
+    auto assign = make_shared(read_value, "variable_id");
 
-    ASSERT_EQ(assign->get_element_type(), element::f32);
-    ASSERT_EQ(assign->get_shape(), (Shape{1, 2, 64, 64}));
+    ASSERT_EQ(assign->get_element_type(), ov::element::f32);
+    ASSERT_EQ(assign->get_shape(), (ov::Shape{1, 2, 64, 64}));
 }
 
 TEST(type_prop, assign_read_value_new_shape) {
-    auto input = make_shared(element::f16, Shape{4, 3, 2, 1});
+    auto input = make_shared(ov::element::f16, ov::Shape{4, 3, 2, 1});
 
-    auto variable = std::make_shared(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID"});
-    auto read_value = make_shared(input, variable);
-    auto assign = make_shared(read_value, variable);
+    auto variable = std::make_shared(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID"});
+    auto read_value = make_shared(input, variable);
+    auto assign = make_shared(read_value, variable);
 
-    ASSERT_EQ(assign->get_element_type(), element::f16);
-    ASSERT_EQ(assign->get_shape(), (Shape{4, 3, 2, 1}));
+    ASSERT_EQ(assign->get_element_type(), ov::element::f16);
+    ASSERT_EQ(assign->get_shape(), (ov::Shape{4, 3, 2, 1}));
 
-    auto f = std::make_shared(ResultVector{}, SinkVector{assign}, ParameterVector{input});
+    auto m = std::make_shared(ov::ResultVector{}, ov::SinkVector{assign}, ov::ParameterVector{input});
 
     input->set_partial_shape({3, {4, 5}, 8});
-    f->validate_nodes_and_infer_types();
+    m->validate_nodes_and_infer_types();
 
-    ASSERT_EQ(assign->get_element_type(), element::f16);
-    ASSERT_EQ(assign->get_output_partial_shape(0), (PartialShape{3, {4, 5}, 8}));
-    ASSERT_EQ(variable->get_info().data_type, element::f16);
-    ASSERT_EQ(variable->get_info().data_shape, (PartialShape{3, {4, 5}, 8}));
+    ASSERT_EQ(assign->get_element_type(), ov::element::f16);
+    ASSERT_EQ(assign->get_output_partial_shape(0), (ov::PartialShape{3, {4, 5}, 8}));
+    ASSERT_EQ(variable->get_info().data_type, ov::element::f16);
+    ASSERT_EQ(variable->get_info().data_shape, (ov::PartialShape{3, {4, 5}, 8}));
 }
 
 TEST(type_prop, variable_comparison) {
-    auto variable1 = std::make_shared(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID"});
+    auto variable1 = std::make_shared(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID"});
 
-    auto variable2 = std::make_shared(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID"});
+    auto variable2 = std::make_shared(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID"});
 
-    auto variable3 = std::make_shared(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID1"});
+    auto variable3 = std::make_shared(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID1"});
 
-    auto variable4 = std::make_shared(VariableInfo{PartialShape::dynamic(), element::f32, "ID"});
+    auto variable4 = std::make_shared(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::f32, "ID"});
 
-    auto variable5 = std::make_shared(VariableInfo{Shape{1}, element::dynamic, "ID"});
+    auto variable5 =
+        std::make_shared(ov::op::util::VariableInfo{ov::Shape{1}, ov::element::dynamic, "ID"});
 
     ASSERT_TRUE(variable1->get_info() == variable2->get_info());
     ASSERT_FALSE(variable1->get_info() == variable3->get_info());
diff --git a/src/core/tests/type_prop/atan.cpp b/src/core/tests/type_prop/atan.cpp
index a845cd5bac3..ecc8d1b8ee1 100644
--- a/src/core/tests/type_prop/atan.cpp
+++ b/src/core/tests/type_prop/atan.cpp
@@ -4,6 +4,6 @@
 
 #include "unary_ops.hpp"
 
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_atan, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/atanh.cpp b/src/core/tests/type_prop/atanh.cpp
index ed30e6d2094..1242d761bfb 100644
--- a/src/core/tests/type_prop/atanh.cpp
+++ b/src/core/tests/type_prop/atanh.cpp
@@ -4,6 +4,6 @@
 
 #include "unary_ops.hpp"
 
-using Type = ::testing::Types;
+using Type = ::testing::Types;
 
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_atanh, UnaryOperator, Type);
diff --git a/src/core/tests/type_prop/augru_cell.cpp b/src/core/tests/type_prop/augru_cell.cpp
index 2cbbc5a339e..286e254ee8a 100644
--- a/src/core/tests/type_prop/augru_cell.cpp
+++ b/src/core/tests/type_prop/augru_cell.cpp
@@ -4,8 +4,9 @@
 
 #include "ov_ops/augru_cell.hpp"
 
+#include
+
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/core/attribute_visitor.hpp"
 #include "openvino/opsets/opset9.hpp"
 
@@ -145,42 +146,42 @@ TEST(type_prop, augru_cell_invalid_input_rank) {
     // Invalid rank for W tensor.
     auto W = make_shared(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
         << "AUGRUCell node was created with invalid data.";
 
     // Invalid rank for X tensor.
     W = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size});
     X = make_shared(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
 
     // Invalid rank for H_t tensor.
     X = make_shared(element::f32, PartialShape{batch_size, input_size});
     H_t = make_shared(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
 
     // Invalid rank for R tensor.
     H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size});
     R = make_shared(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
 
     // Invalid rank for B tensor.
     R = make_shared(element::f32, PartialShape{gates_count * hidden_size, input_size});
     B = make_shared(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
 
     // Invalid rank for A tensor.
     B = make_shared(element::f32, PartialShape{gates_count * hidden_size});
     A = make_shared(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
 }
diff --git a/src/core/tests/type_prop/augru_sequence.cpp b/src/core/tests/type_prop/augru_sequence.cpp
index c98cdda7a2f..fe3ef4ba3ed 100644
--- a/src/core/tests/type_prop/augru_sequence.cpp
+++ b/src/core/tests/type_prop/augru_sequence.cpp
@@ -4,10 +4,12 @@
 
 #include "ov_ops/augru_sequence.hpp"
 
+#include
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/core/attribute_visitor.hpp"
+#include "openvino/core/except.hpp"
 #include "openvino/opsets/opset9.hpp"
 
 using namespace std;
@@ -172,7 +174,7 @@ TEST(type_prop, augru_sequence_invalid_input_dimension) {
     for (size_t i = 0; i < augru_sequence->get_input_size(); i++) {
         augru_sequence = augru_seq_init(params);
         augru_sequence->set_argument(i, invalid_rank_tensor);
-        ASSERT_THROW(augru_sequence->validate_and_infer_types(), ngraph::CheckFailure)
+        ASSERT_THROW(augru_sequence->validate_and_infer_types(), ov::AssertFailure)
             << "AUGRUSequence node was created with invalid data.";
     }
 }
diff --git a/src/core/tests/type_prop/avg_pool.cpp b/src/core/tests/type_prop/avg_pool.cpp
index 16a69fa9276..96e8e2c61b5 100644
--- a/src/core/tests/type_prop/avg_pool.cpp
+++ b/src/core/tests/type_prop/avg_pool.cpp
@@ -2,636 +2,708 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/avg_pool.hpp"
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
 #include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
 
 using namespace std;
-using namespace ngraph;
 using namespace testing;
 
 TEST(type_prop, avg_pool_default_ctor) {
-    PartialShape arg_shape{1, 3, 32};
+    ov::PartialShape arg_shape{1, 3, 32};
     set_shape_labels(arg_shape, 10);
-    auto arg = make_shared(element::f32, arg_shape);
+    auto arg = make_shared(ov::element::f32, arg_shape);
 
-    auto mp = make_shared();
+    auto mp = make_shared();
     mp->set_argument(0, arg);
     mp->set_pads_begin({2});
     mp->set_pads_end({2});
     mp->set_kernel({2});
     mp->set_strides({1});
-    mp->set_rounding_type(op::RoundingType::CEIL);
-    mp->set_auto_pad(op::PadType::SAME_LOWER);
+    mp->set_rounding_type(ov::op::RoundingType::CEIL);
+    mp->set_auto_pad(ov::op::PadType::SAME_LOWER);
mp->validate_and_infer_types(); EXPECT_TRUE(mp->get_exclude_pad()); EXPECT_EQ(mp->get_input_size(), 1); EXPECT_EQ(mp->get_output_size(), 1); - EXPECT_EQ(mp->get_output_element_type(0), element::f32); - EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32})); + EXPECT_EQ(mp->get_output_element_type(0), ov::element::f32); + EXPECT_EQ(mp->get_output_partial_shape(0), ov::PartialShape({1, 3, 32})); EXPECT_THAT(get_shape_labels(mp->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label)); - EXPECT_EQ(mp->get_pads_begin(), (Shape{1})); - EXPECT_EQ(mp->get_pads_end(), (Shape{0})); + EXPECT_EQ(mp->get_pads_begin(), (ov::Shape{1})); + EXPECT_EQ(mp->get_pads_end(), (ov::Shape{0})); } TEST(type_prop, avg_pool_auto_padding) { - const PartialShape arg_shape{1, 3, 32}; - const Strides strides{1}; - const Shape pads_begin{0}; - const Shape pads_end{0}; - const Shape kernel_shape{2}; + const ov::PartialShape arg_shape{1, 3, 32}; + const ov::Strides strides{1}; + const ov::Shape pads_begin{0}; + const ov::Shape pads_end{0}; + const ov::Shape kernel_shape{2}; const bool exclude_pad = false; - const auto rounding_mode = op::RoundingType::FLOOR; - const auto auto_pad = op::PadType::SAME_LOWER; + const auto rounding_mode = ov::op::RoundingType::FLOOR; + const auto auto_pad = ov::op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); - auto mp = make_shared(arg, - strides, - pads_begin, - pads_end, - kernel_shape, - exclude_pad, - rounding_mode, - auto_pad); + auto arg = make_shared(ov::element::f32, arg_shape); + auto mp = make_shared(arg, + strides, + pads_begin, + pads_end, + kernel_shape, + exclude_pad, + rounding_mode, + auto_pad); - EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32})); - EXPECT_EQ(mp->get_pads_begin(), (Shape{1})); - EXPECT_EQ(mp->get_pads_end(), (Shape{0})); + EXPECT_EQ(mp->get_output_partial_shape(0), ov::PartialShape({1, 3, 32})); + EXPECT_EQ(mp->get_pads_begin(), (ov::Shape{1})); + EXPECT_EQ(mp->get_pads_end(), (ov::Shape{0})); } TEST(type_prop, avg_pool_explicit_padding_round_ceil_dynamic_dimensions) { - const PartialShape arg_shape{-1, -1, -1}; - const Strides strides{4}; - const Shape pads_begin{2}; - const Shape pads_end{2}; - const Shape kernel_shape{4}; + const ov::PartialShape arg_shape{-1, -1, -1}; + const ov::Strides strides{4}; + const ov::Shape pads_begin{2}; + const ov::Shape pads_end{2}; + const ov::Shape kernel_shape{4}; const bool exclude_pad = true; - const auto rounding_mode = op::RoundingType::CEIL; - const auto auto_pad = op::PadType::EXPLICIT; + const auto rounding_mode = ov::op::RoundingType::CEIL; + const auto auto_pad = ov::op::PadType::EXPLICIT; - auto arg = make_shared(element::f32, arg_shape); - auto mp = make_shared(arg, - strides, - pads_begin, - pads_end, - kernel_shape, - exclude_pad, - rounding_mode, - auto_pad); + auto arg = make_shared(ov::element::f32, arg_shape); + auto mp = make_shared(arg, + strides, + pads_begin, + pads_end, + kernel_shape, + exclude_pad, + rounding_mode, + auto_pad); - EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({-1, -1, {1, -1}})); - EXPECT_EQ(mp->get_pads_begin(), (Shape{2})); - EXPECT_EQ(mp->get_pads_end(), (Shape{2})); + EXPECT_EQ(mp->get_output_partial_shape(0), ov::PartialShape({-1, -1, {1, -1}})); + EXPECT_EQ(mp->get_pads_begin(), (ov::Shape{2})); + EXPECT_EQ(mp->get_pads_end(), (ov::Shape{2})); } TEST(type_prop, avg_pool_auto_padding_4D_nc_dims_dynamic_same_lower) { - const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32}; - 
const Strides strides{1, 1}; - const Shape pads_begin{0, 0}; - const Shape pads_end{0, 0}; - const Shape kernel_shape{2, 2}; + const ov::PartialShape arg_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 32, 32}; + const ov::Strides strides{1, 1}; + const ov::Shape pads_begin{0, 0}; + const ov::Shape pads_end{0, 0}; + const ov::Shape kernel_shape{2, 2}; const bool exclude_pad = true; - const auto rounding_mode = op::RoundingType::FLOOR; - const auto auto_pad = op::PadType::SAME_LOWER; + const auto rounding_mode = ov::op::RoundingType::FLOOR; + const auto auto_pad = ov::op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); - auto mp = make_shared(arg, - strides, - pads_begin, - pads_end, - kernel_shape, - exclude_pad, - rounding_mode, - auto_pad); + auto arg = make_shared(ov::element::f32, arg_shape); + auto mp = make_shared(arg, + strides, + pads_begin, + pads_end, + kernel_shape, + exclude_pad, + rounding_mode, + auto_pad); - EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32})); - EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 1})); - EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0})); + EXPECT_EQ(mp->get_output_partial_shape(0), + ov::PartialShape({ov::Dimension::dynamic(), ov::Dimension::dynamic(), 32, 32})); + EXPECT_EQ(mp->get_pads_begin(), (ov::Shape{1, 1})); + EXPECT_EQ(mp->get_pads_end(), (ov::Shape{0, 0})); } TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper) { - const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32}; - const Strides strides{1, 1}; - const Shape pads_begin{0, 0}; - const Shape pads_end{0, 0}; - const Shape kernel_shape{2, 2}; + const ov::PartialShape arg_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 32, 32}; + const ov::Strides strides{1, 1}; + const ov::Shape pads_begin{0, 0}; + const ov::Shape pads_end{0, 0}; + const ov::Shape kernel_shape{2, 2}; const bool exclude_pad = false; - const auto rounding_mode = op::RoundingType::FLOOR; - const auto auto_pad = op::PadType::SAME_UPPER; + const auto rounding_mode = ov::op::RoundingType::FLOOR; + const auto auto_pad = ov::op::PadType::SAME_UPPER; - auto arg = make_shared(element::f32, arg_shape); - auto mp = make_shared(arg, - strides, - pads_begin, - pads_end, - kernel_shape, - exclude_pad, - rounding_mode, - auto_pad); + auto arg = make_shared(ov::element::f32, arg_shape); + auto mp = make_shared(arg, + strides, + pads_begin, + pads_end, + kernel_shape, + exclude_pad, + rounding_mode, + auto_pad); - EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), Dimension::dynamic(), 32, 32})); - EXPECT_EQ(mp->get_pads_begin(), (Shape{0, 0})); - EXPECT_EQ(mp->get_pads_end(), (Shape{1, 1})); + EXPECT_EQ(mp->get_output_partial_shape(0), + ov::PartialShape({ov::Dimension::dynamic(), ov::Dimension::dynamic(), 32, 32})); + EXPECT_EQ(mp->get_pads_begin(), (ov::Shape{0, 0})); + EXPECT_EQ(mp->get_pads_end(), (ov::Shape{1, 1})); } TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) { - const PartialShape arg_shape{1, 3, 32, Dimension::dynamic()}; - const Strides strides{1, 1}; - const Shape pads_begin{1, 1}; - const Shape pads_end{0, 0}; - const Shape kernel_shape{2, 2}; + const ov::PartialShape arg_shape{1, 3, 32, ov::Dimension::dynamic()}; + const ov::Strides strides{1, 1}; + const ov::Shape pads_begin{1, 1}; + const ov::Shape pads_end{0, 0}; + const ov::Shape kernel_shape{2, 2}; const bool exclude_pad = true; - const auto rounding_mode = op::RoundingType::FLOOR; - const auto 
auto_pad = op::PadType::SAME_LOWER; + const auto rounding_mode = ov::op::RoundingType::FLOOR; + const auto auto_pad = ov::op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); - auto mp = make_shared(arg, - strides, - pads_begin, - pads_end, - kernel_shape, - exclude_pad, - rounding_mode, - auto_pad); + auto arg = make_shared(ov::element::f32, arg_shape); + auto mp = make_shared(arg, + strides, + pads_begin, + pads_end, + kernel_shape, + exclude_pad, + rounding_mode, + auto_pad); - EXPECT_EQ(mp->get_output_partial_shape(0), PartialShape({1, 3, 32, Dimension::dynamic()})); - EXPECT_EQ(mp->get_pads_begin(), (Shape{1, 0})); - EXPECT_EQ(mp->get_pads_end(), (Shape{0, 0})); + EXPECT_EQ(mp->get_output_partial_shape(0), ov::PartialShape({1, 3, 32, ov::Dimension::dynamic()})); + EXPECT_EQ(mp->get_pads_begin(), (ov::Shape{1, 0})); + EXPECT_EQ(mp->get_pads_end(), (ov::Shape{0, 0})); } TEST(type_prop, avg_pool_1d_deduce) { - const auto param = make_shared(element::f32, Shape{64, 3}); - const Shape kernel{10}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3}); + const ov::Shape kernel{10}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_1d_deduce_strided) { - const auto param = make_shared(element::f32, Shape{64, 3}); - const Shape kernel{10}; - const auto move_strides = Strides{2}; - EXPECT_THROW( - const auto unused = - make_shared(param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3}); + const ov::Shape kernel{10}; + const auto move_strides = ov::Strides{2}; + EXPECT_THROW(const auto unused = make_shared(param, + move_strides, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_1d_deduce_strided_small_uneven) { - const auto param = make_shared(element::f32, Shape{64, 3}); - const Shape kernel{2}; - const auto move_strides = Strides{2}; - EXPECT_THROW( - const auto unused = - make_shared(param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3}); + const ov::Shape kernel{2}; + const auto move_strides = ov::Strides{2}; + EXPECT_THROW(const auto unused = make_shared(param, + move_strides, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_1d_deduce_strided_small_even) { - const auto param = make_shared(element::f32, Shape{64, 3}); - const Shape kernel{2}; - const auto move_strides = Strides{2}; - EXPECT_THROW( - const auto unused = - make_shared(param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3}); + const ov::Shape kernel{2}; + const auto move_strides = ov::Strides{2}; + EXPECT_THROW(const auto unused = make_shared(param, + move_strides, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_2d_deduce) { - const auto param = make_shared(element::f32, 
Shape{64, 3, 100, 150}); - const Shape kernel{10, 20}; - const auto avg_pool = make_shared(param, - Strides{1, 1}, - Shape{0, 0}, - Shape{0, 0}, - kernel, - true, - op::RoundingType::FLOOR); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3, 100, 150}); + const ov::Shape kernel{10, 20}; + const auto avg_pool = make_shared(param, + ov::Strides{1, 1}, + ov::Shape{0, 0}, + ov::Shape{0, 0}, + kernel, + true, + ov::op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); - EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 91, 131})); + EXPECT_EQ(avg_pool->get_output_element_type(0), ov::element::f32); + EXPECT_EQ(avg_pool->get_output_shape(0), (ov::Shape{64, 3, 91, 131})); - EXPECT_EQ(avg_pool->get_strides(), (Strides{1, 1})); - EXPECT_EQ(avg_pool->get_kernel(), (Shape{10, 20})); - EXPECT_EQ(avg_pool->get_pads_begin(), (Shape{0, 0})); - EXPECT_EQ(avg_pool->get_pads_end(), (Shape{0, 0})); + EXPECT_EQ(avg_pool->get_strides(), (ov::Strides{1, 1})); + EXPECT_EQ(avg_pool->get_kernel(), (ov::Shape{10, 20})); + EXPECT_EQ(avg_pool->get_pads_begin(), (ov::Shape{0, 0})); + EXPECT_EQ(avg_pool->get_pads_end(), (ov::Shape{0, 0})); } TEST(type_prop, avg_pool_2d_deduce_strided) { - const auto param = make_shared(element::f32, Shape{64, 3, 100, 150}); - const Shape kernel{10, 20}; - const auto move_strides = Strides{2, 3}; - const auto avg_pool = make_shared(param, - move_strides, - Shape{0, 0}, - Shape{0, 0}, - kernel, - true, - op::RoundingType::FLOOR); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3, 100, 150}); + const ov::Shape kernel{10, 20}; + const auto move_strides = ov::Strides{2, 3}; + const auto avg_pool = make_shared(param, + move_strides, + ov::Shape{0, 0}, + ov::Shape{0, 0}, + kernel, + true, + ov::op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); - EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 46, 44})); + EXPECT_EQ(avg_pool->get_output_element_type(0), ov::element::f32); + EXPECT_EQ(avg_pool->get_output_shape(0), (ov::Shape{64, 3, 46, 44})); - EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3})); - EXPECT_EQ(avg_pool->get_kernel(), (Shape{10, 20})); - EXPECT_EQ(avg_pool->get_pads_begin(), (Shape{0, 0})); - EXPECT_EQ(avg_pool->get_pads_end(), (Shape{0, 0})); + EXPECT_EQ(avg_pool->get_strides(), (ov::Strides{2, 3})); + EXPECT_EQ(avg_pool->get_kernel(), (ov::Shape{10, 20})); + EXPECT_EQ(avg_pool->get_pads_begin(), (ov::Shape{0, 0})); + EXPECT_EQ(avg_pool->get_pads_end(), (ov::Shape{0, 0})); } TEST(type_prop, avg_pool_3d_deduce_strided_small) { - const auto param = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - const Shape kernel{2, 3, 2}; - const auto move_strides = Strides{2, 3, 4}; - const auto avg_pool = make_shared(param, - move_strides, - Shape{0, 0, 0}, - Shape{0, 0, 0}, - kernel, - true, - op::RoundingType::FLOOR); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3, 7, 8, 10}); + const ov::Shape kernel{2, 3, 2}; + const auto move_strides = ov::Strides{2, 3, 4}; + const auto avg_pool = make_shared(param, + move_strides, + ov::Shape{0, 0, 0}, + ov::Shape{0, 0, 0}, + kernel, + true, + ov::op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); - EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 3, 2, 3})); + EXPECT_EQ(avg_pool->get_output_element_type(0), ov::element::f32); + EXPECT_EQ(avg_pool->get_output_shape(0), (ov::Shape{64, 3, 3, 2, 3})); - EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3, 4})); - 
EXPECT_EQ(avg_pool->get_kernel(), (Shape{2, 3, 2})); - EXPECT_EQ(avg_pool->get_pads_begin(), (Shape{0, 0, 0})); - EXPECT_EQ(avg_pool->get_pads_end(), (Shape{0, 0, 0})); + EXPECT_EQ(avg_pool->get_strides(), (ov::Strides{2, 3, 4})); + EXPECT_EQ(avg_pool->get_kernel(), (ov::Shape{2, 3, 2})); + EXPECT_EQ(avg_pool->get_pads_begin(), (ov::Shape{0, 0, 0})); + EXPECT_EQ(avg_pool->get_pads_end(), (ov::Shape{0, 0, 0})); } TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) { - const auto param = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - const Shape kernel{2, 3, 2}; - const auto move_strides = Strides{2, 3, 4}; - const Shape pads_begin{5, 6, 4}; - const Shape pads_end{6, 4, 5}; - const auto avg_pool = - make_shared(param, move_strides, pads_begin, pads_end, kernel, false, op::RoundingType::FLOOR); + const auto param = make_shared(ov::element::f32, ov::Shape{64, 3, 7, 8, 10}); + const ov::Shape kernel{2, 3, 2}; + const auto move_strides = ov::Strides{2, 3, 4}; + const ov::Shape pads_begin{5, 6, 4}; + const ov::Shape pads_end{6, 4, 5}; + const auto avg_pool = make_shared(param, + move_strides, + pads_begin, + pads_end, + kernel, + false, + ov::op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); - EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 9, 6, 5})); + EXPECT_EQ(avg_pool->get_output_element_type(0), ov::element::f32); + EXPECT_EQ(avg_pool->get_output_shape(0), (ov::Shape{64, 3, 9, 6, 5})); - EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3, 4})); - EXPECT_EQ(avg_pool->get_kernel(), (Shape{2, 3, 2})); - EXPECT_EQ(avg_pool->get_pads_begin(), (Shape{5, 6, 4})); - EXPECT_EQ(avg_pool->get_pads_end(), (Shape{6, 4, 5})); + EXPECT_EQ(avg_pool->get_strides(), (ov::Strides{2, 3, 4})); + EXPECT_EQ(avg_pool->get_kernel(), (ov::Shape{2, 3, 2})); + EXPECT_EQ(avg_pool->get_pads_begin(), (ov::Shape{5, 6, 4})); + EXPECT_EQ(avg_pool->get_pads_end(), (ov::Shape{6, 4, 5})); } TEST(type_prop, avg_pool_invalid_0d_input) { - const auto param = make_shared(element::f32, Shape{}); - const Shape kernel{}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{}); + const ov::Shape kernel{}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_1d_input) { - const auto param = make_shared(element::f32, Shape{2}); - const Shape kernel{}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{2}); + const ov::Shape kernel{}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_2d_input) { - const auto param = make_shared(element::f32, Shape{2, 6}); - const Shape kernel{}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{2, 6}); + const ov::Shape kernel{}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, 
+ true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_0_batch_size) { - const auto param = make_shared(element::f32, Shape{0, 6}); - const Shape kernel{1}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{0, 6}); + const ov::Shape kernel{1}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_0_channels) { - const auto param = make_shared(element::f32, Shape{6, 0}); - const Shape kernel{1}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 0}); + const ov::Shape kernel{1}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); - const Shape kernel{3, 3, 3}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10}); + const ov::Shape kernel{3, 3, 3}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); - const Shape kernel{3}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10}); + const ov::Shape kernel{3}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_movement_stride_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); - const Shape kernel{3, 3}; - const auto move_strides = Strides{2, 3, 8}; - EXPECT_THROW( - const auto unused = - make_shared(param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10}); + const ov::Shape kernel{3, 3}; + const auto move_strides = ov::Strides{2, 3, 8}; + EXPECT_THROW(const auto unused = make_shared(param, + move_strides, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_padding_below_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); - const Shape kernel{3, 3}; - const auto move_strides = Strides{2, 3}; - const Shape pads_begin{1, 2, 3}; - const Shape pads_end{1, 2}; - EXPECT_THROW(const auto unused = make_shared(param, - move_strides, - pads_begin, - pads_end, - kernel, - true, - 
op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10}); + const ov::Shape kernel{3, 3}; + const auto move_strides = ov::Strides{2, 3}; + const ov::Shape pads_begin{1, 2, 3}; + const ov::Shape pads_end{1, 2}; + EXPECT_THROW(const auto unused = make_shared(param, + move_strides, + pads_begin, + pads_end, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_padding_above_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); - const Shape kernel{3, 3}; - const auto move_strides = Strides{2, 3}; - const Shape pads_begin{1, 2}; - const Shape pads_end{1, 2, 3}; - EXPECT_THROW(const auto unused = make_shared(param, - move_strides, - pads_begin, - pads_end, - kernel, - true, - op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10}); + const ov::Shape kernel{3, 3}; + const auto move_strides = ov::Strides{2, 3}; + const ov::Shape pads_begin{1, 2}; + const ov::Shape pads_end{1, 2, 3}; + EXPECT_THROW(const auto unused = make_shared(param, + move_strides, + pads_begin, + pads_end, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_input_item_size_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 0, 10}); - const Shape kernel{3, 3}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 0, 10}); + const ov::Shape kernel{3, 3}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_window_size_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); - const Shape kernel{3, 0}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10}); + const ov::Shape kernel{3, 0}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_invalid_dilated_too_large) { - const auto param = make_shared(element::f32, Shape{6, 2, 8, 8}); - const Shape kernel{9, 9}; - EXPECT_THROW( - const auto unused = - make_shared(param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), - NodeValidationFailure); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 8, 8}); + const ov::Shape kernel{9, 9}; + EXPECT_THROW(const auto unused = make_shared(param, + ov::Strides{1}, + ov::Shape{}, + ov::Shape{}, + kernel, + true, + ov::op::RoundingType::FLOOR), + ov::NodeValidationFailure); } TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding) { - const auto param = make_shared(element::f32, Shape{6, 2, 8, 8}); - const Shape kernel{9, 9}; - const Strides window_strides{1, 1}; - const Shape pads_begin{0, 0}; - const Shape pads_end{1, 1}; - const auto avg_pool = make_shared(param, - window_strides, - pads_begin, - pads_end, - kernel, - true, - op::RoundingType::FLOOR); + const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 
8, 8});
+    const ov::Shape kernel{9, 9};
+    const ov::Strides window_strides{1, 1};
+    const ov::Shape pads_begin{0, 0};
+    const ov::Shape pads_end{1, 1};
+    const auto avg_pool = make_shared(param,
+                                      window_strides,
+                                      pads_begin,
+                                      pads_end,
+                                      kernel,
+                                      true,
+                                      ov::op::RoundingType::FLOOR);
 
-    ASSERT_EQ(avg_pool->get_output_element_type(0), element::f32);
-    ASSERT_EQ(avg_pool->get_output_shape(0), (Shape{6, 2, 1, 1}));
+    ASSERT_EQ(avg_pool->get_output_element_type(0), ov::element::f32);
+    ASSERT_EQ(avg_pool->get_output_shape(0), (ov::Shape{6, 2, 1, 1}));
 }
 
 TEST(type_prop, avg_pool_invalid_movement_stride_0) {
-    const auto param = make_shared(element::f32, Shape{6, 2, 10, 10});
-    const Shape kernel{3, 3};
-    const auto move_strides = Strides{0, 1};
-    EXPECT_THROW(
-        const auto unused =
-            make_shared(param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR),
-        NodeValidationFailure);
+    const auto param = make_shared(ov::element::f32, ov::Shape{6, 2, 10, 10});
+    const ov::Shape kernel{3, 3};
+    const auto move_strides = ov::Strides{0, 1};
+    EXPECT_THROW(const auto unused = make_shared(param,
+                                                 move_strides,
+                                                 ov::Shape{},
+                                                 ov::Shape{},
+                                                 kernel,
+                                                 true,
+                                                 ov::op::RoundingType::FLOOR),
+                 ov::NodeValidationFailure);
 }
 
 TEST(type_prop, avg_pool_partial_rank_dynamic_ok) {
-    const PartialShape arg_shape{PartialShape::dynamic()};
-    const Shape kernel{2, 3, 4, 5};
-    const Strides window_movement_strides{1, 1, 1, 1};
-    const Shape pads_begin{0, 0, 0, 0};
-    const Shape pads_end{0, 0, 0, 0};
+    const ov::PartialShape arg_shape{ov::PartialShape::dynamic()};
+    const ov::Shape kernel{2, 3, 4, 5};
+    const ov::Strides window_movement_strides{1, 1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
-    auto ap = make_shared(param,
-                          window_movement_strides,
-                          pads_begin,
-                          pads_end,
-                          kernel,
-                          false,
-                          op::RoundingType::FLOOR);
+    const auto param = make_shared(ov::element::f32, arg_shape);
+    auto ap = make_shared(param,
+                          window_movement_strides,
+                          pads_begin,
+                          pads_end,
+                          kernel,
+                          false,
+                          ov::op::RoundingType::FLOOR);
 
-    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
-    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape(PartialShape::dynamic(6)));
+    EXPECT_EQ(ap->get_output_element_type(0), ov::element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), ov::PartialShape(ov::PartialShape::dynamic(6)));
 }
 
 TEST(type_prop, avg_pool_partial_rank_dynamic_attrib_rank_mismatch) {
-    const PartialShape arg_shape{PartialShape::dynamic()};
-    const Shape kernel{2, 3, 4, 5};
-    const Strides window_movement_strides{1, 1, 1, 1, 1};
-    const Shape pads_begin{0, 0, 0, 0};
-    const Shape pads_end{0, 0, 0, 0};
+    const ov::PartialShape arg_shape{ov::PartialShape::dynamic()};
+    const ov::Shape kernel{2, 3, 4, 5};
+    const ov::Strides window_movement_strides{1, 1, 1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
+    const auto param = make_shared(ov::element::f32, arg_shape);
 
-    EXPECT_THROW(const auto unused = make_shared(param,
-                                                 window_movement_strides,
-                                                 pads_begin,
-                                                 pads_end,
-                                                 kernel,
-                                                 false,
-                                                 op::RoundingType::FLOOR),
-                 NodeValidationFailure);
+    EXPECT_THROW(const auto unused = make_shared(param,
+                                                 window_movement_strides,
+                                                 pads_begin,
+                                                 pads_end,
+                                                 kernel,
+                                                 false,
+                                                 ov::op::RoundingType::FLOOR),
+                 ov::NodeValidationFailure);
 }
 
 TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok) {
-    const PartialShape arg_shape{PartialShape::dynamic(5)};
-    const Shape kernel{2, 3, 4};
-    const Strides window_movement_strides{1, 1, 1};
-    const Shape pads_begin{0, 0, 0};
-    const Shape pads_end{0, 0, 0};
+    const ov::PartialShape arg_shape{ov::PartialShape::dynamic(5)};
+    const ov::Shape kernel{2, 3, 4};
+    const ov::Strides window_movement_strides{1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
-    auto ap = make_shared(param,
-                          window_movement_strides,
-                          pads_begin,
-                          pads_end,
-                          kernel,
-                          false,
-                          op::RoundingType::FLOOR);
+    const auto param = make_shared(ov::element::f32, arg_shape);
+    auto ap = make_shared(param,
+                          window_movement_strides,
+                          pads_begin,
+                          pads_end,
+                          kernel,
+                          false,
+                          ov::op::RoundingType::FLOOR);
 
-    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
-    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape({-1, -1, {1, -1}, {1, -1}, {1, -1}}));
+    EXPECT_EQ(ap->get_output_element_type(0), ov::element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), ov::PartialShape({-1, -1, {1, -1}, {1, -1}, {1, -1}}));
 }
 
 TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) {
-    const PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4};
-    const Shape kernel{2, 3, 4};
-    const Strides window_movement_strides{1, 1, 1};
-    const Shape pads_begin{0, 0, 0};
-    const Shape pads_end{0, 0, 0};
+    const ov::PartialShape arg_shape{5, ov::Dimension::dynamic(), 8, ov::Dimension::dynamic(), 4};
+    const ov::Shape kernel{2, 3, 4};
+    const ov::Strides window_movement_strides{1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
-    auto ap = make_shared(param,
-                          window_movement_strides,
-                          pads_begin,
-                          pads_end,
-                          kernel,
-                          false,
-                          op::RoundingType::FLOOR);
+    const auto param = make_shared(ov::element::f32, arg_shape);
+    auto ap = make_shared(param,
+                          window_movement_strides,
+                          pads_begin,
+                          pads_end,
+                          kernel,
+                          false,
+                          ov::op::RoundingType::FLOOR);
 
-    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
-    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape(PartialShape{5, -1, 7, {1, -1}, 1}));
+    EXPECT_EQ(ap->get_output_element_type(0), ov::element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0), ov::PartialShape(ov::PartialShape{5, -1, 7, {1, -1}, 1}));
 }
 
 TEST(type_prop, avg_pool_partial_rank_static_dynamic_attrib_rank_mismatch) {
-    const PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4};
-    const Shape kernel{2, 3, 4, 5};
-    const Strides window_movement_strides{1, 1, 1};
-    const Shape pads_begin{0, 0, 0};
-    const Shape pads_end{0, 0, 0};
+    const ov::PartialShape arg_shape{5, ov::Dimension::dynamic(), 8, ov::Dimension::dynamic(), 4};
+    const ov::Shape kernel{2, 3, 4, 5};
+    const ov::Strides window_movement_strides{1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
+    const auto param = make_shared(ov::element::f32, arg_shape);
 
-    EXPECT_THROW(const auto unused = make_shared(param,
-                                                 window_movement_strides,
-                                                 pads_begin,
-                                                 pads_end,
-                                                 kernel,
-                                                 true,
-                                                 op::RoundingType::FLOOR),
-                 NodeValidationFailure);
+    EXPECT_THROW(const auto unused = make_shared(param,
+                                                 window_movement_strides,
+                                                 pads_begin,
+                                                 pads_end,
+                                                 kernel,
+                                                 true,
+                                                 ov::op::RoundingType::FLOOR),
+                 ov::NodeValidationFailure);
 }
 
 TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_not_too_big) {
-    const PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4};
-    const Shape kernel{9, 3, 4};
-    const Strides window_movement_strides{1, 1, 1};
-    const Shape pads_begin{0, 0, 0};
-    const Shape pads_end{0, 0, 0};
+    const ov::PartialShape arg_shape{5, ov::Dimension::dynamic(), 8, ov::Dimension::dynamic(), 4};
+    const ov::Shape kernel{9, 3, 4};
+    const ov::Strides window_movement_strides{1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
+    const auto param = make_shared(ov::element::f32, arg_shape);
 
-    EXPECT_THROW(const auto unused = make_shared(param,
-                                                 window_movement_strides,
-                                                 pads_begin,
-                                                 pads_end,
-                                                 kernel,
-                                                 true,
-                                                 op::RoundingType::FLOOR),
-                 NodeValidationFailure);
+    EXPECT_THROW(const auto unused = make_shared(param,
+                                                 window_movement_strides,
+                                                 pads_begin,
+                                                 pads_end,
+                                                 kernel,
+                                                 true,
+                                                 ov::op::RoundingType::FLOOR),
+                 ov::NodeValidationFailure);
 }
 
 TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big) {
-    const PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4};
-    const Shape kernel{9, 3, 4};
-    const Strides window_movement_strides{1, 1, 1};
-    const Shape pads_begin{0, 0, 0};
-    const Shape pads_end{1, 0, 0};
+    const ov::PartialShape arg_shape{5, ov::Dimension::dynamic(), 8, ov::Dimension::dynamic(), 4};
+    const ov::Shape kernel{9, 3, 4};
+    const ov::Strides window_movement_strides{1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0};
+    const ov::Shape pads_end{1, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
-    auto ap = make_shared(param,
-                          window_movement_strides,
-                          pads_begin,
-                          pads_end,
-                          kernel,
-                          true,
-                          op::RoundingType::FLOOR);
+    const auto param = make_shared(ov::element::f32, arg_shape);
+    auto ap = make_shared(param,
+                          window_movement_strides,
+                          pads_begin,
+                          pads_end,
+                          kernel,
+                          true,
+                          ov::op::RoundingType::FLOOR);
 
-    EXPECT_EQ(ap->get_output_element_type(0), element::f32);
-    EXPECT_EQ(ap->get_output_partial_shape(0), PartialShape(PartialShape{5, Dimension::dynamic(), 1, {1, -1}, 1}));
+    EXPECT_EQ(ap->get_output_element_type(0), ov::element::f32);
+    EXPECT_EQ(ap->get_output_partial_shape(0),
+              ov::PartialShape(ov::PartialShape{5, ov::Dimension::dynamic(), 1, {1, -1}, 1}));
 }
 
 TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_in_padding) {
-    const PartialShape arg_shape{5, Dimension::dynamic(), 8, Dimension::dynamic(), 4};
-    const Shape kernel{9, 3, 4};
-    const Strides window_movement_strides{1, 1, 1};
-    const Shape pads_begin{0, 0, 0};
-    const Shape pads_end{0, 0, 0};
+    const ov::PartialShape arg_shape{5, ov::Dimension::dynamic(), 8, ov::Dimension::dynamic(), 4};
+    const ov::Shape kernel{9, 3, 4};
+    const ov::Strides window_movement_strides{1, 1, 1};
+    const ov::Shape pads_begin{0, 0, 0};
+    const ov::Shape pads_end{0, 0, 0};
 
-    const auto param = make_shared(element::f32, arg_shape);
+    const auto param = make_shared(ov::element::f32, arg_shape);
 
-    EXPECT_THROW(const auto unused = make_shared(param,
-                                                 window_movement_strides,
-                                                 pads_begin,
-                                                 pads_end,
-                                                 kernel,
-                                                 true,
-                                                 op::RoundingType::FLOOR),
-                 NodeValidationFailure);
+    EXPECT_THROW(const auto unused = make_shared(param,
+                                                 window_movement_strides,
+                                                 pads_begin,
+                                                 pads_end,
+                                                 kernel,
+                                                 true,
+                                                 ov::op::RoundingType::FLOOR),
+                 ov::NodeValidationFailure);
 }
 
 TEST(type_prop, avg_pool_kernel_dilation_not_compatible_with_padding_begin) {
-    const PartialShape arg_shape{5, -1, 8};
-    const Shape kernel{9};
-    const Strides window_movement_strides{1};
-    const Shape pads_begin{10};
-    const Shape pads_end{0};
+    const ov::PartialShape arg_shape{5, -1, 8};
+    const ov::Shape kernel{9};
+    const ov::Strides window_movement_strides{1};
+    const ov::Shape pads_begin{10};
+    const ov::Shape pads_end{0};
 
-    const auto param = make_shared(element::f32, arg_shape);
+    const auto param = make_shared(ov::element::f32, arg_shape);
 
-    OV_EXPECT_THROW(const auto unused = make_shared(param,
-                                                    window_movement_strides,
-                                                    pads_begin,
-                                                    pads_end,
-                                                    kernel,
-                                                    true,
-                                                    op::RoundingType::FLOOR),
-                    NodeValidationFailure,
+    OV_EXPECT_THROW(const auto unused = make_shared(param,
+                                                    window_movement_strides,
+                                                    pads_begin,
+                                                    pads_end,
+                                                    kernel,
+                                                    true,
+                                                    ov::op::RoundingType::FLOOR),
+                    ov::NodeValidationFailure,
                     HasSubstr("Kernel after dilation is sometimes entirely in the padding area for axis 0"));
 }
 
 TEST(type_prop, avg_pool_kernel_dilation_not_compatible_with_padding_end) {
-    const PartialShape arg_shape{5, -1, 8};
-    const Shape kernel{9};
-    const Strides window_movement_strides{1};
-    const Shape pads_begin{0};
-    const Shape pads_end{10};
+    const ov::PartialShape arg_shape{5, -1, 8};
+    const ov::Shape kernel{9};
+    const ov::Strides window_movement_strides{1};
+    const ov::Shape pads_begin{0};
+    const ov::Shape pads_end{10};
 
-    const auto param = make_shared(element::f32, arg_shape);
+    const auto param = make_shared(ov::element::f32, arg_shape);
 
-    OV_EXPECT_THROW(const auto unused = make_shared(param,
-                                                    window_movement_strides,
-                                                    pads_begin,
-                                                    pads_end,
-                                                    kernel,
-                                                    true,
-                                                    op::RoundingType::FLOOR),
-                    NodeValidationFailure,
+    OV_EXPECT_THROW(const auto unused = make_shared(param,
+                                                    window_movement_strides,
+                                                    pads_begin,
+                                                    pads_end,
+                                                    kernel,
+                                                    true,
+                                                    ov::op::RoundingType::FLOOR),
+                    ov::NodeValidationFailure,
                     HasSubstr("Kernel after dilation is sometimes entirely in the padding area for axis 0"));
 }
diff --git a/src/core/tests/type_prop/batch_norm.cpp b/src/core/tests/type_prop/batch_norm.cpp
index c2a94453a05..16829fd74f7 100644
--- a/src/core/tests/type_prop/batch_norm.cpp
+++ b/src/core/tests/type_prop/batch_norm.cpp
@@ -2,37 +2,37 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/batch_norm.hpp"
+
 #include "common_test_utils/type_prop.hpp"
 #include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
 
 using namespace std;
-using namespace ngraph;
 
 struct BatchNormInferInputs {
-    element::Type in_et;
-    PartialShape in_shape;
+    ov::element::Type in_et;
+    ov::PartialShape in_shape;
     std::string in_name;
 };
 
 struct BatchNormInferParams {
-    element::Type data_batch_et;
-    PartialShape data_batch_ps;
+    ov::element::Type data_batch_et;
+    ov::PartialShape data_batch_ps;
     std::vector inputs;
     double epsilon;
 };
 
 template
-std::shared_ptr makeBatchNormOp(const BatchNormInferParams& p) {
+std::shared_ptr makeBatchNormOp(const BatchNormInferParams& p) {
     if (p.inputs.size() != 4) {
         throw runtime_error("BatchNormInference requires 4 additional inputs for batch"
                             "normalization transformation");
     }
-    auto data_batch = make_shared(p.data_batch_et, p.data_batch_ps);
-    auto gamma = make_shared(p.inputs[0].in_et, p.inputs[0].in_shape);
-    auto beta = make_shared(p.inputs[1].in_et, p.inputs[1].in_shape);
-    auto mean = make_shared(p.inputs[2].in_et, p.inputs[2].in_shape);
-    auto variance = make_shared(p.inputs[3].in_et, p.inputs[3].in_shape);
+    auto data_batch = make_shared(p.data_batch_et, p.data_batch_ps);
+    auto gamma = make_shared(p.inputs[0].in_et, p.inputs[0].in_shape);
+    auto beta = make_shared(p.inputs[1].in_et, p.inputs[1].in_shape);
+    auto mean = make_shared(p.inputs[2].in_et, p.inputs[2].in_shape);
+    auto variance = make_shared(p.inputs[3].in_et, p.inputs[3].in_shape);
 
     return make_shared(data_batch, gamma, beta, mean, variance, p.epsilon);
 }
@@ -42,13 +42,13 @@ class BatchNormTest : public ::testing::Test {};
 TYPED_TEST_SUITE_P(BatchNormTest);
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_2) {
-    PartialShape data_batch_shape{10, 100};
-    element::Type inputs_et = element::f32;
+    ov::PartialShape data_batch_shape{10, 100};
+    ov::element::Type inputs_et = ov::element::f32;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape{100}, "gamma"},
-                             {inputs_et, PartialShape{100}, "beta"},
-                             {inputs_et, PartialShape{100}, "mean"},
-                             {inputs_et, PartialShape{100}, "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape{100}, "gamma"},
+                             {inputs_et, ov::PartialShape{100}, "beta"},
+                             {inputs_et, ov::PartialShape{100}, "mean"},
+                             {inputs_et, ov::PartialShape{100}, "variance"}};
 
     double epsilon = 0.001;
 
@@ -62,13 +62,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_2) {
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_4) {
-    PartialShape data_batch_shape{1, 10, 224, 224};
-    element::Type inputs_et = element::f16;
+    ov::PartialShape data_batch_shape{1, 10, 224, 224};
+    ov::element::Type inputs_et = ov::element::f16;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape{10}, "gamma"},
-                             {inputs_et, PartialShape{10}, "beta"},
-                             {inputs_et, PartialShape{10}, "mean"},
-                             {inputs_et, PartialShape{10}, "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape{10}, "gamma"},
+                             {inputs_et, ov::PartialShape{10}, "beta"},
+                             {inputs_et, ov::PartialShape{10}, "mean"},
+                             {inputs_et, ov::PartialShape{10}, "variance"}};
 
     double epsilon = 0.001;
 
@@ -82,13 +82,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_4) {
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_inputs_rank_dynamic) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type inputs_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                             {inputs_et, PartialShape::dynamic(), "beta"},
-                             {inputs_et, PartialShape::dynamic(), "mean"},
-                             {inputs_et, PartialShape::dynamic(), "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                             {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                             {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                             {inputs_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -101,13 +101,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_inputs_rank_dynamic) {
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_channel_inputs_rank_dynamic) {
-    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    element::Type inputs_et = element::f32;
+    ov::PartialShape data_batch_shape{64, ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                             {inputs_et, PartialShape::dynamic(), "beta"},
-                             {inputs_et, PartialShape::dynamic(), "mean"},
-                             {inputs_et, PartialShape::dynamic(), "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                             {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                             {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                             {inputs_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -117,17 +117,17 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_channel_
     ASSERT_EQ(bn->get_output_size(), 1);
     ASSERT_EQ(bn->get_output_element_type(0), inputs_et);
     ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
+        ov::PartialShape{64, ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}));
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_dynamic_some_channel_inputs_rank_static) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
 
-    std::vector inputs = {{input_et, PartialShape{Dimension::dynamic()}, "gamma"},
-                          {input_et, PartialShape::dynamic(), "beta"},
-                          {input_et, PartialShape{Dimension::dynamic()}, "mean"},
-                          {input_et, PartialShape::dynamic(), "variance"}};
+    std::vector inputs = {{input_et, ov::PartialShape{ov::Dimension::dynamic()}, "gamma"},
+                          {input_et, ov::PartialShape::dynamic(), "beta"},
+                          {input_et, ov::PartialShape{ov::Dimension::dynamic()}, "mean"},
+                          {input_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -140,13 +140,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_dynamic_some_ch
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_some_channel_inputs_rank_static) {
-    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224};
-    element::Type input_et = element::f32;
+    ov::PartialShape data_batch_shape{64, ov::Dimension::dynamic(), ov::Dimension::dynamic(), 224};
+    ov::element::Type input_et = ov::element::f32;
 
-    std::vector inputs = {{input_et, PartialShape{3}, "gamma"},
-                          {input_et, PartialShape::dynamic(), "beta"},
-                          {input_et, PartialShape{3}, "mean"},
-                          {input_et, PartialShape{Dimension::dynamic()}, "variance"}};
+    std::vector inputs = {{input_et, ov::PartialShape{3}, "gamma"},
+                          {input_et, ov::PartialShape::dynamic(), "beta"},
+                          {input_et, ov::PartialShape{3}, "mean"},
+                          {input_et, ov::PartialShape{ov::Dimension::dynamic()}, "variance"}};
 
     double epsilon = 0.001;
 
@@ -155,22 +155,22 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_some_cha
 
     ASSERT_EQ(bn->get_output_size(), 1);
     ASSERT_EQ(bn->get_output_element_type(0), input_et);
-    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(PartialShape{64, 3, Dimension::dynamic(), 224}));
+    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(ov::PartialShape{64, 3, ov::Dimension::dynamic(), 224}));
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types) {
-    PartialShape data_batch_shape{10, 100};
+    ov::PartialShape data_batch_shape{10, 100};
 
-    const std::vector inputs_et{element::i32, element::u32, element::boolean};
+    const std::vector inputs_et{ov::element::i32, ov::element::u32, ov::element::boolean};
 
     double eps = 0.001;
 
     std::vector bn_tests;
     for (const auto& et : inputs_et) {
-        std::vector ch_inputs = {{et, PartialShape{100}, "gamma"},
-                                 {et, PartialShape{100}, "beta"},
-                                 {et, PartialShape{100}, "mean"},
-                                 {et, PartialShape{100}, "variance"}};
+        std::vector ch_inputs = {{et, ov::PartialShape{100}, "gamma"},
+                                 {et, ov::PartialShape{100}, "beta"},
+                                 {et, ov::PartialShape{100}, "mean"},
+                                 {et, ov::PartialShape{100}, "variance"}};
 
         bn_tests.push_back(BatchNormInferParams{et, data_batch_shape, ch_inputs, eps});
     }
 
@@ -179,7 +179,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types) {
         try {
             auto bn = makeBatchNormOp(params);
             FAIL() << "Invalid input element types not detected";
-        } catch (const NodeValidationFailure& error) {
+        } catch (const ov::NodeValidationFailure& error) {
             EXPECT_HAS_SUBSTRING(error.what(), "Input element types must be floating-point");
         } catch (...) {
             FAIL() << "Input element types check failed for unexpected reason";
@@ -189,23 +189,23 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types) {
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_types) {
     // Data batch input element type and shape
-    const element::Type data_batch_et = element::f32;
-    const PartialShape data_batch_ps{10, 200};
+    const ov::element::Type data_batch_et = ov::element::f32;
+    const ov::PartialShape data_batch_ps{10, 200};
 
     // Invalid combination of element types of gamma/beta/mean/variance inputs
-    vector bn_ch_inputs = {{element::f32, PartialShape{200}, "gamma"},
-                           {element::f32, PartialShape{200}, "beta"},
-                           {element::f32, PartialShape{200}, "mean"},
-                           {element::f32, PartialShape{200}, "variance"}};
+    vector bn_ch_inputs = {{ov::element::f32, ov::PartialShape{200}, "gamma"},
+                           {ov::element::f32, ov::PartialShape{200}, "beta"},
+                           {ov::element::f32, ov::PartialShape{200}, "mean"},
+                           {ov::element::f32, ov::PartialShape{200}, "variance"}};
 
     const double epsilon = 0.001;
 
     std::vector bn_params;
-    bn_params.push_back(BatchNormInferParams{element::f16, data_batch_ps, bn_ch_inputs, epsilon});
+    bn_params.push_back(BatchNormInferParams{ov::element::f16, data_batch_ps, bn_ch_inputs, epsilon});
 
     for (size_t i = 0; i < bn_ch_inputs.size(); i++) {
         std::vector inputs = bn_ch_inputs;
-        (inputs[i]).in_et = element::f16;
+        (inputs[i]).in_et = ov::element::f16;
         bn_params.push_back(BatchNormInferParams{data_batch_et, data_batch_ps, inputs, epsilon});
     }
 
@@ -214,7 +214,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_typ
         try {
             auto bn = makeBatchNormOp(bn_p);
             FAIL() << "Incompatible input element types not detected";
-        } catch (const NodeValidationFailure& error) {
+        } catch (const ov::NodeValidationFailure& error) {
            EXPECT_HAS_SUBSTRING(error.what(), "Input element types do not match");
        } catch (...) {
            FAIL() << "Input element types check failed for unexpected reason";
@@ -223,13 +223,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_typ
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank) {
-    PartialShape data_batch_shape{Dimension::dynamic()};
-    element::Type inputs_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::Dimension::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                             {inputs_et, PartialShape::dynamic(), "beta"},
-                             {inputs_et, PartialShape::dynamic(), "mean"},
-                             {inputs_et, PartialShape::dynamic(), "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                             {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                             {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                             {inputs_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -237,7 +237,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank)
     try {
         auto bn = makeBatchNormOp(params);
         FAIL() << "Data batch input with invalid rank 1 not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "Input argument must have rank of at least 2 (input argument shape: [?])");
     } catch (...) {
         FAIL() << "Data batch input rank check failed for unexpected reason";
@@ -245,13 +245,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank)
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_ranks) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
 
-    std::vector inputs = {{input_et, PartialShape{3, Dimension::dynamic()}, "gamma"},
-                          {input_et, PartialShape::dynamic(), "beta"},
-                          {input_et, PartialShape{Dimension::dynamic()}, "mean"},
-                          {input_et, PartialShape::dynamic(), "variance"}};
+    std::vector inputs = {{input_et, ov::PartialShape{3, ov::Dimension::dynamic()}, "gamma"},
+                          {input_et, ov::PartialShape::dynamic(), "beta"},
+                          {input_et, ov::PartialShape{ov::Dimension::dynamic()}, "mean"},
+                          {input_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -259,7 +259,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_rank
     try {
         auto bn = makeBatchNormOp(params);
         FAIL() << "Incompatible gamma/beta/mean/variance input ranks not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
        EXPECT_HAS_SUBSTRING(error.what(), "Shapes for gamma/beta/mean/variance do not match");
    } catch (...) {
        FAIL() << "gamma/beta/mean/variance input ranks check failed for unexpected reason";
@@ -267,13 +267,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_rank
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_inputs_channel_count) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
 
-    std::vector inputs = {{input_et, PartialShape{3}, "gamma"},
-                          {input_et, PartialShape::dynamic(), "beta"},
-                          {input_et, PartialShape{4}, "mean"},
-                          {input_et, PartialShape::dynamic(), "variance"}};
+    std::vector inputs = {{input_et, ov::PartialShape{3}, "gamma"},
+                          {input_et, ov::PartialShape::dynamic(), "beta"},
+                          {input_et, ov::PartialShape{4}, "mean"},
+                          {input_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -281,7 +281,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_inputs_cha
     try {
         auto bn = makeBatchNormOp(params);
         FAIL() << "Incompatible gamma/beta/mean/variance inputs channel count not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
        EXPECT_HAS_SUBSTRING(error.what(), "Shapes for gamma/beta/mean/variance do not match");
    } catch (...) {
        FAIL() << "gamma/beta/mean/variance inputs channel count check failed for unexpected reason";
@@ -289,14 +289,14 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_inputs_cha
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
 
     std::vector inputs = {
-        {input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic()}, "gamma"},
-        {input_et, PartialShape::dynamic(), "beta"},
-        {input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic()}, "mean"},
-        {input_et, PartialShape::dynamic(), "variance"}};
+        {input_et, ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, "gamma"},
+        {input_et, ov::PartialShape::dynamic(), "beta"},
+        {input_et, ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, "mean"},
+        {input_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -304,7 +304,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) {
     try {
        auto bn = makeBatchNormOp(params);
        FAIL() << "Invalid rank of gamma/beta/mean/variance inputs not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
        EXPECT_HAS_SUBSTRING(error.what(), "Shape for gamma/beta/mean/variance ([?,?]) does not have rank 1");
    } catch (...) {
        FAIL() << "gamma/beta/mean/variance inputs rank check failed for unexpected reason";
@@ -312,13 +312,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) {
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_channel_inputs_channel_count) {
-    PartialShape data_batch_shape{64, 4, Dimension::dynamic(), 224};
-    element::Type input_et = element::f32;
+    ov::PartialShape data_batch_shape{64, 4, ov::Dimension::dynamic(), 224};
+    ov::element::Type input_et = ov::element::f32;
 
-    std::vector inputs = {{input_et, PartialShape{3}, "gamma"},
-                          {input_et, PartialShape::dynamic(), "beta"},
-                          {input_et, PartialShape{3}, "mean"},
-                          {input_et, PartialShape::dynamic(), "variance"}};
+    std::vector inputs = {{input_et, ov::PartialShape{3}, "gamma"},
+                          {input_et, ov::PartialShape::dynamic(), "beta"},
+                          {input_et, ov::PartialShape{3}, "mean"},
+                          {input_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -326,7 +326,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_cha
     try {
        auto bn = makeBatchNormOp(params);
        FAIL() << "Incompatible data batch and gamma/beta/mean/variance channel count not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
        EXPECT_HAS_SUBSTRING(error.what(),
                             "Input channel dimension (4) does not match "
                             "shape for gamma/beta/mean/variance ([3])");
@@ -337,13 +337,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_cha
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_zero) {
-    PartialShape data_batch_shape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()};
-    element::Type inputs_et = element::f32;
+    ov::PartialShape data_batch_shape{ov::Dimension::dynamic(), 0, ov::Dimension::dynamic(), ov::Dimension::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                             {inputs_et, PartialShape::dynamic(), "beta"},
-                             {inputs_et, PartialShape::dynamic(), "mean"},
-                             {inputs_et, PartialShape::dynamic(), "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                             {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                             {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                             {inputs_et, ov::PartialShape::dynamic(), "variance"}};
 
     double epsilon = 0.001;
 
@@ -351,7 +351,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_ze
     try {
        auto bn = makeBatchNormOp(params);
       FAIL() << "Data batch channel count zero not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
       EXPECT_HAS_SUBSTRING(error.what(), "Channel count must be at least 1");
    } catch (...) {
       FAIL() << "Data batch channel count check failed for unexpected reason";
@@ -359,20 +359,20 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_ze
 }
 
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_epsilon) {
-    PartialShape data_batch_shape{10, 100};
-    element::Type inputs_et = element::f32;
+    ov::PartialShape data_batch_shape{10, 100};
+    ov::element::Type inputs_et = ov::element::f32;
 
-    std::vector ch_inputs = {{inputs_et, PartialShape{100}, "gamma"},
-                             {inputs_et, PartialShape{100}, "beta"},
-                             {inputs_et, PartialShape{100}, "mean"},
-                             {inputs_et, PartialShape{100}, "variance"}};
+    std::vector ch_inputs = {{inputs_et, ov::PartialShape{100}, "gamma"},
+                             {inputs_et, ov::PartialShape{100}, "beta"},
+                             {inputs_et, ov::PartialShape{100}, "mean"},
+                             {inputs_et, ov::PartialShape{100}, "variance"}};
 
     double eps_neg = -1.0;
     const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, eps_neg};
     try {
         auto bn = makeBatchNormOp(params);
         FAIL() << "Invalid 'epsilon' attribute value not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
                              "Attribute 'epsilon' must be a floating-point value greater than or equal to zero.");
     } catch (...) {
@@ -397,5 +397,5 @@ REGISTER_TYPED_TEST_SUITE_P(BatchNormTest,
                             batch_norm_inference_invalid_input_channels_count_zero,
                             batch_norm_inference_invalid_epsilon);
 
-using Types = ::testing::Types;
+using Types = ::testing::Types;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, BatchNormTest, Types);
diff --git a/src/core/tests/type_prop/batch_to_space.cpp b/src/core/tests/type_prop/batch_to_space.cpp
index 9ded8a3df2f..df529d1853f 100644
--- a/src/core/tests/type_prop/batch_to_space.cpp
+++ b/src/core/tests/type_prop/batch_to_space.cpp
@@ -2,14 +2,18 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/batch_to_space.hpp"
+
+#include
+
 #include
 
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
+#include "ngraph/util.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/space_to_batch.hpp"
 
 using namespace std;
-using namespace ngraph;
 using namespace testing;
 
 namespace {
@@ -19,33 +23,34 @@ constexpr size_t crops_begin_input_idx = 2;
 constexpr size_t crops_end_input_idx = 3;
 constexpr size_t batch_to_space_required_inputs = 4;
 
 struct InputInfo {
-    element::Type in_et;
-    PartialShape in_pshape;
+    ov::element::Type in_et;
+    ov::PartialShape in_pshape;
 };
 
 using BatchToSpaceInputParams = std::array;
 
-std::shared_ptr makeBatchToSpaceOp(const BatchToSpaceInputParams& p) {
+std::shared_ptr makeBatchToSpaceOp(const BatchToSpaceInputParams& p) {
     if (p.size() != batch_to_space_required_inputs) {
         throw runtime_error("BatchToSpace requires 4 inputs");
     }
-    auto data = make_shared(p.at(data_input_idx).in_et, p.at(data_input_idx).in_pshape);
     auto block_shape =
-        make_shared(p.at(block_shape_input_idx).in_et, p.at(block_shape_input_idx).in_pshape);
+        make_shared(p.at(block_shape_input_idx).in_et, p.at(block_shape_input_idx).in_pshape);
     auto crops_begin =
-        make_shared(p.at(crops_begin_input_idx).in_et, p.at(crops_begin_input_idx).in_pshape);
-    auto crops_end = make_shared(p.at(crops_end_input_idx).in_et, p.at(crops_end_input_idx).in_pshape);
-    return make_shared(data, block_shape, crops_begin, crops_end);
+        make_shared(p.at(crops_begin_input_idx).in_et, p.at(crops_begin_input_idx).in_pshape);
+    auto crops_end =
+        make_shared(p.at(crops_end_input_idx).in_et, p.at(crops_end_input_idx).in_pshape);
+    return make_shared(data, block_shape, crops_begin, crops_end);
 }
 }  // namespace
 
 TEST(type_prop, batch_to_space_incompatible_input_element_types) {
-    element::Type float_et = element::f32;
-    element::Type integer64_et = element::i64;
-    element::Type integer32_et = element::i32;
+    ov::element::Type float_et = ov::element::f32;
+    ov::element::Type integer64_et = ov::element::i64;
+    ov::element::Type integer32_et = ov::element::i32;
 
-    Shape data_sshape{10, 26};
-    Shape inputs_sshape{2};
+    ov::Shape data_sshape{10, 26};
+    ov::Shape inputs_sshape{2};
 
     vector test_cases;
     test_cases.push_back(BatchToSpaceInputParams{InputInfo{float_et, data_sshape},
@@ -67,7 +72,7 @@ TEST(type_prop, batch_to_space_incompatible_input_element_types) {
         try {
             auto batch_to_space = makeBatchToSpaceOp(test_case);
             FAIL() << "Incompatible element types for block_shape/crops_begin/crops_end inputs not detected";
-        } catch (const NodeValidationFailure& error) {
+        } catch (const ov::NodeValidationFailure& error) {
             EXPECT_HAS_SUBSTRING(error.what(),
                                  "block_shape, crops_begin and crops_end inputs must have same element type.");
         } catch (...) {
@@ -77,10 +82,10 @@ TEST(type_prop, batch_to_space_incompatible_input_element_types) {
 }
 
 TEST(type_prop, batch_to_space_invalid_input_element_types) {
-    element::Type float_et = element::f32;
+    ov::element::Type float_et = ov::element::f32;
 
-    Shape data_sshape{10, 26};
-    Shape inputs_sshape{2};
+    ov::Shape data_sshape{10, 26};
+    ov::Shape inputs_sshape{2};
 
     const BatchToSpaceInputParams params{InputInfo{float_et, data_sshape},
                                          InputInfo{float_et, inputs_sshape},
@@ -90,7 +95,7 @@ TEST(type_prop, batch_to_space_invalid_input_element_types) {
     try {
         auto batch_to_space = makeBatchToSpaceOp(params);
         FAIL() << "Invalid non-integer element type for block_shape/crops_begin/crops_end inputs not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "block_shape and crops inputs must have integer element type.");
     } catch (...) {
         FAIL() << "Element type check for block_shape/crops_begin/crops_end inputs failed for unexpected reason";
@@ -98,11 +103,11 @@ TEST(type_prop, batch_to_space_invalid_input_element_types) {
 }
 
 TEST(type_prop, batch_to_space_invalid_data_input_rank) {
-    Shape data_sshape{4};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{4};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape{2};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape{2};
+    ov::element::Type inputs_et = ov::element::i64;
 
     const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape},
                                          InputInfo{inputs_et, inputs_sshape},
@@ -112,7 +117,7 @@ TEST(type_prop, batch_to_space_invalid_data_input_rank) {
     try {
         auto batch_to_space = makeBatchToSpaceOp(params);
         FAIL() << "Invalid rank of data input not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "data input must have rank greater or equal than 2.");
     } catch (...) {
         FAIL() << "Rank check for data input failed for unexpected reason";
@@ -120,12 +125,12 @@ TEST(type_prop, batch_to_space_invalid_data_input_rank) {
 }
 
 TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) {
-    Shape data_sshape{10, 26};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{10, 26};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape_1D{2};
-    Shape inputs_sshape_2D{2, 1};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape_1D{2};
+    ov::Shape inputs_sshape_2D{2, 1};
+    ov::element::Type inputs_et = ov::element::i64;
 
     vector test_cases;
     test_cases.push_back(BatchToSpaceInputParams{InputInfo{data_et, data_sshape},
@@ -147,7 +152,7 @@ TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) {
         try {
             auto batch_to_space = makeBatchToSpaceOp(test_case);
             FAIL() << "Incompatible shapes for block_shape/crops_begin/crops_end inputs not detected";
-        } catch (const NodeValidationFailure& error) {
+        } catch (const ov::NodeValidationFailure& error) {
             EXPECT_HAS_SUBSTRING(error.what(),
                                  "block_shape, crops_begin and crops_end inputs must have the same shape.");
         } catch (...) {
@@ -157,11 +162,11 @@ TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) {
 }
 
 TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) {
-    Shape data_sshape{10, 26};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{10, 26};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape_2D{2, 1};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape_2D{2, 1};
+    ov::element::Type inputs_et = ov::element::i64;
 
     const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape},
                                          InputInfo{inputs_et, inputs_sshape_2D},
@@ -171,7 +176,7 @@ TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) {
     try {
         auto batch_to_space = makeBatchToSpaceOp(params);
         FAIL() << "Invalid rank for block_shape/crops_begin/crops_end inputs not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "block_shape and crops inputs must have rank 1.");
     } catch (...) {
         FAIL() << "Rank check for block_shape/crops_begin/crops_end inputs failed for unexpected reason";
@@ -179,11 +184,11 @@ TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) {
 }
 
 TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) {
-    Shape data_sshape{10, 26};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{10, 26};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape{5};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape{5};
+    ov::element::Type inputs_et = ov::element::i64;
 
     const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape},
                                          InputInfo{inputs_et, inputs_sshape},
@@ -193,7 +198,7 @@ TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) {
     try {
         auto batch_to_space = makeBatchToSpaceOp(params);
         FAIL() << "Incompatible shapes for data and block_shape/crops_begin/crops_end inputs not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
                              "block_shape and crop inputs must have same number of elements "
                              "as data input rank.");
@@ -204,21 +209,21 @@ TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) {
 }
 
 TEST(type_prop, batch_to_space_invalid_block_shape_input) {
-    Shape data_sshape{100, 7, 13, 3};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{100, 7, 13, 3};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape{4};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape{4};
+    ov::element::Type inputs_et = ov::element::i64;
 
-    auto data = make_shared(data_et, data_sshape);
-    auto block_shape = make_shared(inputs_et, inputs_sshape, vector{0, 10, 5, 1});
-    auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 0});
+    auto data = make_shared(data_et, data_sshape);
+    auto block_shape = make_shared(inputs_et, inputs_sshape, vector{0, 10, 5, 1});
+    auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 0});
 
     try {
-        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
         FAIL() << "Invalid elements of block_shape input not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "Elements of block_shape input must be greater or equal to one.");
     } catch (...) {
         FAIL() << "Greater than zero elements of block_shape input check failed for unexpected reason";
@@ -226,20 +231,20 @@ TEST(type_prop, batch_to_space_invalid_block_shape_input) {
 }
 
 TEST(type_prop, batch_to_space_invalid_crops_input_values) {
-    Shape data_sshape{100, 7, 13, 3};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{100, 7, 13, 3};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape{4};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape{4};
+    ov::element::Type inputs_et = ov::element::i64;
 
     try {
-        auto data = make_shared(data_et, data_sshape);
-        auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 10, 5, 1});
-        auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, -1});
-        auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 0});
-        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+        auto data = make_shared(data_et, data_sshape);
+        auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 10, 5, 1});
+        auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, -1});
+        auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 0});
+        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
         FAIL() << "Invalid crops_begin input values not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
                              "Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
     } catch (...) {
@@ -247,13 +252,13 @@ TEST(type_prop, batch_to_space_invalid_crops_input_values) {
     }
 
     try {
-        auto data = make_shared(data_et, data_sshape);
-        auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 10, 5, 1});
-        auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 0});
-        auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, -1, 0});
-        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+        auto data = make_shared(data_et, data_sshape);
+        auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 10, 5, 1});
+        auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 0});
+        auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, -1, 0});
+        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
         FAIL() << "Invalid crops_end input values not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
                              "Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
     } catch (...) {
@@ -262,19 +267,19 @@ TEST(type_prop, batch_to_space_invalid_crops_input_values) {
 }
 
 TEST(type_prop, batch_to_space_incompatible_block_shape_input_values_with_data_shape) {
-    Shape data_sshape{80, 7, 13, 3};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{80, 7, 13, 3};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape{4};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape{4};
+    ov::element::Type inputs_et = ov::element::i64;
 
-    auto data = make_shared(data_et, data_sshape);
-    auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 10, 5, 1});
-    auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 0});
+    auto data = make_shared(data_et, data_sshape);
+    auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 10, 5, 1});
+    auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 0});
 
     try {
-        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
         FAIL() << "Incompatible data shape and block_shape input values not detected";
     } catch (const ov::Exception& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "[ 80, 80] must be a multiple of divisor: 50");
@@ -284,21 +289,21 @@ TEST(type_prop, batch_to_space_incompatible_block_shape_input_values_with_data_s
 }
 
 TEST(type_prop, batch_to_space_invalid_crops_out_of_bounds) {
-    Shape data_sshape{32, 4, 1, 3};
-    element::Type data_et = element::f32;
+    ov::Shape data_sshape{32, 4, 1, 3};
+    ov::element::Type data_et = ov::element::f32;
 
-    Shape inputs_sshape{4};
-    element::Type inputs_et = element::i64;
+    ov::Shape inputs_sshape{4};
+    ov::element::Type inputs_et = ov::element::i64;
 
-    auto data = make_shared(data_et, data_sshape);
-    auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 2, 2, 1});
-    auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 2});
-    auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 2});
+    auto data = make_shared(data_et, data_sshape);
+    auto block_shape = make_shared(inputs_et, inputs_sshape, vector{1, 2, 2, 1});
+    auto crops_begin = make_shared(inputs_et, inputs_sshape, vector{0, 3, 1, 2});
+    auto crops_end = make_shared(inputs_et, inputs_sshape, vector{0, 3, 0, 2});
 
     try {
-        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+        auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
         FAIL() << "Invalid out of bound crops values not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(),
                              "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]");
     } catch (...) {
@@ -307,172 +312,180 @@ TEST(type_prop, batch_to_space_invalid_crops_out_of_bounds) {
 }
 
 TEST(type_prop, batch_to_space_output_shape_2D) {
-    auto data = make_shared(element::f32, Shape{10, 26});
-    auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5});
-    auto crops_begin = make_shared(element::i64, Shape{2}, vector{0, 2});
-    auto crops_end = make_shared(element::i64, Shape{2}, vector{0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::Shape{10, 26});
+    auto block_shape = make_shared(ov::element::i64, ov::Shape{2}, vector{1, 5});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{2}, vector{0, 2});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{2}, vector{0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
-    ASSERT_EQ(batch_to_space->get_shape(), (Shape{10 / 5, 26 * 5 - 2}));
+    ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    ASSERT_EQ(batch_to_space->get_shape(), (ov::Shape{10 / 5, 26 * 5 - 2}));
 }
 
 TEST(type_prop, batch_to_space_output_shape_4D) {
-    auto data = make_shared(element::f32, Shape{100, 7, 13, 3});
-    auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1});
-    auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::Shape{100, 7, 13, 3});
+    auto block_shape = make_shared(ov::element::i64, ov::Shape{4}, vector{1, 10, 5, 1});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
-    ASSERT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
+    ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    ASSERT_EQ(batch_to_space->get_shape(), (ov::Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
 }
 
 TEST(type_prop, batch_to_space_output_shape_5D) {
-    auto data = make_shared(element::f32, Shape{960, 6, 13, 128, 16});
-    auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16});
-    auto crops_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0});
-    auto crops_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::Shape{960, 6, 13, 128, 16});
+    auto block_shape =
+        make_shared(ov::element::i32, ov::Shape{5}, vector{1, 6, 5, 1, 16});
+    auto crops_begin =
+        make_shared(ov::element::i32, ov::Shape{5}, vector{0, 2, 0, 0, 0});
+    auto crops_end = make_shared(ov::element::i32, ov::Shape{5}, vector{0, 2, 1, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
-    ASSERT_EQ(batch_to_space->get_shape(), (Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
+    ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    ASSERT_EQ(batch_to_space->get_shape(), (ov::Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
 }
 
 TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_static) {
-    auto data = make_shared(element::f32, PartialShape{960, {2, 20}, {12, 14}, {100, 150}, {10, 20}});
-    auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16});
-    auto crops_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0});
-    auto crops_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32,
+                            ov::PartialShape{960, {2, 20}, {12, 14}, {100, 150}, {10, 20}});
+    auto block_shape =
+        make_shared(ov::element::i32, ov::Shape{5}, vector{1, 6, 5, 1, 16});
+    auto crops_begin =
+        make_shared(ov::element::i32, ov::Shape{5}, vector{0, 2, 0, 0, 0});
+    auto crops_end = make_shared(ov::element::i32, ov::Shape{5}, vector{0, 2, 1, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
     ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
-              (PartialShape{960 / (6 * 5 * 16),
-                            {2 * 6 - 2 - 2, 20 * 6 - 2 - 2},
-                            {12 * 5 - 1, 14 * 5 - 1},
-                            {100, 150},
-                            {10 * 16, 20 * 16}}));
+              (ov::PartialShape{960 / (6 * 5 * 16),
+                                {2 * 6 - 2 - 2, 20 * 6 - 2 - 2},
+                                {12 * 5 - 1, 14 * 5 - 1},
+                                {100, 150},
+                                {10 * 16, 20 * 16}}));
 }
 
 OPENVINO_SUPPRESS_DEPRECATED_START
 TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_dynamic) {
-    auto data_shape = PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}};
+    auto data_shape = ov::PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}};
     set_shape_labels(data_shape, 10);
-    auto data = make_shared(element::f32, data_shape);
-    auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16});
-    auto crops_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0});
-    auto crops_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, data_shape);
+    auto block_shape =
+        make_shared(ov::element::i32, ov::Shape{5}, vector{1, 6, 5, 1, 16});
+    auto crops_begin =
+        make_shared(ov::element::i32, ov::Shape{5}, vector{0, 2, 0, 0, 0});
+    auto crops_end = make_shared(ov::element::i32, ov::Shape{5}, vector{0, 2, 1, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
     EXPECT_EQ(batch_to_space->get_output_partial_shape(0),
-              (PartialShape{{ceil_div(959, (6 * 5 * 16)), 962 / (6 * 5 * 16)},
-                            {2 * 6 - 2 - 2, 34 * 6 - 2 - 2},
-                            {9 * 5 - 1, 21 * 5 - 1},
-                            {100, 162},
-                            {1 * 16, 1999 * 16}}));
+              (ov::PartialShape{{ngraph::ceil_div(959, (6 * 5 * 16)), 962 / (6 * 5 * 16)},
+                                {2 * 6 - 2 - 2, 34 * 6 - 2 - 2},
+                                {9 * 5 - 1, 21 * 5 - 1},
+                                {100, 162},
+                                {1 * 16, 1999 * 16}}));
     EXPECT_THAT(get_shape_labels(batch_to_space->get_output_partial_shape(0)),
                 ElementsAre(ov::no_label, ov::no_label, ov::no_label, 13, ov::no_label));
 }
 
 TEST(type_prop, batch_to_space_input_interval_shape_block_one) {
-    auto data_shape = PartialShape{{959, 962}, {2, 34}, {9, 21}};
+    auto data_shape = ov::PartialShape{{959, 962}, {2, 34}, {9, 21}};
     set_shape_labels(data_shape, 10);
-    auto data = make_shared(element::f32, data_shape);
-    auto block_shape = make_shared(element::i32, Shape{3}, vector{1, 1, 1});
-    auto crops_begin = make_shared(element::i32, Shape{3}, vector{0, 0, 0});
-    auto crops_end = make_shared(element::i32, Shape{3}, vector{0, 0, 1});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, data_shape);
+    auto block_shape = make_shared(ov::element::i32, ov::Shape{3}, vector{1, 1, 1});
+    auto crops_begin = make_shared(ov::element::i32, ov::Shape{3}, vector{0, 0, 0});
+    auto crops_end = make_shared(ov::element::i32, ov::Shape{3}, vector{0, 0, 1});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
     EXPECT_EQ(batch_to_space->get_output_partial_shape(0),
-              PartialShape({{959, 962}, {2, 34}, {9 * 1 - 1, 21 * 1 - 1}}));
+              ov::PartialShape({{959, 962}, {2, 34}, {9 * 1 - 1, 21 * 1 - 1}}));
     EXPECT_THAT(get_shape_labels(batch_to_space->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label));
 }
 
 TEST(type_prop, batch_to_space_and_space_to_batch) {
-    auto data = make_shared(element::f32, PartialShape{4800, 9, {11, -1}, 2});
-    auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2});
-    auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1});
-    auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::PartialShape{4800, 9, {11, -1}, 2});
+    auto block_shape =
+        make_shared(ov::element::i64, ov::Shape{4}, vector{1, 12, 100, 2});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 38, 1});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 5, 38, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
+    ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
     ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
-              (PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1}));
+              (ov::PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1}));
 
-    auto space_to_batch = make_shared(batch_to_space, block_shape, crops_begin, crops_end);
-    ASSERT_EQ(space_to_batch->get_element_type(), element::f32);
-    ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (PartialShape{4800, 9, {11, -1}, 2}));
+    auto space_to_batch = make_shared(batch_to_space, block_shape, crops_begin, crops_end);
+    ASSERT_EQ(space_to_batch->get_element_type(), ov::element::f32);
+    ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (ov::PartialShape{4800, 9, {11, -1}, 2}));
 }
 
 TEST(type_prop, batch_to_space_dynamic_shape_static_rank) {
-    auto data = make_shared(element::f32, PartialShape::dynamic(4));
-    auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1});
-    auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic(4));
+    auto block_shape = make_shared(ov::element::i64, ov::Shape{4}, vector{1, 10, 5, 1});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
-    ASSERT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4));
+    ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    ASSERT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape::dynamic(4));
 }
 
 TEST(type_prop, batch_to_space_dynamic_shape_dynamic_rank) {
-    auto data = make_shared(element::f32, PartialShape::dynamic());
-    auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1});
-    auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic());
+    auto block_shape = make_shared(ov::element::i64, ov::Shape{4}, vector{1, 10, 5, 1});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    ASSERT_EQ(batch_to_space->get_element_type(), element::f32);
-    ASSERT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic());
+    ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    ASSERT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape::dynamic());
 }
 
 TEST(type_prop, batch_to_space_default_ctor) {
-    auto data = make_shared(element::i16, Shape{100, 7, 13, 3});
-    auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1});
-    auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0});
+    auto data = make_shared(ov::element::i16, ov::Shape{100, 7, 13, 3});
+    auto block_shape = make_shared(ov::element::i64, ov::Shape{4}, vector{1, 10, 5, 1});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 0, 0});
 
-    auto batch_to_space = make_shared();
+    auto batch_to_space = make_shared();
 
-    batch_to_space->set_arguments(OutputVector{data, block_shape, crops_begin, crops_end});
+    batch_to_space->set_arguments(ov::OutputVector{data, block_shape, crops_begin, crops_end});
     batch_to_space->validate_and_infer_types();
 
     EXPECT_EQ(batch_to_space->get_input_size(), 4);
     EXPECT_EQ(batch_to_space->get_output_size(), 1);
-    EXPECT_EQ(batch_to_space->get_element_type(), element::i16);
-    EXPECT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
+    EXPECT_EQ(batch_to_space->get_element_type(), ov::element::i16);
+    EXPECT_EQ(batch_to_space->get_shape(), (ov::Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
 }
 
 TEST(type_prop, batch_to_space_non_const_inputs) {
-    auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3});
+    auto data = make_shared(ov::element::f32, ov::PartialShape{100, 7, 13, 3});
 
-    auto block_shape = make_shared(element::i64, PartialShape{4});
-    auto crops_begin = make_shared(element::i64, PartialShape{4});
-    auto crops_end = make_shared(element::i64, PartialShape{4});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto block_shape = make_shared(ov::element::i64, ov::PartialShape{4});
+    auto crops_begin = make_shared(ov::element::i64, ov::PartialShape{4});
+    auto crops_end = make_shared(ov::element::i64, ov::PartialShape{4});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
-    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4));
+    EXPECT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape::dynamic(4));
 }
 
 TEST(type_prop, batch_to_space_block_non_constant_only) {
-    auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3});
-    auto block_shape = make_shared(element::i64, PartialShape{4});
-    auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0});
-    auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::PartialShape{100, 7, 13, 3});
+    auto block_shape = make_shared(ov::element::i64, ov::PartialShape{4});
+    auto crops_begin = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 1, 0});
+    auto crops_end = make_shared(ov::element::i64, ov::Shape{4}, vector{0, 3, 0, 0});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
-    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {12, -1}, {3, -1}}));
+    EXPECT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape({-1, {1, -1}, {12, -1}, {3, -1}}));
 }
 
 TEST(type_prop, batch_to_space_crops_non_constant_only) {
-    auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3});
-    auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 5, 1});
-    auto crops_begin = make_shared(element::i64, PartialShape{4});
-    auto crops_end = make_shared(element::i64, PartialShape{4});
-    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
+    auto data = make_shared(ov::element::f32, ov::PartialShape{100, 7, 13, 3});
+    auto block_shape = make_shared(ov::element::i64, ov::Shape{4}, vector{1, 2, 5, 1});
+    auto crops_begin = make_shared(ov::element::i64, ov::PartialShape{4});
+    auto crops_end = make_shared(ov::element::i64, ov::PartialShape{4});
+    auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end);
 
-    EXPECT_EQ(batch_to_space->get_element_type(), element::f32);
-    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({10, -1, -1, -1}));
+    EXPECT_EQ(batch_to_space->get_element_type(), ov::element::f32);
+    EXPECT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape({10, -1, -1, -1}));
 }
diff --git a/src/core/tests/type_prop/binary_convolution.cpp b/src/core/tests/type_prop/binary_convolution.cpp
index 8ad8c3825eb..e623faf2ed4 100644
--- a/src/core/tests/type_prop/binary_convolution.cpp
+++ b/src/core/tests/type_prop/binary_convolution.cpp
@@ -2,219 +2,224 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/binary_convolution.hpp"
+
+#include
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
+#include "openvino/core/coordinate_diff.hpp"
 
 using namespace std;
-using namespace ngraph;
 using namespace testing;
 
 TEST(type_prop, bin_convolution_auto_padding_same) {
-    PartialShape data_batch_shape{1, 1, 5, 5};
-    PartialShape filters_shape{1, 1, 3, 3};
+    ov::PartialShape data_batch_shape{1, 1, 5, 5};
+    ov::PartialShape filters_shape{1, 1, 3, 3};
     set_shape_labels(data_batch_shape, 10);
     set_shape_labels(filters_shape, 20);
-    Strides strides{1, 1};
-    CoordinateDiff pads_begin{0, 0};
-    CoordinateDiff pads_end{0, 0};
-    Strides dilations{1, 1};
- const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + ov::Strides strides{1, 1}; + ov::CoordinateDiff pads_begin{0, 0}; + ov::CoordinateDiff pads_end{0, 0}; + ov::Strides dilations{1, 1}; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::SAME_LOWER; + const auto auto_pad = ov::op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::u1, filters_shape); + auto data_batch = make_shared(ov::element::f32, data_batch_shape); + auto filters = make_shared(ov::element::u1, filters_shape); - auto conv = make_shared(data_batch, - filters, - strides, - pads_begin, - pads_end, - dilations, - mode, - pad_value, - auto_pad); + auto conv = make_shared(data_batch, + filters, + strides, + pads_begin, + pads_end, + dilations, + mode, + pad_value, + auto_pad); EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); - EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, 1, 5, 5})); - EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1})); - EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1})); + EXPECT_EQ(conv->get_output_partial_shape(0), (ov::PartialShape{1, 1, 5, 5})); + EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{1, 1})); + EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{1, 1})); } TEST(type_prop, bin_convolution_auto_padding_same_lower_spatial_dims_static) { - PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5}; - PartialShape filters_shape{Dimension::dynamic(), Dimension::dynamic(), 3, 3}; + ov::PartialShape data_batch_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5}; + ov::PartialShape filters_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 3, 3}; set_shape_labels(data_batch_shape, 10); set_shape_labels(filters_shape, 20); - Strides strides{1, 1}; - CoordinateDiff pads_begin{0, 0}; - CoordinateDiff pads_end{0, 0}; - Strides dilations{1, 1}; - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + ov::Strides strides{1, 1}; + ov::CoordinateDiff pads_begin{0, 0}; + ov::CoordinateDiff pads_end{0, 0}; + ov::Strides dilations{1, 1}; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::SAME_LOWER; + const auto auto_pad = ov::op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::u1, filters_shape); + auto data_batch = make_shared(ov::element::f32, data_batch_shape); + auto filters = make_shared(ov::element::u1, filters_shape); - auto conv = make_shared(data_batch, - filters, - strides, - pads_begin, - pads_end, - dilations, - mode, - pad_value, - auto_pad); + auto conv = make_shared(data_batch, + filters, + strides, + pads_begin, + pads_end, + dilations, + mode, + pad_value, + auto_pad); EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); - EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 5})); - EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1})); - EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1})); + EXPECT_EQ(conv->get_output_partial_shape(0), + (ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5})); + 
EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{1, 1})); + EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{1, 1})); } TEST(type_prop, bin_convolution_auto_padding_same_upper_spatial_dims_static) { - const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5}; - const PartialShape filters_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 2}; - Strides strides{1, 1}; - CoordinateDiff pads_begin{0, 0}; - CoordinateDiff pads_end{0, 0}; - Strides dilations{1, 1}; - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const ov::PartialShape data_batch_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5}; + const ov::PartialShape filters_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 2, 2}; + ov::Strides strides{1, 1}; + ov::CoordinateDiff pads_begin{0, 0}; + ov::CoordinateDiff pads_end{0, 0}; + ov::Strides dilations{1, 1}; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::SAME_UPPER; + const auto auto_pad = ov::op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::u1, filters_shape); + auto data_batch = make_shared(ov::element::f32, data_batch_shape); + auto filters = make_shared(ov::element::u1, filters_shape); - auto conv = make_shared(data_batch, - filters, - strides, - pads_begin, - pads_end, - dilations, - mode, - pad_value, - auto_pad); + auto conv = make_shared(data_batch, + filters, + strides, + pads_begin, + pads_end, + dilations, + mode, + pad_value, + auto_pad); - EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 5})); - EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0})); - EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1})); + EXPECT_EQ(conv->get_output_partial_shape(0), + (ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5})); + EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{0, 0})); + EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{1, 1})); } TEST(type_prop, bin_convolution_auto_padding_same_data_batch_spatial_dims_dynamic) { - PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5}; - PartialShape filters_shape{Dimension::dynamic(), 1, 3, 3}; + ov::PartialShape data_batch_shape{1, 1, ov::Dimension::dynamic(), 5}; + ov::PartialShape filters_shape{ov::Dimension::dynamic(), 1, 3, 3}; set_shape_labels(data_batch_shape, 10); set_shape_labels(filters_shape, 20); - Strides strides{1, 1}; - CoordinateDiff pads_begin{0, 0}; - CoordinateDiff pads_end{0, 0}; - Strides dilations{1, 1}; - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + ov::Strides strides{1, 1}; + ov::CoordinateDiff pads_begin{0, 0}; + ov::CoordinateDiff pads_end{0, 0}; + ov::Strides dilations{1, 1}; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::SAME_LOWER; + const auto auto_pad = ov::op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::u1, filters_shape); + auto data_batch = make_shared(ov::element::f32, data_batch_shape); + auto filters = make_shared(ov::element::u1, filters_shape); - auto conv = make_shared(data_batch, - filters, - strides, - pads_begin, - pads_end, - dilations, - mode, - pad_value, - auto_pad); + auto conv = 
make_shared(data_batch, + filters, + strides, + pads_begin, + pads_end, + dilations, + mode, + pad_value, + auto_pad); EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); - EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 5})); - EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 1})); - EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 1})); + EXPECT_EQ(conv->get_output_partial_shape(0), + (ov::PartialShape{1, ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5})); + EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{0, 1})); + EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{0, 1})); } TEST(type_prop, bin_convolution_dyn_data_batch) { - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; + const auto auto_pad = ov::op::PadType::EXPLICIT; - const auto data_batch = make_shared(element::f32, PartialShape::dynamic()); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3}); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, - mode, - pad_value, - auto_pad); - - EXPECT_EQ(bin_conv->get_output_partial_shape(0), (PartialShape{-1, 1, {1, -1}, {1, -1}})); -} - -TEST(type_prop, bin_convolution_dyn_filters) { - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; - const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; - - const auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape::dynamic()); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, - mode, - pad_value, - auto_pad); - - EXPECT_EQ(bin_conv->get_output_partial_shape(0), (PartialShape{1, -1, {1, 5}, {1, 5}})); -} - -TEST(type_prop, bin_convolution_dyn_data_batch_and_filters) { - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; - const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; - - const auto data_batch = make_shared(element::f32, PartialShape::dynamic()); - const auto filters = make_shared(element::u1, PartialShape::dynamic()); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, - mode, - pad_value, - auto_pad); - - EXPECT_EQ(bin_conv->get_output_partial_shape(0), PartialShape::dynamic()); -} - -TEST(type_prop, bin_convolution_invalid_inputs_et) { - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; - const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; - try { - const auto data_batch = make_shared(element::boolean, PartialShape{1, 1, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3}); - const auto bin_conv = make_shared(data_batch, + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3}); + const auto bin_conv = make_shared(data_batch, filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, mode, 
pad_value, auto_pad); + + EXPECT_EQ(bin_conv->get_output_partial_shape(0), (ov::PartialShape{-1, 1, {1, -1}, {1, -1}})); +} + +TEST(type_prop, bin_convolution_dyn_filters) { + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const float pad_value = 1.0f; + const auto auto_pad = ov::op::PadType::EXPLICIT; + + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape::dynamic()); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, + mode, + pad_value, + auto_pad); + + EXPECT_EQ(bin_conv->get_output_partial_shape(0), (ov::PartialShape{1, -1, {1, 5}, {1, 5}})); +} + +TEST(type_prop, bin_convolution_dyn_data_batch_and_filters) { + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const float pad_value = 1.0f; + const auto auto_pad = ov::op::PadType::EXPLICIT; + + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto filters = make_shared(ov::element::u1, ov::PartialShape::dynamic()); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, + mode, + pad_value, + auto_pad); + + EXPECT_EQ(bin_conv->get_output_partial_shape(0), ov::PartialShape::dynamic()); +} + +TEST(type_prop, bin_convolution_invalid_inputs_et) { + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const float pad_value = 1.0f; + const auto auto_pad = ov::op::PadType::EXPLICIT; + try { + const auto data_batch = make_shared(ov::element::boolean, ov::PartialShape{1, 1, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3}); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, + mode, + pad_value, + auto_pad); // data batch element type must be float point FAIL() << "Incompatible element type of data batch input not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Data batch element type must be numeric"); } catch (...) 
{ FAIL() << "Data batch element type validation check failed for unexpected reason"; @@ -224,25 +229,25 @@ TEST(type_prop, bin_convolution_invalid_inputs_et) { } TEST(type_prop, bin_convolution_incompatible_input_channels) { - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; + const auto auto_pad = ov::op::PadType::EXPLICIT; - auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5}); - auto filters = make_shared(element::u1, PartialShape{1, 2, 3, 3}); + auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5}); + auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 2, 3, 3}); try { - auto conv = make_shared(data_batch, - filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, - mode, - pad_value, - auto_pad); + auto conv = make_shared(data_batch, + filters, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, + mode, + pad_value, + auto_pad); FAIL() << "Incompatible input channel dimension in data batch and filters not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch channel count")); } catch (...) { FAIL() << "Data batch and filters input channel count validation check failed for " @@ -251,27 +256,27 @@ TEST(type_prop, bin_convolution_incompatible_input_channels) { } TEST(type_prop, bin_convolution_invalid_input_ranks) { - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; + const auto auto_pad = ov::op::PadType::EXPLICIT; // data partial shape provided is rank 4 (Conv2D) // filter partial shape provided is rank 5 (Conv3D) try { - const auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3, 3}); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, - mode, - pad_value, - auto_pad); + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3, 3}); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, + mode, + pad_value, + auto_pad); // data batch and filters have incompatible ranks FAIL() << "Incompatible input ranks not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match"); } catch (...) 
{ FAIL() << "Rank validation check of inputs failed for unexpected reason"; @@ -280,20 +285,20 @@ TEST(type_prop, bin_convolution_invalid_input_ranks) { // data partial shape provided is rank 5 (Conv3D) // filter partial shape provided is rank 4 (Conv2D) try { - const auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3}); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, - mode, - pad_value, - auto_pad); + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3}); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, + mode, + pad_value, + auto_pad); // data batch and filters have incompatible ranks FAIL() << "Incompatible input ranks not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match"); } catch (...) { FAIL() << "Rank validation check of inputs failed for unexpected reason"; @@ -301,34 +306,34 @@ TEST(type_prop, bin_convolution_invalid_input_ranks) { } TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) { - Strides strides_1d{1}; - Strides strides_3d{1, 1, 1}; + ov::Strides strides_1d{1}; + ov::Strides strides_3d{1, 1, 1}; - Strides dilations_2d{1, 1}; - Strides dilations_3d{1, 1, 1}; + ov::Strides dilations_2d{1, 1}; + ov::Strides dilations_3d{1, 1, 1}; - CoordinateDiff pads_end_2d{0, 0}; - CoordinateDiff pads_begin_3d{0, 0, 0}; + ov::CoordinateDiff pads_end_2d{0, 0}; + ov::CoordinateDiff pads_begin_3d{0, 0, 0}; - const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const float pad_value = 1.0f; - const auto auto_pad = op::PadType::EXPLICIT; + const auto auto_pad = ov::op::PadType::EXPLICIT; try { - const auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3}); - const auto bin_conv = make_shared(data_batch, - filters, - strides_3d, - CoordinateDiff{}, - CoordinateDiff{}, - dilations_2d, - mode, - pad_value, - auto_pad); - // Strides have incompatible number of spatial dimensions + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3}); + const auto bin_conv = make_shared(data_batch, + filters, + strides_3d, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + dilations_2d, + mode, + pad_value, + auto_pad); + // ov::Strides have incompatible number of spatial dimensions FAIL() << "Incompatible stride number of spatial dimensions not detected."; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Strides should be defined for all and only spatial dimensions.")); } catch (...) 
{ @@ -336,20 +341,20 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) { } try { - const auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3}); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{1, 1}, - CoordinateDiff{}, - CoordinateDiff{}, - dilations_3d, - mode, - pad_value, - auto_pad); + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3}); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{1, 1}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + dilations_3d, + mode, + pad_value, + auto_pad); // Dilations have incompatible number of spatial dimensions FAIL() << "Incompatible dilations number of spatial dimensions not detected."; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Dilations should be defined for all and only spatial dimensions.")); } catch (...) { @@ -357,20 +362,20 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) { } try { - const auto data_batch = make_shared(element::f32, PartialShape{1, 1, 5, 5}); - const auto filters = make_shared(element::u1, PartialShape{1, 1, 3, 3}); - const auto bin_conv = make_shared(data_batch, - filters, - Strides{1, 1}, - pads_begin_3d, - pads_end_2d, - dilations_2d, - mode, - pad_value, - auto_pad); + const auto data_batch = make_shared(ov::element::f32, ov::PartialShape{1, 1, 5, 5}); + const auto filters = make_shared(ov::element::u1, ov::PartialShape{1, 1, 3, 3}); + const auto bin_conv = make_shared(data_batch, + filters, + ov::Strides{1, 1}, + pads_begin_3d, + pads_end_2d, + dilations_2d, + mode, + pad_value, + auto_pad); // Pads have incompatible number of spatial dimensions FAIL() << "Incompatible pads number of spatial dimensions not detected."; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Pads begin and end should be defined for all and only spatial dimensions.")); } catch (...) 
{ @@ -378,60 +383,60 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) { } } -class TypePropBinaryConvolutionV1Test : public TypePropOpTest { +class TypePropBinaryConvolutionV1Test : public TypePropOpTest { protected: - CoordinateDiff empty_pad{}; + ov::CoordinateDiff empty_pad{}; }; TEST_F(TypePropBinaryConvolutionV1Test, default_ctor) { - const auto data = make_shared(element::f32, PartialShape{1, 3, 5, 5}); - const auto filters = make_shared(element::f32, PartialShape{2, 3, 4, 4}); + const auto data = make_shared(ov::element::f32, ov::PartialShape{1, 3, 5, 5}); + const auto filters = make_shared(ov::element::f32, ov::PartialShape{2, 3, 4, 4}); const auto op = make_op(); - op->set_arguments(OutputVector{data, filters}); + op->set_arguments(ov::OutputVector{data, filters}); op->set_strides({1, 3}); op->set_dilations({1, 2}); op->set_pads_begin({2, 2}); op->set_pads_end({2, 2}); - op->set_auto_pad(op::PadType::EXPLICIT); - op->set_mode(op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT); + op->set_auto_pad(ov::op::PadType::EXPLICIT); + op->set_mode(ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT); op->set_pad_value(1.0f); op->validate_and_infer_types(); EXPECT_EQ(op->get_input_size(), 2); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_strides(), Strides({1, 3})); - EXPECT_EQ(op->get_dilations(), Strides({1, 2})); - EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2})); - EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2})); - EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 2, 6, 1})); + EXPECT_EQ(op->get_strides(), ov::Strides({1, 3})); + EXPECT_EQ(op->get_dilations(), ov::Strides({1, 2})); + EXPECT_EQ(op->get_pads_begin(), ov::CoordinateDiff({2, 2})); + EXPECT_EQ(op->get_pads_end(), ov::CoordinateDiff({2, 2})); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({1, 2, 6, 1})); } TEST_F(TypePropBinaryConvolutionV1Test, interval_shapes) { - PartialShape data_batch_pshape{{1, 3}, 1, {1, 5}, {3, 10}}; - PartialShape filters_pshape{2, {1, 3}, 3, 3}; + ov::PartialShape data_batch_pshape{{1, 3}, 1, {1, 5}, {3, 10}}; + ov::PartialShape filters_pshape{2, {1, 3}, 3, 3}; set_shape_labels(data_batch_pshape, 10); set_shape_labels(filters_pshape, 20); - constexpr auto et = element::f32; - constexpr auto auto_pad = op::PadType::EXPLICIT; - constexpr auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; + constexpr auto et = ov::element::f32; + constexpr auto auto_pad = ov::op::PadType::EXPLICIT; + constexpr auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; constexpr auto pad_value = 1.0f; - const auto data_batch = make_shared(et, data_batch_pshape); - const auto filters = make_shared(et, filters_pshape); + const auto data_batch = make_shared(et, data_batch_pshape); + const auto filters = make_shared(et, filters_pshape); const auto op = make_op(data_batch, filters, - Strides{}, - CoordinateDiff{}, - CoordinateDiff{}, - Strides{}, + ov::Strides{}, + ov::CoordinateDiff{}, + ov::CoordinateDiff{}, + ov::Strides{}, mode, pad_value, auto_pad); EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); - EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{1, 3}, 2, {1, 3}, {1, 8}})); - EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0})); - EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0})); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({{1, 3}, 2, {1, 3}, {1, 8}})); + 
EXPECT_EQ(op->get_pads_begin(), (ov::CoordinateDiff{0, 0})); + EXPECT_EQ(op->get_pads_end(), (ov::CoordinateDiff{0, 0})); } diff --git a/src/core/tests/type_prop/binary_elementwise.cpp b/src/core/tests/type_prop/binary_elementwise.cpp index e501c0c24f5..cd66d736a1a 100644 --- a/src/core/tests/type_prop/binary_elementwise.cpp +++ b/src/core/tests/type_prop/binary_elementwise.cpp @@ -2,33 +2,49 @@ // SPDX-License-Identifier: Apache-2.0 // +#include + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/greater.hpp" +#include "openvino/op/greater_eq.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/less_eq.hpp" +#include "openvino/op/maximum.hpp" +#include "openvino/op/minimum.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/not_equal.hpp" +#include "openvino/op/power.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/subtract.hpp" using namespace std; -using namespace ngraph; using namespace testing; // // Tests for binary elementwise ops. // void test_binary(std::string /* node_type */, - shared_ptr(f)(const shared_ptr& x, const shared_ptr& y)) { + shared_ptr(f)(const shared_ptr& x, const shared_ptr& y)) { // Check for bad arguments - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); - auto tv0_4_2_param = make_shared(element::f32, Shape{4, 2}); + auto tv0_2_4_param_0 = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(ov::element::i32, ov::Shape{2, 4}); + auto tv0_4_2_param = make_shared(ov::element::f32, ov::Shape{4, 2}); - auto test_binary_bad_arguments_view_shapes = [&](const shared_ptr& x, const shared_ptr& y) { + auto test_binary_bad_arguments_view_shapes = [&](const shared_ptr& x, const shared_ptr& y) { try { auto node = f(x, y); // Should have thrown, so fail if it didn't FAIL() << "Incompatible view arguments not detected."; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Argument shapes are inconsistent")); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -36,12 +52,13 @@ void test_binary(std::string /* node_type */, }; test_binary_bad_arguments_view_shapes(tv0_2_4_param_0, tv0_4_2_param); - auto test_binary_bad_arguments_view_element_types = [&](const shared_ptr& x, const shared_ptr& y) { + auto test_binary_bad_arguments_view_element_types = [&](const shared_ptr& x, + const shared_ptr& y) { try { auto node = f(x, y); // Should have thrown, so fail if it didn't FAIL() << "Incompatible view arguments not detected."; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Arguments do not have the same element type")); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -50,7 +67,7 @@ void test_binary(std::string /* node_type */, test_binary_bad_arguments_view_element_types(tv0_2_4_param_0, tv0_2_4_param_2); - auto test_binary_good_arguments = [&](const shared_ptr& x, const shared_ptr& y) { + auto test_binary_good_arguments = [&](const shared_ptr& x, const shared_ptr& y) { auto node = f(x, y); EXPECT_TRUE(node->has_same_type(node->input_values()[0].get_node_shared_ptr())); }; @@ -58,64 +75,64 @@ void test_binary(std::string /* node_type */, } TEST(type_prop, add_bad_arguments) { - test_binary("Add", [](const shared_ptr& x, const shared_ptr& y) -> shared_ptr { - return make_shared(x, y); + test_binary("Add", [](const shared_ptr& x, const shared_ptr& y) -> shared_ptr { + return make_shared(x, y); }); } namespace { template -void test_binary_eltwise_numpy(const element::Type& et, const op::AutoBroadcastSpec& autob) { - auto param1 = make_shared(et, Shape{1, 3, 6}); - auto param2 = make_shared(et, Shape{3, 1}); - auto param3 = make_shared(et, Shape{2, 3, 6}); - auto param4 = make_shared(et, Shape{6}); - auto param5 = make_shared(et, Shape{}); +void test_binary_eltwise_numpy(const ov::element::Type& et, const ov::op::AutoBroadcastSpec& autob) { + auto param1 = make_shared(et, ov::Shape{1, 3, 6}); + auto param2 = make_shared(et, ov::Shape{3, 1}); + auto param3 = make_shared(et, ov::Shape{2, 3, 6}); + auto param4 = make_shared(et, ov::Shape{6}); + auto param5 = make_shared(et, ov::Shape{}); - EXPECT_EQ(make_shared(param1, param2, autob)->get_shape(), (Shape{1, 3, 6})); - EXPECT_EQ(make_shared(param1, param3, autob)->get_shape(), (Shape{2, 3, 6})); - EXPECT_EQ(make_shared(param4, param3, autob)->get_shape(), (Shape{2, 3, 6})); - EXPECT_EQ(make_shared(param5, param3, autob)->get_shape(), (Shape{2, 3, 6})); - EXPECT_EQ(make_shared(param3, param5, autob)->get_shape(), (Shape{2, 3, 6})); + EXPECT_EQ(make_shared(param1, param2, autob)->get_shape(), (ov::Shape{1, 3, 6})); + EXPECT_EQ(make_shared(param1, param3, autob)->get_shape(), (ov::Shape{2, 3, 6})); + EXPECT_EQ(make_shared(param4, param3, autob)->get_shape(), (ov::Shape{2, 3, 6})); + EXPECT_EQ(make_shared(param5, param3, autob)->get_shape(), (ov::Shape{2, 3, 6})); + EXPECT_EQ(make_shared(param3, param5, autob)->get_shape(), (ov::Shape{2, 3, 6})); - auto pp1 = make_shared(et, PartialShape{1, Dimension::dynamic(), 6}); - auto pp2 = make_shared(et, PartialShape{3, 1}); - EXPECT_EQ(make_shared(pp1, pp2, autob)->get_shape(), (Shape{1, 3, 6})); + auto pp1 = make_shared(et, ov::PartialShape{1, ov::Dimension::dynamic(), 6}); + auto pp2 = make_shared(et, ov::PartialShape{3, 1}); + EXPECT_EQ(make_shared(pp1, pp2, autob)->get_shape(), (ov::Shape{1, 3, 6})); } template -void test_binary_eltwise_bad_argument_shape(const element::Type& et) { - auto input1 = make_shared(element::f32, Shape{2, 4}); - auto input2 = make_shared(element::f32, Shape{1, 2, 4}); +void test_binary_eltwise_bad_argument_shape(const ov::element::Type& et) { + auto input1 = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto input2 = make_shared(ov::element::f32, ov::Shape{1, 2, 4}); - OV_EXPECT_THROW(auto bc = make_shared(input1, input2, op::AutoBroadcastType::NONE), - NodeValidationFailure, + OV_EXPECT_THROW(auto bc = make_shared(input1, input2, ov::op::AutoBroadcastType::NONE), + ov::NodeValidationFailure, HasSubstr("Argument shapes are inconsistent")); } template -shared_ptr createReshapeSubgraph(PartialShape param_shape, - shared_ptr constant_op, - bool const_rhs = true) { - auto 
param = make_shared(element::f32, param_shape); - auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); +shared_ptr createReshapeSubgraph(ov::PartialShape param_shape, + shared_ptr constant_op, + bool const_rhs = true) { + auto param = make_shared(ov::element::f32, param_shape); + auto shape_of = make_shared(param); + auto cast_fp = make_shared(shape_of, ov::element::f32); - Output op; + ov::Output op; if (const_rhs) op = make_shared(cast_fp, constant_op); else op = make_shared(constant_op, cast_fp); - auto cast_int = make_shared(op, element::i32); - return make_shared(param, cast_int, false); + auto cast_int = make_shared(op, ov::element::i32); + return make_shared(param, cast_int, false); } } // namespace TEST(type_prop, eltwise_auto_bcast) { - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(ov::element::f32, ov::op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(ov::element::f32, ov::op::AutoBroadcastType::NUMPY); } // --- Binary elementwise comparision ops tests - start @@ -129,9 +146,9 @@ protected: return std::make_shared(std::forward(args)...); } - std::shared_ptr make_op_with_types(element::Type et0, element::Type et1) { - const auto a = std::make_shared(et0, Shape{1, 2, 3}); - const auto b = std::make_shared(et1, Shape{1, 2, 3}); + std::shared_ptr make_op_with_types(ov::element::Type et0, ov::element::Type et1) { + const auto a = std::make_shared(et0, ov::Shape{1, 2, 3}); + const auto b = std::make_shared(et1, ov::Shape{1, 2, 3}); return make_op(a, b); } }; @@ -139,19 +156,19 @@ protected: TYPED_TEST_SUITE_P(BinaryElementwiseCmpTest); TYPED_TEST_P(BinaryElementwiseCmpTest, argument_shapes_are_inconsistent) { - test_binary_eltwise_bad_argument_shape(element::f64); + test_binary_eltwise_bad_argument_shape(ov::element::f64); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_static_partial_shape_no_broadcast) { - auto shape = PartialShape{2, 4, 5}; + auto shape = ov::PartialShape{2, 4, 5}; set_shape_labels(shape, 3); - const auto a = make_shared(element::f32, shape); - const auto b = make_shared(element::f32, PartialShape({2, 4, 5})); + const auto a = make_shared(ov::element::f32, shape); + const auto b = make_shared(ov::element::f32, ov::PartialShape({2, 4, 5})); - const auto op = this->make_op(a, b, op::AutoBroadcastType::NONE); + const auto op = this->make_op(a, b, ov::op::AutoBroadcastType::NONE); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_element_type(), element::boolean); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); EXPECT_EQ(op->get_output_partial_shape(0), shape); EXPECT_EQ(op->get_shape(), shape.get_shape()); EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(3, 4, 5)); @@ -160,163 +177,168 @@ TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_static_partial_shape_no_broadca } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_static_partial_shape_numpy_broadcast) { - test_binary_eltwise_numpy(element::f64, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(ov::element::f64, ov::op::AutoBroadcastType::NUMPY); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_static_partial_shape_pdpd_broadcast) { - auto a = make_shared(element::f64, PartialShape{1, 3, 6}); - auto b = make_shared(element::f64, PartialShape{1, 1, 1}); + auto a = make_shared(ov::element::f64, ov::PartialShape{1, 3, 6}); + auto b = make_shared(ov::element::f64, ov::PartialShape{1, 1, 1}); - 
const auto op = this->make_op(a, b, op::AutoBroadcastType::PDPD); + const auto op = this->make_op(a, b, ov::op::AutoBroadcastType::PDPD); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_element_type(), element::boolean); - EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 3, 6})); - EXPECT_EQ(op->get_shape(), Shape({1, 3, 6})); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({1, 3, 6})); + EXPECT_EQ(op->get_shape(), ov::Shape({1, 3, 6})); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_dynamic_partial_shape_no_broadcast) { - const auto shape = PartialShape{2, {3, 4}, 8, {2, 5}, 10}; - auto a = make_shared(element::i64, PartialShape{2, {3, 5}, -1, {-1, 5}, {6, -1}}); - auto b = make_shared(element::i64, shape); + const auto shape = ov::PartialShape{2, {3, 4}, 8, {2, 5}, 10}; + auto a = make_shared(ov::element::i64, ov::PartialShape{2, {3, 5}, -1, {-1, 5}, {6, -1}}); + auto b = make_shared(ov::element::i64, shape); - auto op = this->make_op(a, b, op::AutoBroadcastType::NONE); + auto op = this->make_op(a, b, ov::op::AutoBroadcastType::NONE); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_element_type(), element::boolean); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); EXPECT_EQ(op->get_output_partial_shape(0), shape); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_dynamic_partial_shape_numpy_broadcast) { - auto a = make_shared(element::i64, PartialShape{2, {3, 5}, -1, {-1, 5}, {6, -1}}); - auto b = make_shared(element::i64, PartialShape{2, {3, 4}, 8}); + auto a = make_shared(ov::element::i64, ov::PartialShape{2, {3, 5}, -1, {-1, 5}, {6, -1}}); + auto b = make_shared(ov::element::i64, ov::PartialShape{2, {3, 4}, 8}); - auto op = this->make_op(a, b, op::AutoBroadcastType::NUMPY); + auto op = this->make_op(a, b, ov::op::AutoBroadcastType::NUMPY); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_element_type(), element::boolean); - EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({2, {3, 5}, 2, {3, 4}, 8})); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({2, {3, 5}, 2, {3, 4}, 8})); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_dynamic_rank_shape_no_broadcast) { - const auto a = make_shared(element::i32, PartialShape::dynamic()); - const auto b = make_shared(element::i32, PartialShape::dynamic()); + const auto a = make_shared(ov::element::i32, ov::PartialShape::dynamic()); + const auto b = make_shared(ov::element::i32, ov::PartialShape::dynamic()); - const auto op = this->make_op(a, b, op::AutoBroadcastType::NONE); + const auto op = this->make_op(a, b, ov::op::AutoBroadcastType::NONE); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_element_type(), element::boolean); - EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic()); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic()); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_dynamic_rank_shape) { - const auto a = make_shared(element::i16, PartialShape::dynamic()); - const auto b = make_shared(element::i16, PartialShape::dynamic()); + const auto a = make_shared(ov::element::i16, ov::PartialShape::dynamic()); + const auto b = make_shared(ov::element::i16, ov::PartialShape::dynamic()); const auto op = this->make_op(a, b); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_element_type(), element::boolean); - 
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape::dynamic()); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape::dynamic()); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_one_input_is_dynamic_rank_shape) { - const auto a = make_shared(element::i8, PartialShape{3, 4, {1, 5}, -1}); - const auto b = make_shared(element::i8, PartialShape::dynamic()); + const auto a = make_shared(ov::element::i8, ov::PartialShape{3, 4, {1, 5}, -1}); + const auto b = make_shared(ov::element::i8, ov::PartialShape::dynamic()); - EXPECT_EQ(this->make_op(a, b)->get_output_partial_shape(0), PartialShape::dynamic()); - EXPECT_EQ(this->make_op(b, a)->get_output_partial_shape(0), PartialShape::dynamic()); + EXPECT_EQ(this->make_op(a, b)->get_output_partial_shape(0), ov::PartialShape::dynamic()); + EXPECT_EQ(this->make_op(b, a)->get_output_partial_shape(0), ov::PartialShape::dynamic()); } TYPED_TEST_P(BinaryElementwiseCmpTest, allowed_mixed_input_types) { // Done as multiple assertion test because gtest not allow combine type param and data param combined fixture. - ASSERT_EQ(this->make_op_with_types(element::boolean, element::boolean)->get_element_type(), element::boolean); - ASSERT_EQ(this->make_op_with_types(element::boolean, element::dynamic)->get_element_type(), element::boolean); - ASSERT_EQ(this->make_op_with_types(element::dynamic, element::i32)->get_element_type(), element::boolean); - ASSERT_EQ(this->make_op_with_types(element::dynamic, element::boolean)->get_element_type(), element::boolean); - ASSERT_EQ(this->make_op_with_types(element::dynamic, element::dynamic)->get_element_type(), element::boolean); + ASSERT_EQ(this->make_op_with_types(ov::element::boolean, ov::element::boolean)->get_element_type(), + ov::element::boolean); + ASSERT_EQ(this->make_op_with_types(ov::element::boolean, ov::element::dynamic)->get_element_type(), + ov::element::boolean); + ASSERT_EQ(this->make_op_with_types(ov::element::dynamic, ov::element::i32)->get_element_type(), + ov::element::boolean); + ASSERT_EQ(this->make_op_with_types(ov::element::dynamic, ov::element::boolean)->get_element_type(), + ov::element::boolean); + ASSERT_EQ(this->make_op_with_types(ov::element::dynamic, ov::element::dynamic)->get_element_type(), + ov::element::boolean); } TYPED_TEST_P(BinaryElementwiseCmpTest, not_allowed_mixed_input_types) { - ASSERT_ANY_THROW({ this->make_op_with_types(element::i32, element::boolean); }); - ASSERT_ANY_THROW({ this->make_op_with_types(element::boolean, element::i32); }); + ASSERT_ANY_THROW({ this->make_op_with_types(ov::element::i32, ov::element::boolean); }); + ASSERT_ANY_THROW({ this->make_op_with_types(ov::element::boolean, ov::element::i32); }); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_labels_from_one_input_only_no_broadcast) { - constexpr auto et = element::f64; + constexpr auto et = ov::element::f64; - auto labeled_shape = PartialShape{2, 4, 5}; + auto labeled_shape = ov::PartialShape{2, 4, 5}; set_shape_labels(labeled_shape, 3); const auto exp_labels = get_shape_labels(labeled_shape); - const auto a = make_shared(et, labeled_shape); - const auto b = make_shared(et, PartialShape({2, 4, 5})); + const auto a = make_shared(et, labeled_shape); + const auto b = make_shared(et, ov::PartialShape({2, 4, 5})); - EXPECT_EQ(get_shape_labels(this->make_op(a, b, op::AutoBroadcastType::NONE)->get_output_partial_shape(0)), + EXPECT_EQ(get_shape_labels(this->make_op(a, b, ov::op::AutoBroadcastType::NONE)->get_output_partial_shape(0)), 
exp_labels); - EXPECT_EQ(get_shape_labels(this->make_op(b, a, op::AutoBroadcastType::NONE)->get_output_partial_shape(0)), + EXPECT_EQ(get_shape_labels(this->make_op(b, a, ov::op::AutoBroadcastType::NONE)->get_output_partial_shape(0)), exp_labels); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_labels_from_both_inputs_no_broadcast) { - constexpr auto et = element::f64; + constexpr auto et = ov::element::f64; const auto labels_a = ov::TensorLabel{10, ov::no_label, 12, 13, 14, 15}; - auto shape_a = PartialShape{2, 4, 5, -1, {4, 5}, {-1, 6}}; + auto shape_a = ov::PartialShape{2, 4, 5, -1, {4, 5}, {-1, 6}}; set_shape_labels(shape_a, labels_a); - const auto a = make_shared(et, shape_a); + const auto a = make_shared(et, shape_a); const auto labels_b = ov::TensorLabel{20, 21, ov::no_label, 23, 24, 25}; - auto shape_b = PartialShape{2, 4, 5, 5, -1, {4, -1}}; + auto shape_b = ov::PartialShape{2, 4, 5, 5, -1, {4, -1}}; set_shape_labels(shape_b, labels_b); - const auto b = make_shared(et, shape_b); + const auto b = make_shared(et, shape_b); - EXPECT_THAT(this->make_op(a, b, op::AutoBroadcastType::NONE)->get_output_partial_shape(0), - AllOf(Eq(PartialShape({2, 4, 5, 5, {4, 5}, {4, 6}})), + EXPECT_THAT(this->make_op(a, b, ov::op::AutoBroadcastType::NONE)->get_output_partial_shape(0), + AllOf(Eq(ov::PartialShape({2, 4, 5, 5, {4, 5}, {4, 6}})), ResultOf(get_shape_labels, ElementsAre(20, 21, 12, 23, 24, 25)))); - EXPECT_THAT(this->make_op(b, a, op::AutoBroadcastType::NONE)->get_output_partial_shape(0), - AllOf(Eq(PartialShape({2, 4, 5, 5, {4, 5}, {4, 6}})), + EXPECT_THAT(this->make_op(b, a, ov::op::AutoBroadcastType::NONE)->get_output_partial_shape(0), + AllOf(Eq(ov::PartialShape({2, 4, 5, 5, {4, 5}, {4, 6}})), ResultOf(get_shape_labels, ElementsAre(10, 21, 12, 13, 14, 15)))); } TYPED_TEST_P(BinaryElementwiseCmpTest, propagate_labels_from_both_inputs_numpy_broadcast) { - constexpr auto et = element::f64; + constexpr auto et = ov::element::f64; const auto labels_a = ov::TensorLabel{10, ov::no_label, 12, 13, ov::no_label, 15}; - auto shape_a = PartialShape{2, {2, 4}, -1, {4, 5}, {-1, 6}, 1}; + auto shape_a = ov::PartialShape{2, {2, 4}, -1, {4, 5}, {-1, 6}, 1}; set_shape_labels(shape_a, labels_a); - const auto a = make_shared(et, shape_a); + const auto a = make_shared(et, shape_a); const auto labels_b = ov::TensorLabel{20, 21, ov::no_label, 23}; - auto shape_b = PartialShape{2, {4, -1}, 5, {4, -1}}; + auto shape_b = ov::PartialShape{2, {4, -1}, 5, {4, -1}}; set_shape_labels(shape_b, labels_b); - const auto b = make_shared(et, shape_b); + const auto b = make_shared(et, shape_b); - EXPECT_THAT(this->make_op(a, b, op::AutoBroadcastType::NUMPY)->get_output_partial_shape(0), - AllOf(Eq(PartialShape({2, {2, 4}, 2, {4, 5}, 5, {4, -1}})), + EXPECT_THAT(this->make_op(a, b, ov::op::AutoBroadcastType::NUMPY)->get_output_partial_shape(0), + AllOf(Eq(ov::PartialShape({2, {2, 4}, 2, {4, 5}, 5, {4, -1}})), ResultOf(get_shape_labels, ElementsAre(10, ov::no_label, 20, 21, ov::no_label, 23)))); - EXPECT_THAT(this->make_op(b, a, op::AutoBroadcastType::NUMPY)->get_output_partial_shape(0), - AllOf(Eq(PartialShape({2, {2, 4}, 2, {4, 5}, 5, {4, -1}})), + EXPECT_THAT(this->make_op(b, a, ov::op::AutoBroadcastType::NUMPY)->get_output_partial_shape(0), + AllOf(Eq(ov::PartialShape({2, {2, 4}, 2, {4, 5}, 5, {4, -1}})), ResultOf(get_shape_labels, ElementsAre(10, ov::no_label, 20, 13, ov::no_label, 23)))); } TYPED_TEST_P(BinaryElementwiseCmpTest, use_default_ctor) { - constexpr auto dtype = element::f32; + constexpr auto dtype = 
ov::element::f32; - const auto a = make_shared(dtype, PartialShape{2, 5, -1, {-1, 5}, {6, -1}}); - const auto b = make_shared(dtype, PartialShape{2, 4, 8}); + const auto a = make_shared(dtype, ov::PartialShape{2, 5, -1, {-1, 5}, {6, -1}}); + const auto b = make_shared(dtype, ov::PartialShape{2, 4, 8}); const auto op = this->make_op(); - op->set_arguments(NodeVector{a, b}); - op->set_autob(op::AutoBroadcastType::NUMPY); + op->set_arguments(ov::NodeVector{a, b}); + op->set_autob(ov::op::AutoBroadcastType::NUMPY); op->validate_and_infer_types(); - EXPECT_EQ(op->get_autob(), op::AutoBroadcastType::NUMPY); - EXPECT_EQ(op->get_element_type(), element::boolean); + EXPECT_EQ(op->get_autob(), ov::op::AutoBroadcastType::NUMPY); + EXPECT_EQ(op->get_element_type(), ov::element::boolean); EXPECT_EQ(op->get_output_size(), 1); - EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({2, 5, 2, 4, 8})); + EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({2, 5, 2, 4, 8})); } REGISTER_TYPED_TEST_SUITE_P(BinaryElementwiseCmpTest, @@ -336,79 +358,85 @@ REGISTER_TYPED_TEST_SUITE_P(BinaryElementwiseCmpTest, propagate_labels_from_both_inputs_numpy_broadcast, use_default_ctor); -using BinaryOpTypes = - Types; +using BinaryOpTypes = Types; INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, BinaryElementwiseCmpTest, BinaryOpTypes); } // namespace BEC TEST(type_prop, binary_arithmetic_bad_argument_element_types) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(ov::element::boolean, ov::Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(ov::element::boolean, ov::Shape{2, 4}); - OV_EXPECT_THROW(auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1), - NodeValidationFailure, + OV_EXPECT_THROW(auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1), + ov::NodeValidationFailure, HasSubstr("Arguments cannot have boolean element type")); } TEST(type_prop, binary_arithmetic_bad_argument_shape_with_none_autobroadcast_attribute) { - test_binary_eltwise_bad_argument_shape(element::f32); - test_binary_eltwise_bad_argument_shape(element::f32); + test_binary_eltwise_bad_argument_shape(ov::element::f32); + test_binary_eltwise_bad_argument_shape(ov::element::f32); } TEST(type_prop, binary_elementwise_arithmetic_both_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, PartialShape::dynamic()); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto b = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_static) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, ov::Dimension::dynamic(), 3}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); + auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); - ASSERT_EQ(add->get_shape(), (Shape{1, 2, 3})); + ASSERT_EQ(add->get_shape(), (ov::Shape{1, 2, 3})); } TEST(type_prop, 
binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_rank_static_dynamic) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto add = make_shared(a, b); + auto a = + make_shared(ov::element::f32, + ov::PartialShape{1, ov::Dimension::dynamic(), ov::Dimension::dynamic()}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); + auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); ASSERT_TRUE(add->get_output_partial_shape(0).is_dynamic()); - ASSERT_TRUE(add->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, Dimension::dynamic()})); + ASSERT_TRUE(add->get_output_partial_shape(0).same_scheme(ov::PartialShape{1, 2, ov::Dimension::dynamic()})); } TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_static_dynamic) { - auto a = make_shared(element::f32, PartialShape{1, 2, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); + auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); - ASSERT_EQ(add->get_shape(), (Shape{1, 2, 3})); + ASSERT_EQ(add->get_shape(), (ov::Shape{1, 2, 3})); } TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_static) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, 3}); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3}); + auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); - ASSERT_EQ(add->get_shape(), (Shape{1, 2, 3})); + ASSERT_EQ(add->get_shape(), (ov::Shape{1, 2, 3})); } TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 3, 3}); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 3, 3}); try { - auto add = make_shared(a, b); + auto add = make_shared(a, b); FAIL() << "Inconsistent partial shapes not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Argument shapes are inconsistent"); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -416,13 +444,13 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_inconsist } TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{1, 3, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, 3, 3}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); try { - auto add = make_shared(a, b); + auto add = make_shared(a, b); FAIL() << "Inconsistent partial shapes not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Argument shapes are inconsistent"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -430,13 +458,13 @@ TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_inconsis } TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{Dimension::dynamic(), 3, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(ov::element::f32, ov::PartialShape{ov::Dimension::dynamic(), 3, 3}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); try { - auto add = make_shared(a, b); + auto add = make_shared(a, b); FAIL() << "Inconsistent partial shapes not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Argument shapes are inconsistent"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -444,13 +472,13 @@ TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_inconsist } TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, 3, 4}); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 4}); try { - auto add = make_shared(a, b); + auto add = make_shared(a, b); FAIL() << "Inconsistent partial shapes not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Argument shapes are inconsistent"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -458,13 +486,13 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_different } TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, 2, 3, 4}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 4}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); try { - auto add = make_shared(a, b); + auto add = make_shared(a, b); FAIL() << "Inconsistent partial shapes not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Argument shapes are inconsistent"); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -472,13 +500,13 @@ TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_differen } TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3, 4}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(ov::element::f32, ov::PartialShape{1, ov::Dimension::dynamic(), 3, 4}); + auto b = make_shared(ov::element::f32, ov::PartialShape{1, 2, ov::Dimension::dynamic()}); try { - auto add = make_shared(a, b); + auto add = make_shared(a, b); FAIL() << "Inconsistent partial shapes not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Argument shapes are inconsistent"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -486,34 +514,34 @@ TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_different } TEST(type_prop, binary_elementwise_arithmetic_both_et_dynamic) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto b = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::dynamic, ov::Shape{1, 2, 3, 4}); + auto b = make_shared(ov::element::dynamic, ov::Shape{1, 2, 3, 4}); + auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_element_type(0).is_dynamic()); } TEST(type_prop, binary_elementwise_arithmetic_left_et_dynamic) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto b = make_shared(element::u32, Shape{1, 2, 3, 4}); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::dynamic, ov::Shape{1, 2, 3, 4}); + auto b = make_shared(ov::element::u32, ov::Shape{1, 2, 3, 4}); + auto add = make_shared(a, b); - ASSERT_EQ(add->get_output_element_type(0), element::u32); + ASSERT_EQ(add->get_output_element_type(0), ov::element::u32); } TEST(type_prop, binary_elementwise_arithmetic_right_et_dynamic) { - auto a = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto b = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto add = make_shared(a, b); + auto a = make_shared(ov::element::i64, ov::Shape{1, 2, 3, 4}); + auto b = make_shared(ov::element::dynamic, ov::Shape{1, 2, 3, 4}); + auto add = make_shared(a, b); - ASSERT_EQ(add->get_output_element_type(0), element::i64); + ASSERT_EQ(add->get_output_element_type(0), ov::element::i64); } TEST(type_prop, logic_arith_compare_partial_et) { - auto test_arith = [](element::Type et0, element::Type et1) -> std::shared_ptr { - auto param0 = std::make_shared(et0, Shape{1, 2, 3}); - auto param1 = std::make_shared(et1, Shape{1, 2, 3}); - return std::make_shared(param0, param1); + auto test_arith = [](ov::element::Type et0, ov::element::Type et1) -> std::shared_ptr { + auto param0 = std::make_shared(et0, ov::Shape{1, 2, 3}); + auto param1 = std::make_shared(et1, ov::Shape{1, 2, 3}); + return std::make_shared(param0, param1); }; // Arith ops: @@ -527,295 +555,355 @@ TEST(type_prop, logic_arith_compare_partial_et) { // dyn int -> int // dyn boo -> ! 
// dyn dyn -> dyn - ASSERT_EQ(test_arith(element::i32, element::i32)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::i32, element::boolean); }); - ASSERT_EQ(test_arith(element::i32, element::dynamic)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::i32); }); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::boolean); }); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::dynamic); }); - ASSERT_EQ(test_arith(element::dynamic, element::i32)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::dynamic, element::boolean); }); - ASSERT_EQ(test_arith(element::dynamic, element::dynamic)->get_element_type(), element::dynamic); + ASSERT_EQ(test_arith(ov::element::i32, ov::element::i32)->get_element_type(), ov::element::i32); + ASSERT_ANY_THROW({ test_arith(ov::element::i32, ov::element::boolean); }); + ASSERT_EQ(test_arith(ov::element::i32, ov::element::dynamic)->get_element_type(), ov::element::i32); + ASSERT_ANY_THROW({ test_arith(ov::element::boolean, ov::element::i32); }); + ASSERT_ANY_THROW({ test_arith(ov::element::boolean, ov::element::boolean); }); + ASSERT_ANY_THROW({ test_arith(ov::element::boolean, ov::element::dynamic); }); + ASSERT_EQ(test_arith(ov::element::dynamic, ov::element::i32)->get_element_type(), ov::element::i32); + ASSERT_ANY_THROW({ test_arith(ov::element::dynamic, ov::element::boolean); }); + ASSERT_EQ(test_arith(ov::element::dynamic, ov::element::dynamic)->get_element_type(), ov::element::dynamic); } TEST(type_prop, interval_value_propagation_add_rhs) { - PartialShape op_shape{Dimension(-1), Dimension(2, -1), Dimension(-1, 6), Dimension(7, 10), Dimension(7, 10), 5}; - const auto const_op = op::Constant::create(element::f32, {6}, {2, 3, 4, 5, -5, 6}); + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(2, -1), + ov::Dimension(-1, 6), + ov::Dimension(7, 10), + ov::Dimension(7, 10), + 5}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {2, 3, 4, 5, -5, 6}); // const rhs - const auto reshape = createReshapeSubgraph(op_shape, const_op); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(4, 10), Dimension(12, 15), Dimension(2, 5), 11})); + ov::PartialShape({-1, -1, ov::Dimension(4, 10), ov::Dimension(12, 15), ov::Dimension(2, 5), 11})); } TEST(type_prop, interval_value_propagation_add_lhs) { - PartialShape op_shape{Dimension(-1), Dimension(2, -1), Dimension(-1, 6), Dimension(7, 10), Dimension(7, 10), 5}; - const auto const_op = op::Constant::create(element::f32, {6}, {2, 3, 4, 5, -5, 6}); + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(2, -1), + ov::Dimension(-1, 6), + ov::Dimension(7, 10), + ov::Dimension(7, 10), + 5}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {2, 3, 4, 5, -5, 6}); // const lhs - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(4, 10), Dimension(12, 15), Dimension(2, 5), 11})); + ov::PartialShape({-1, -1, ov::Dimension(4, 10), ov::Dimension(12, 15), 
ov::Dimension(2, 5), 11})); } TEST(type_prop, interval_value_propagation_add_incorrect_dim) { // const rhs - result lower than 0 - PartialShape op_shape{Dimension(5, 7)}; - const auto const_op = op::Constant::create(element::f32, {1}, {-10}); - OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op), - NodeValidationFailure, + ov::PartialShape op_shape{ov::Dimension(5, 7)}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {1}, {-10}); + OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op), + ov::NodeValidationFailure, HasSubstr("Dim size cannot be less than -1")); } TEST(type_prop, interval_value_propagation_sub_rhs) { - PartialShape op_shape{Dimension(-1), Dimension(24, -1), Dimension(4, 36), Dimension(13, 27), Dimension(13, 27), 15}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(24, -1), + ov::Dimension(4, 36), + ov::Dimension(13, 27), + ov::Dimension(13, 27), + 15}; // const rhs - const auto const_op = op::Constant::create(element::f32, {6}, {2, 3, 4, 5, -5, 6}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {2, 3, 4, 5, -5, 6}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(-1, 32), Dimension(8, 22), Dimension(18, 32), 9})); + ov::PartialShape({-1, -1, ov::Dimension(-1, 32), ov::Dimension(8, 22), ov::Dimension(18, 32), 9})); } TEST(type_prop, interval_value_propagation_sub_lhs) { - PartialShape op_shape{Dimension(-1), Dimension(24, -1), Dimension(4, 36), Dimension(13, 27), Dimension(13, 27), 15}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(24, -1), + ov::Dimension(4, 36), + ov::Dimension(13, 27), + ov::Dimension(13, 27), + 15}; // const lhs - const auto const_op = op::Constant::create(element::f32, {6}, {12, 28, 36, 43, 27, 25}); - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {12, 28, 36, 43, 27, 25}); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(0, 32), Dimension(16, 30), Dimension(0, 14), 10})); + ov::PartialShape({-1, -1, ov::Dimension(0, 32), ov::Dimension(16, 30), ov::Dimension(0, 14), 10})); } TEST(type_prop, interval_value_propagation_sub_incorrect_dim) { // const lhs - result lower than 0 - PartialShape op_shape{Dimension(13, 27)}; - const auto const_op = op::Constant::create(element::f32, {1}, {5}); - OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op, false), - NodeValidationFailure, + ov::PartialShape op_shape{ov::Dimension(13, 27)}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {1}, {5}); + OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op, false), + ov::NodeValidationFailure, HasSubstr("Dim size cannot be less than -1")); } TEST(type_prop, interval_value_propagation_mul_rhs) { - PartialShape op_shape{Dimension(-1), Dimension(4, -1), Dimension(-1, 6), Dimension(5, 7), Dimension(9, 10), 15}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 6), + ov::Dimension(5, 7), + ov::Dimension(9, 10), + 15}; // 
const rhs - const auto const_op = op::Constant::create(element::f32, {6}, {7, 6, 5, 4, 3, 2}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {7, 6, 5, 4, 3, 2}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(-1, 30), Dimension(20, 28), Dimension(27, 30), 30})); + ov::PartialShape({-1, -1, ov::Dimension(-1, 30), ov::Dimension(20, 28), ov::Dimension(27, 30), 30})); } TEST(type_prop, interval_value_propagation_mul_lhs) { - PartialShape op_shape{Dimension(-1), Dimension(4, -1), Dimension(-1, 6), Dimension(5, 7), Dimension(9, 10), 15}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 6), + ov::Dimension(5, 7), + ov::Dimension(9, 10), + 15}; // const lhs - const auto const_op = op::Constant::create(element::f32, {6}, {7, 6, 5, 4, 3, 2}); - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {7, 6, 5, 4, 3, 2}); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(-1, 30), Dimension(20, 28), Dimension(27, 30), 30})); + ov::PartialShape({-1, -1, ov::Dimension(-1, 30), ov::Dimension(20, 28), ov::Dimension(27, 30), 30})); } TEST(type_prop, interval_value_propagation_mul_incorrect_dim_rhs) { // const rhs - result lower than 0 - PartialShape op_shape{Dimension(5, 7)}; - const auto const_op = op::Constant::create(element::f32, {1}, {-3}); - OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op), - NodeValidationFailure, + ov::PartialShape op_shape{ov::Dimension(5, 7)}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {1}, {-3}); + OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op), + ov::NodeValidationFailure, HasSubstr("Dim size cannot be less than -1")); } TEST(type_prop, interval_value_propagation_mul_incorrect_dim_lhs) { // const lhs - result lower than 0 - PartialShape op_shape{Dimension(5, 7)}; - const auto const_op = op::Constant::create(element::f32, {1}, {-3}); - OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op, false), - NodeValidationFailure, + ov::PartialShape op_shape{ov::Dimension(5, 7)}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {1}, {-3}); + OV_EXPECT_THROW(createReshapeSubgraph(op_shape, const_op, false), + ov::NodeValidationFailure, HasSubstr("Dim size cannot be less than -1")); } TEST(type_prop, interval_value_propagation_div_rhs) { // const rhs - PartialShape op_shape{Dimension(8, 16), Dimension(9, 30), 15}; - const auto const_op = op::Constant::create(element::f32, {3}, {4, 3, 5}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); - PartialShape expected_shape{Dimension(2, 4), Dimension(3, 10), 3}; - EXPECT_EQ(reshape->get_element_type(), element::f32); + ov::PartialShape op_shape{ov::Dimension(8, 16), ov::Dimension(9, 30), 15}; + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {3}, {4, 3, 5}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + ov::PartialShape expected_shape{ov::Dimension(2, 4), 
ov::Dimension(3, 10), 3}; + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), expected_shape); } TEST(type_prop, interval_value_propagation_div_rhs_full) { - PartialShape op_shape{Dimension(-1), Dimension(4, -1), Dimension(-1, 6), Dimension(8, 16), Dimension(9, 30), 15}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 6), + ov::Dimension(8, 16), + ov::Dimension(9, 30), + 15}; // const rhs - const auto const_op = op::Constant::create(element::f32, {6}, {8, 2, 2, 4, 3, 5}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); - PartialShape expected_shape{-1, -1, Dimension(-1, 3), Dimension(2, 4), Dimension(3, 10), 3}; - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {8, 2, 2, 4, 3, 5}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + ov::PartialShape expected_shape{-1, -1, ov::Dimension(-1, 3), ov::Dimension(2, 4), ov::Dimension(3, 10), 3}; + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), expected_shape); } TEST(type_prop, interval_value_propagation_div_lhs) { - PartialShape op_shape{Dimension(-1), Dimension(4, -1), Dimension(-1, 6), Dimension(8, 16), Dimension(9, 30), 15}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 6), + ov::Dimension(8, 16), + ov::Dimension(9, 30), + 15}; // const lhs - const auto const_op = op::Constant::create(element::f32, {6}, {8, 8, 12, 32, 90, 45}); - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - PartialShape expected_shape{-1, -1, Dimension(2, -1), Dimension(2, 4), Dimension(3, 10), 3}; - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {8, 8, 12, 32, 90, 45}); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + ov::PartialShape expected_shape{-1, -1, ov::Dimension(2, -1), ov::Dimension(2, 4), ov::Dimension(3, 10), 3}; + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), expected_shape); } TEST(type_prop, interval_value_propagation_pow_rhs) { - PartialShape op_shape{Dimension(-1), Dimension(4, -1), Dimension(-1, 4), Dimension(2, 3), Dimension(3, 4), 2}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 4), + ov::Dimension(2, 3), + ov::Dimension(3, 4), + 2}; // const rhs - const auto const_op = op::Constant::create(element::f32, {6}, {2, 2, 2, 2, 2, 2}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {2, 2, 2, 2, 2, 2}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(-1, 16), Dimension(4, 9), Dimension(9, 16), 4})); + ov::PartialShape({-1, -1, ov::Dimension(-1, 16), ov::Dimension(4, 9), ov::Dimension(9, 16), 4})); } TEST(type_prop, interval_value_propagation_pow_lhs) { - PartialShape op_shape{Dimension(-1), Dimension(4, -1), Dimension(-1, 4), Dimension(2, 3), Dimension(3, 4), 2}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 4), + ov::Dimension(2, 3), + ov::Dimension(3, 4), + 
2}; // const lhs - const auto const_op = op::Constant::create(element::f32, {6}, {2, 2, 2, 2, 2, 2}); - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {6}, {2, 2, 2, 2, 2, 2}); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(1, 16), Dimension(4, 8), Dimension(8, 16), 4})); + ov::PartialShape({-1, -1, ov::Dimension(1, 16), ov::Dimension(4, 8), ov::Dimension(8, 16), 4})); } TEST(type_prop, interval_value_propagation_max_rhs) { - PartialShape op_shape{Dimension(-1), - Dimension(4, -1), - Dimension(-1, 4), - Dimension(-1, 4), - Dimension(3, 5), - Dimension(3, 5), - Dimension(3, 5), - 5, - 8}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 4), + ov::Dimension(-1, 4), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + 5, + 8}; // const rhs - const auto const_op = op::Constant::create(element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(2, 4), 6, Dimension(3, 5), Dimension(4, 5), 7, 8, 8})); + ov::PartialShape({-1, -1, ov::Dimension(2, 4), 6, ov::Dimension(3, 5), ov::Dimension(4, 5), 7, 8, 8})); } TEST(type_prop, interval_value_propagation_max_lhs) { - PartialShape op_shape{Dimension(-1), - Dimension(4, -1), - Dimension(-1, 4), - Dimension(-1, 4), - Dimension(3, 5), - Dimension(3, 5), - Dimension(3, 5), - 5, - 8}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 4), + ov::Dimension(-1, 4), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + 5, + 8}; // const lhs - const auto const_op = op::Constant::create(element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(2, 4), 6, Dimension(3, 5), Dimension(4, 5), 7, 8, 8})); + ov::PartialShape({-1, -1, ov::Dimension(2, 4), 6, ov::Dimension(3, 5), ov::Dimension(4, 5), 7, 8, 8})); } TEST(type_prop, interval_value_propagation_min_rhs) { - PartialShape op_shape{Dimension(-1), - Dimension(4, -1), - Dimension(-1, 4), - Dimension(-1, 4), - Dimension(3, 5), - Dimension(3, 5), - Dimension(3, 5), - 5, - 8}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 4), + ov::Dimension(-1, 4), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + 5, + 8}; // const rhs - const auto const_op = op::Constant::create(element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); - const auto reshape = createReshapeSubgraph(op_shape, const_op); 
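// ---- Editorial sketch (annotation, not part of the patch) --------------------
// The interval_value_propagation_* tests in this hunk call a createReshapeSubgraph
// helper defined earlier in this file. Judging from the inlined add_sub tests further
// below, it presumably builds a ShapeOf -> Convert(f32) -> binary op with a constant
// -> Convert(i32) -> Reshape chain, so the dimension bounds of op_shape travel through
// the arithmetic op and reappear as the interval of the Reshape output shape.
// A minimal sketch under that assumption; the helper name, template parameter and op
// versions below are illustrative, not taken from the patch.
#include <memory>

#include "openvino/op/convert.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/shape_of.hpp"

template <typename BinaryOp>
std::shared_ptr<ov::Node> createReshapeSubgraphSketch(const ov::PartialShape& param_shape,
                                                      const std::shared_ptr<ov::Node>& constant,
                                                      bool constant_rhs = true) {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, param_shape);
    auto shape_of = std::make_shared<ov::op::v3::ShapeOf>(param);
    auto cast_fp = std::make_shared<ov::op::v0::Convert>(shape_of, ov::element::f32);
    // Constant on the right or on the left of the binary op, matching the bool flag
    // the tests pass as the last argument.
    auto arith = constant_rhs ? std::make_shared<BinaryOp>(cast_fp, constant)
                              : std::make_shared<BinaryOp>(constant, cast_fp);
    auto cast_int = std::make_shared<ov::op::v0::Convert>(arith, ov::element::i32);
    return std::make_shared<ov::op::v1::Reshape>(param, cast_int, false);
}
// e.g. createReshapeSubgraphSketch<ov::op::v1::Minimum>(op_shape, const_op) would mirror
// the min_rhs test around this point: each output bound becomes min(dimension, constant)
// element-wise.
// ------------------------------------------------------------------------------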
- EXPECT_EQ(reshape->get_element_type(), element::f32); - EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(-1, 2), Dimension(-1, 4), 2, Dimension(3, 4), Dimension(3, 5), 5, 5})); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); + const auto reshape = createReshapeSubgraph(op_shape, const_op); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); + EXPECT_EQ( + reshape->get_output_partial_shape(0), + ov::PartialShape( + {-1, -1, ov::Dimension(-1, 2), ov::Dimension(-1, 4), 2, ov::Dimension(3, 4), ov::Dimension(3, 5), 5, 5})); } TEST(type_prop, interval_value_propagation_min_lhs) { - PartialShape op_shape{Dimension(-1), - Dimension(4, -1), - Dimension(-1, 4), - Dimension(-1, 4), - Dimension(3, 5), - Dimension(3, 5), - Dimension(3, 5), - 5, - 8}; + ov::PartialShape op_shape{ov::Dimension(-1), + ov::Dimension(4, -1), + ov::Dimension(-1, 4), + ov::Dimension(-1, 4), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + ov::Dimension(3, 5), + 5, + 8}; // const lhs - const auto const_op = op::Constant::create(element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); - const auto reshape = createReshapeSubgraph(op_shape, const_op, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); - EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({-1, -1, Dimension(-1, 2), Dimension(-1, 4), 2, Dimension(3, 4), Dimension(3, 5), 5, 5})); + const auto const_op = ov::op::v0::Constant::create(ov::element::f32, {9}, {2, 2, 2, 6, 2, 4, 7, 8, 5}); + const auto reshape = createReshapeSubgraph(op_shape, const_op, false); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); + EXPECT_EQ( + reshape->get_output_partial_shape(0), + ov::PartialShape( + {-1, -1, ov::Dimension(-1, 2), ov::Dimension(-1, 4), 2, ov::Dimension(3, 4), ov::Dimension(3, 5), 5, 5})); } TEST(type_prop, interval_value_propagation_add_sub) { - // Dimensions with bounds - auto param = make_shared(element::f32, PartialShape{Dimension(2, 8), Dimension(4, 16), 2}); + // ov::Dimensions with bounds + auto param = make_shared(ov::element::f32, + ov::PartialShape{ov::Dimension(2, 8), ov::Dimension(4, 16), 2}); - auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto add = - make_shared(cast_fp, op::Constant::create(element::f32, {3}, {2, 3, 4})); // {(4, 10), (7, 19), 6} - auto sub = - make_shared(add, op::Constant::create(element::f32, {3}, {3, 2, 1})); // {(1, 7), (5, 17), 5} - auto cast_int = make_shared(sub, element::i32); + auto shape_of = make_shared(param); + auto cast_fp = make_shared(shape_of, ov::element::f32); + auto add = make_shared( + cast_fp, + ov::op::v0::Constant::create(ov::element::f32, {3}, {2, 3, 4})); // {(4, 10), (7, 19), 6} + auto sub = make_shared( + add, + ov::op::v0::Constant::create(ov::element::f32, {3}, {3, 2, 1})); // {(1, 7), (5, 17), 5} + auto cast_int = make_shared(sub, ov::element::i32); - auto reshape = make_shared(param, cast_int, false); + auto reshape = make_shared(param, cast_int, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); - EXPECT_EQ(reshape->get_output_partial_shape(0), PartialShape({Dimension(1, 7), Dimension(5, 17), 5})); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); + EXPECT_EQ(reshape->get_output_partial_shape(0), ov::PartialShape({ov::Dimension(1, 7), ov::Dimension(5, 17), 5})); } TEST(type_prop, interval_value_propagation_add_sub_no_bounds) { // Fully dynamic dimension, no upper, no lower bound - auto param = - 
make_shared(element::f32, PartialShape{Dimension(-1), Dimension(4, -1), Dimension(-1, 2)}); + auto param = make_shared( + ov::element::f32, + ov::PartialShape{ov::Dimension(-1), ov::Dimension(4, -1), ov::Dimension(-1, 2)}); - auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto add = make_shared(cast_fp, op::Constant::create(element::f32, {3}, {2, 3, 4})); - auto sub = make_shared(add, op::Constant::create(element::f32, {3}, {3, 2, 1})); - auto cast_int = make_shared(sub, element::i32); + auto shape_of = make_shared(param); + auto cast_fp = make_shared(shape_of, ov::element::f32); + auto add = make_shared(cast_fp, ov::op::v0::Constant::create(ov::element::f32, {3}, {2, 3, 4})); + auto sub = make_shared(add, ov::op::v0::Constant::create(ov::element::f32, {3}, {3, 2, 1})); + auto cast_int = make_shared(sub, ov::element::i32); - auto reshape = make_shared(param, cast_int, false); + auto reshape = make_shared(param, cast_int, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({Dimension(-1), Dimension(-1), Dimension(3, 5)})); // Fully dynamic if no upper bound + ov::PartialShape( + {ov::Dimension(-1), ov::Dimension(-1), ov::Dimension(3, 5)})); // Fully dynamic if no upper bound } TEST(type_prop, interval_value_propagation_add_sub_div_mul) { - auto param = - make_shared(element::f32, PartialShape{Dimension(-1), Dimension(2, 8), Dimension(4, 10), 6}); + auto param = make_shared( + ov::element::f32, + ov::PartialShape{ov::Dimension(-1), ov::Dimension(2, 8), ov::Dimension(4, 10), 6}); - auto shape_of = make_shared(param); - auto cast_fp = make_shared(shape_of, element::f32); - auto add = make_shared( + auto shape_of = make_shared(param); + auto cast_fp = make_shared(shape_of, ov::element::f32); + auto add = make_shared( cast_fp, - op::Constant::create(element::f32, {4}, {2, 2, -1, 3})); // {(-1), (4, 10), (3, 9), (9)} - auto div = make_shared( + ov::op::v0::Constant::create(ov::element::f32, {4}, {2, 2, -1, 3})); // {(-1), (4, 10), (3, 9), (9)} + auto div = make_shared( add, - op::Constant::create(element::f32, {4}, {2, 2, -3, 3})); // {(-1), (2, 5), (-3, -1), (3)} - auto sub = make_shared( + ov::op::v0::Constant::create(ov::element::f32, {4}, {2, 2, -3, 3})); // {(-1), (2, 5), (-3, -1), (3)} + auto sub = make_shared( div, - op::Constant::create(element::f32, {4}, {2, 1, 2, -4})); // {(-1), (1, 4), (-5, -3), (7)} - auto mul = make_shared( + ov::op::v0::Constant::create(ov::element::f32, {4}, {2, 1, 2, -4})); // {(-1), (1, 4), (-5, -3), (7)} + auto mul = make_shared( sub, - op::Constant::create(element::f32, {4}, {2, 3, -4, 5})); // {(-1), (3, 12), (12, 20), (35)} - auto cast_int = make_shared(mul, element::i32); + ov::op::v0::Constant::create(ov::element::f32, {4}, {2, 3, -4, 5})); // {(-1), (3, 12), (12, 20), (35)} + auto cast_int = make_shared(mul, ov::element::i32); - auto reshape = make_shared(param, cast_int, false); + auto reshape = make_shared(param, cast_int, false); - EXPECT_EQ(reshape->get_element_type(), element::f32); + EXPECT_EQ(reshape->get_element_type(), ov::element::f32); EXPECT_EQ(reshape->get_output_partial_shape(0), - PartialShape({Dimension(-1), Dimension(3, 12), Dimension(12, 20), 35})); + ov::PartialShape({ov::Dimension(-1), ov::Dimension(3, 12), ov::Dimension(12, 20), 35})); } diff --git a/src/core/tests/type_prop/broadcast.cpp b/src/core/tests/type_prop/broadcast.cpp index 
46bad611c1b..023f8fa9505 100644 --- a/src/core/tests/type_prop/broadcast.cpp +++ b/src/core/tests/type_prop/broadcast.cpp @@ -2,18 +2,24 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/op/broadcast.hpp" + +#include + #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/type_prop.hpp" -#include "gtest/gtest.h" -#include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset6.hpp" #include "openvino/core/dimension_tracker.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/unsqueeze.hpp" #include "openvino/op/util/attr_types.hpp" -NGRAPH_SUPPRESS_DEPRECATED_START - using namespace std; -using namespace ngraph; using namespace testing; // Because v3::Broadcast is backward compatible to v1::Broadcast all v1::Broadcast tests should pass @@ -22,96 +28,96 @@ class BroadcastTests : public ::testing::Test {}; TYPED_TEST_SUITE_P(BroadcastTests); TYPED_TEST_P(BroadcastTests, broadcast_dynamic_value_propagation) { - Dimension marked = Dimension(3); + ov::Dimension marked = ov::Dimension(3); ov::DimensionTracker::set_label(marked, 10); - PartialShape target = PartialShape{1, 2, marked, 4}; + ov::PartialShape target = ov::PartialShape{1, 2, marked, 4}; - auto param = make_shared(element::f32, Shape{1, 1}); - auto param_1 = make_shared(element::f32, target); - auto shape = make_shared(param_1); + auto param = make_shared(ov::element::f32, ov::Shape{1, 1}); + auto param_1 = make_shared(ov::element::f32, target); + auto shape = make_shared(param_1); - auto indices = op::Constant::create(element::i32, {}, {2}); - auto axis = op::Constant::create(element::i32, {1}, {0}); - auto gather = make_shared(shape, indices, axis); - auto unsqueeze = make_shared(gather, axis); + auto indices = ov::op::v0::Constant::create(ov::element::i32, {}, {2}); + auto axis = ov::op::v0::Constant::create(ov::element::i32, {1}, {0}); + auto gather = make_shared(shape, indices, axis); + auto unsqueeze = make_shared(gather, axis); - auto five = op::Constant::create(element::i64, {1}, {5}); - auto target_shape = std::make_shared(OutputVector{unsqueeze, five}, 0); + auto five = ov::op::v0::Constant::create(ov::element::i64, {1}, {5}); + auto target_shape = std::make_shared(ov::OutputVector{unsqueeze, five}, 0); auto bc = make_shared(param, target_shape); - ASSERT_EQ(bc->get_element_type(), element::f32); - ASSERT_EQ(bc->get_shape(), (Shape{3, 5})); + ASSERT_EQ(bc->get_element_type(), ov::element::f32); + ASSERT_EQ(bc->get_shape(), (ov::Shape{3, 5})); ASSERT_EQ(ov::DimensionTracker::get_label(bc->get_output_partial_shape(0)[0]), 10); } TYPED_TEST_P(BroadcastTests, broadcast_numpy) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 6}); auto bc = make_shared(param, target_shape); - ASSERT_EQ(bc->get_element_type(), element::f32); - ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); + ASSERT_EQ(bc->get_element_type(), ov::element::f32); + ASSERT_EQ(bc->get_shape(), (ov::Shape{2, 3, 6})); } TYPED_TEST_P(BroadcastTests, broadcast_axes_mapping) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = 
op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 2}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 1}); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2}); auto bc = make_shared(param, target_shape, axes_mapping); - ASSERT_EQ(bc->get_element_type(), element::f32); - ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 1})); + ASSERT_EQ(bc->get_element_type(), ov::element::f32); + ASSERT_EQ(bc->get_shape(), (ov::Shape{2, 3, 1})); } TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_constants) { - auto param = make_shared(element::f32, Shape{16}); - auto target_shape_constant_1 = op::Constant::create(element::i64, Shape{1}, {1}); - auto target_shape_constant_2 = op::Constant::create(element::i64, Shape{1}, {16}); - auto target_shape_constant_3 = op::Constant::create(element::i64, Shape{1}, {50}); - auto target_shape_constant_4 = op::Constant::create(element::i64, Shape{1}, {50}); + auto param = make_shared(ov::element::f32, ov::Shape{16}); + auto target_shape_constant_1 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); + auto target_shape_constant_2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {16}); + auto target_shape_constant_3 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {50}); + auto target_shape_constant_4 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {50}); std::int64_t axis = 0; - std::vector> args{target_shape_constant_1, - target_shape_constant_2, - target_shape_constant_3, - target_shape_constant_4}; - auto target_shape = make_shared(args, axis); - auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + std::vector> args{target_shape_constant_1, + target_shape_constant_2, + target_shape_constant_3, + target_shape_constant_4}; + auto target_shape = make_shared(args, axis); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); auto bc = make_shared(param, target_shape, axes_mapping, "NONE"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); - ASSERT_EQ(bc->get_output_partial_shape(0).rank(), (Rank{4})); + ASSERT_EQ(bc->get_output_partial_shape(0).rank(), (ov::Rank{4})); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); - ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); + ASSERT_EQ(bc->get_output_partial_shape(0), (ov::PartialShape{1, 16, 50, 50})); } TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_node) { - auto param = make_shared(element::f32, Shape{16}); - auto target_shape_constant_1 = make_shared(element::i64, Shape{1}); - auto target_shape_constant_2 = op::Constant::create(element::i64, Shape{1}, {16}); - auto target_shape_constant_3 = op::Constant::create(element::i64, Shape{1}, {50}); - auto target_shape_constant_4 = op::Constant::create(element::i64, Shape{1}, {50}); + auto param = make_shared(ov::element::f32, ov::Shape{16}); + auto target_shape_constant_1 = make_shared(ov::element::i64, ov::Shape{1}); + auto target_shape_constant_2 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {16}); + auto target_shape_constant_3 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {50}); + auto target_shape_constant_4 = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {50}); std::int64_t axis = 0; - std::vector> 
args{target_shape_constant_1, - target_shape_constant_2, - target_shape_constant_3, - target_shape_constant_4}; - auto target_shape = make_shared(args, axis); - auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + std::vector> args{target_shape_constant_1, + target_shape_constant_2, + target_shape_constant_3, + target_shape_constant_4}; + auto target_shape = make_shared(args, axis); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); auto bc = make_shared(param, target_shape, axes_mapping, "NONE"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); - ASSERT_EQ(bc->get_output_partial_shape(0).rank(), (Rank{4})); + ASSERT_EQ(bc->get_output_partial_shape(0).rank(), (ov::Rank{4})); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); - ASSERT_EQ(bc->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 16, 50, 50})); + ASSERT_EQ(bc->get_output_partial_shape(0), ov::PartialShape({ov::Dimension::dynamic(), 16, 50, 50})); } TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{3}, {1, 2, 3}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 1}); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {1, 2, 3}); try { auto bc = make_shared(param, target_shape, axes_mapping); FAIL() << "Broadcast: target shape mismatch with input rank not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Broadcast axes_mapping shape [3] doesn't match rank of input tensor 2"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -119,14 +125,14 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) { } TYPED_TEST_P(BroadcastTests, broadcast_fail_transpose) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 1, 3}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {2, 1}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 1, 3}); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {2, 1}); try { auto bc = make_shared(param, target_shape, axes_mapping); FAIL() << "Broadcast: transpose prohibition not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Broadcast doesn't permit transposes. 
axes_mapping AxisVector{2, 1} " "not in sorted order"); @@ -136,14 +142,14 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_transpose) { } TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 3}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 1}); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 3}); try { auto bc = make_shared(param, target_shape, axes_mapping); FAIL() << "Broadcast: wrong axes_map not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Broadcast axes_mapping[1]: 3 exceeds target rank 3"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -151,14 +157,14 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map) { } TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map_shape) { - auto param = make_shared(element::f32, Shape{3, 2}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 3}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 2}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 2}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 3}); + auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, {1, 2}); try { auto bc = make_shared(param, target_shape, axes_mapping); FAIL() << "Broadcast: wrong target shape not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Broadcast target[axes_mapping[1]] Expected 2. Got 3"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -166,14 +172,14 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map_shape) { } TYPED_TEST_P(BroadcastTests, broadcast_axes_wrong_rank) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{1}); - auto bc_axes = make_shared(element::i64, Shape{2, 2}); + auto arg = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto bc_shape = make_shared(ov::element::i64, ov::Shape{1}); + auto bc_axes = make_shared(ov::element::i64, ov::Shape{2, 2}); try { auto bc = make_shared(arg, bc_shape, bc_axes); FAIL() << "Broadcast: axes shape rank not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Broadcast axes rank must be 1"); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -181,13 +187,13 @@ TYPED_TEST_P(BroadcastTests, broadcast_axes_wrong_rank) { } TYPED_TEST_P(BroadcastTests, broadcast_target_shape_wrong_rank) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{}); + auto arg = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto bc_shape = make_shared(ov::element::i64, ov::Shape{}); try { auto bc = make_shared(arg, bc_shape); FAIL() << "Broadcast: axes target shape rank not detected"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Broadcast shape rank must be 1, but has"); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -195,40 +201,40 @@ TYPED_TEST_P(BroadcastTests, broadcast_target_shape_wrong_rank) { } TYPED_TEST_P(BroadcastTests, broadcast_fully_dynamic_target_shape) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, PartialShape::dynamic()); - auto bc_axes = make_shared(element::i64, Shape{2}); + auto arg = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto bc_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + auto bc_axes = make_shared(ov::element::i64, ov::Shape{2}); auto bc = make_shared(arg, bc_shape, bc_axes); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); - bc_shape = make_shared(element::i64, Shape{1}); + bc_shape = make_shared(ov::element::i64, ov::Shape{1}); bc = make_shared(arg, bc_shape, bc_axes); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_dynamic_values_of_target_shape) { - const auto data = make_shared(element::f32, Shape{2}); - const auto target = make_shared(element::i32, PartialShape::dynamic(4)); - const auto target_shape = std::make_shared(target); - const auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + const auto data = make_shared(ov::element::f32, ov::Shape{2}); + const auto target = make_shared(ov::element::i32, ov::PartialShape::dynamic(4)); + const auto target_shape = std::make_shared(target); + const auto axes_mapping = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); auto bc = make_shared(data, target_shape, axes_mapping); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_output_partial_shape(0), PartialShape::dynamic(4)); + ASSERT_EQ(bc->get_output_partial_shape(0), ov::PartialShape::dynamic(4)); } TYPED_TEST_P(BroadcastTests, broadcast_broadcast_shape_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4}); + auto arg = make_shared(ov::element::f32, ov::Shape{2, 4}); // wrong element type - auto bc_shape = make_shared(element::boolean, Shape{1}); - auto bc_axes = make_shared(element::i64, Shape{2}); + auto bc_shape = make_shared(ov::element::boolean, ov::Shape{1}); + auto bc_axes = make_shared(ov::element::i64, ov::Shape{2}); try { auto bc = make_shared(arg, bc_shape, bc_axes); FAIL() << "Broadcast: did not detect shape element type not integral number"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Broadcast shape must be an integral number")); } catch (...) 
{ FAIL() << "Deduced type check failed for unexpected reason"; @@ -236,15 +242,15 @@ TYPED_TEST_P(BroadcastTests, broadcast_broadcast_shape_et_wrong) { } TYPED_TEST_P(BroadcastTests, broadcast_axes_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{1}); + auto arg = make_shared(ov::element::f32, ov::Shape{2, 4}); + auto bc_shape = make_shared(ov::element::i64, ov::Shape{1}); // wrong element type - auto bc_axes = make_shared(element::f32, Shape{2}); + auto bc_axes = make_shared(ov::element::f32, ov::Shape{2}); try { auto bc = make_shared(arg, bc_shape, bc_axes); FAIL() << "Broadcast: did not detect axes element type not integral numbers"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Broadcast axes must be integral numbers, but are:")); } catch (...) { FAIL() << "Deduced type check failed for unexpected reason"; @@ -254,99 +260,106 @@ TYPED_TEST_P(BroadcastTests, broadcast_axes_et_wrong) { // EXPLICIT MODE TYPED_TEST_P(BroadcastTests, broadcast_explicit_all_inputs_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + const auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{3}, vector{0, 1, 2}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{0, 1, 2}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_static_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); + const auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{3}, vector{0, 1, 2}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{0, 1, 2}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = op::Constant::create(element::i64, Shape{3}, vector{1, 2, 3}); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto 
data = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{1, 2, 3}); + const auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); - ASSERT_EQ(bc->get_shape(), (Shape{1, 2, 3})); + ASSERT_EQ(bc->get_shape(), (ov::Shape{1, 2, 3})); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); - ASSERT_EQ(bc->get_shape(), (Shape{1, 2, 3})); + ASSERT_EQ(bc->get_shape(), (ov::Shape{1, 2, 3})); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_input_rank_static) { - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + const auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_and_input_data_rank_static) { // static rank data - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); + auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_input) { - const auto target_shape = op::Constant::create(element::i64, Shape{4}, vector{1, 1, 5, 10}); + const auto target_shape = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, vector{1, 1, 5, 10}); // 
static rank data - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); + auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_shape(), (Shape{1, 1, 5, 10})); + ASSERT_EQ(bc->get_shape(), (ov::Shape{1, 1, 5, 10})); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{4}, vector{0, 2, 1, 3}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, vector{0, 2, 1, 3}); try { auto bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); FAIL() << "Broadcast: Broadcast axes_mapping shape doesn't match rank of input tensor"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Broadcast axes_mapping shape [4] doesn't match rank of input tensor 3")); } catch (...) { @@ -355,54 +368,55 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_i } TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape) { - const auto data = make_shared(element::f32, PartialShape{1, 2, 3, 4}); + const auto data = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 4}); // dynamic target shape and axes mapping - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{4}, vector{0, 2, 1, 3}); + const auto axes_mapping_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, vector{0, 2, 1, 3}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape and const axes mapping - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_shape) { - const auto data = make_shared(element::f32, PartialShape{4}); - auto target_shape = op::Constant::create(element::i64, Shape{4}, vector{1, 4, 2, 3}); + const auto data = make_shared(ov::element::f32, ov::PartialShape{4}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, vector{1, 4, 2, 3}); // dynamic axes mapping 
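// ---- Editorial sketch (annotation, not part of the patch) --------------------
// The EXPLICIT-mode checks in this typed suite follow one pattern: with a constant
// target shape the output shape becomes fully static even when the data shape is
// dynamic, while a non-constant target shape at best pins the output rank (when its
// length is known) and otherwise leaves it dynamic. A minimal standalone illustration,
// assuming v3::Broadcast and the v0 Parameter/Constant ops used elsewhere in this file:
#include <memory>
#include <vector>

#include "openvino/op/broadcast.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"

inline void explicit_broadcast_shape_inference_sketch() {
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
    auto target_shape =
        ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, std::vector<int64_t>{1, 2, 3});
    auto axes_mapping =
        ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, std::vector<int64_t>{0, 1, 2});
    auto bc = std::make_shared<ov::op::v3::Broadcast>(data, target_shape, axes_mapping, "EXPLICIT");
    // bc->get_output_partial_shape(0) is the static shape {1, 2, 3}.
}
// ------------------------------------------------------------------------------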
- const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_shape(), (Shape{1, 4, 2, 3})); + ASSERT_EQ(bc->get_shape(), (ov::Shape{1, 4, 2, 3})); // const axes mapping - const auto axes_mapping_const = op::Constant::create(element::i64, Shape{1}, vector{1}); + const auto axes_mapping_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, vector{1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_shape(), (Shape{1, 4, 2, 3})); + ASSERT_EQ(bc->get_shape(), (ov::Shape{1, 4, 2, 3})); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) { // dynamic input - auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape{4}); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape{4}); + const auto axes_mapping = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -410,7 +424,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) { ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); // static rank input - data = make_shared(element::f32, PartialShape::dynamic(2)); + data = make_shared(ov::element::f32, ov::PartialShape::dynamic(2)); bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -420,30 +434,30 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) { // NUMPY MODE TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_shape_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic()); // dynamic output shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_constant) { // dynamic data - auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = op::Constant::create(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{1, 2, 3}); auto bc = make_shared(data, target_shape, 
"NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); // static rank data - data = make_shared(element::f32, PartialShape::dynamic(2)); + data = make_shared(ov::element::f32, ov::PartialShape::dynamic(2)); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); @@ -451,70 +465,74 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_constant) { TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_dynamic) { // static rank data - auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static shape data - data = make_shared(element::f32, PartialShape{3, 4, 5, 6}); + data = make_shared(ov::element::f32, ov::PartialShape{3, 4, 5, 6}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_target_shape_static_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + const auto data = make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); + const auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); const auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_static_shape) { - const auto data = make_shared(element::f32, PartialShape{1, 2, 3}); + const auto data = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3}); // static rank target_shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // constant target_shape - const auto target_shape_const = op::Constant::create(element::i64, Shape{3}, vector{3, 2, 3}); + const auto target_shape_const = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, vector{3, 2, 3}); bc = make_shared(data, target_shape_const, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); - ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{3, 2, 3})); + ASSERT_EQ(bc->get_output_partial_shape(0), (ov::PartialShape{3, 2, 3})); } TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) { - const Shape expected_target_shape{1, 2, 3, 4}; + const ov::Shape expected_target_shape{1, 2, 3, 4}; const auto target_shape = - op::Constant::create(element::i64, - {expected_target_shape.size()}, - std::vector(expected_target_shape.begin(), expected_target_shape.end())); + ov::op::v0::Constant::create(ov::element::i64, + {expected_target_shape.size()}, + std::vector(expected_target_shape.begin(), expected_target_shape.end())); - auto data = 
make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + auto data = make_shared(ov::element::f32, ov::PartialShape{2, 3, ov::Dimension::dynamic()}); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); + data = make_shared(ov::element::f32, + ov::PartialShape{ov::Dimension::dynamic(), 3, ov::Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + data = make_shared(ov::element::f32, + ov::PartialShape{2, ov::Dimension::dynamic(), ov::Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, - PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); + data = make_shared( + ov::element::f32, + ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -522,12 +540,13 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) { } TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) { - const auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 2, 3, 4}); + const auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, {1, 2, 3, 4}); - auto data = make_shared(element::f32, PartialShape{Dimension::dynamic(), 999, 3, 4}); + auto data = + make_shared(ov::element::f32, ov::PartialShape{ov::Dimension::dynamic(), 999, 3, 4}); try { auto bc = make_shared(data, target_shape, "NUMPY"); - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Input shape dimension equal 999 cannot be broadcasted (numpy mode) " "to 2. Allowed input dimension value would be 1 or 2"); @@ -535,12 +554,12 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) { FAIL() << "Deduced type check failed for unexpected reason"; } - data = - make_shared(element::f32, - PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 888}); + data = make_shared( + ov::element::f32, + ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic(), 888}); try { auto bc = make_shared(data, target_shape, "NUMPY"); - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Input shape dimension equal 888 cannot be broadcasted (numpy mode) " "to 4. 
Allowed input dimension value would be 1 or 4"); @@ -548,12 +567,12 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) { FAIL() << "Deduced type check failed for unexpected reason"; } - data = - make_shared(element::f32, - PartialShape{5, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); + data = make_shared( + ov::element::f32, + ov::PartialShape{5, ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}); try { auto bc = make_shared(data, target_shape, "NUMPY"); - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), "Input shape dimension equal 5 cannot be broadcasted (numpy mode) to " "1. Allowed input dimension value would be 1"); @@ -595,50 +614,53 @@ REGISTER_TYPED_TEST_SUITE_P(BroadcastTests, broadcast_numpy_static_dims_incorrect, broadcast_dynamic_value_propagation); -typedef ::testing::Types BroadcastTypes; +typedef ::testing::Types BroadcastTypes; // the last empty argument resolves compiler warning on MAC: // `must specify at least one argument for '...'` (variadic macro) INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, BroadcastTests, BroadcastTypes, ); // changing AutoBroadcastSpec to BroadcastModeSpec forces runing pdpd tests separately TEST(type_prop, broadcast_v1_pdpd) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 6}); - auto bc = - make_shared(param, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); - ASSERT_EQ(bc->get_element_type(), element::f32); - ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); + auto bc = make_shared(param, + target_shape, + ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, 1)); + ASSERT_EQ(bc->get_element_type(), ov::element::f32); + ASSERT_EQ(bc->get_shape(), (ov::Shape{2, 3, 6})); } TEST(type_prop, broadcast_v3_pdpd) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(ov::element::f32, ov::Shape{3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {2, 3, 6}); - auto bc = make_shared(param, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); - ASSERT_EQ(bc->get_element_type(), element::f32); - ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); + auto bc = make_shared(param, + target_shape, + ov::op::BroadcastModeSpec(ov::op::BroadcastType::PDPD, 1)); + ASSERT_EQ(bc->get_element_type(), ov::element::f32); + ASSERT_EQ(bc->get_shape(), (ov::Shape{2, 3, 6})); } TEST(type_prop, broadcast_v3_bidirectional_mode_string) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i32, Shape{2}); + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 4, 1}); + const auto shape = make_shared(ov::element::i32, ov::Shape{2}); - const auto broadcast_v3 = make_shared(arg, shape, "BIDIRECTIONAL"); + const auto broadcast_v3 = make_shared(arg, shape, "BIDIRECTIONAL"); - ASSERT_EQ(broadcast_v3->get_broadcast_spec(), op::BroadcastType::BIDIRECTIONAL); + ASSERT_EQ(broadcast_v3->get_broadcast_spec(), ov::op::BroadcastType::BIDIRECTIONAL); } TEST(type_prop, broadcast_v3_shape_unexpected_axes_mapping_input) { - const auto arg = make_shared(element::f32, Shape{1, 4, 
1}); - const auto shape = make_shared(element::i16, Shape{2}); - const auto axes_mapping = make_shared(element::f32, Shape{3}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 4, 1}); + const auto shape = make_shared(ov::element::i16, ov::Shape{2}); + const auto axes_mapping = make_shared(ov::element::f32, ov::Shape{3}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; try { - const auto broadcast_v3 = make_shared(arg, shape, axes_mapping, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, axes_mapping, broadcast_spec); FAIL() << "Unexpected axes mapping input exception not thrown"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("axes_mapping input should not be provided for mode other than explicit")); } catch (...) { @@ -647,14 +669,14 @@ TEST(type_prop, broadcast_v3_shape_unexpected_axes_mapping_input) { } TEST(type_prop, broadcast_v3_not_provided_axes_input_for_explicit_mode) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i16, Shape{2}); - const auto broadcast_spec = op::BroadcastType::EXPLICIT; + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 4, 1}); + const auto shape = make_shared(ov::element::i16, ov::Shape{2}); + const auto broadcast_spec = ov::op::BroadcastType::EXPLICIT; try { - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); FAIL() << "axes_mapping input should be provided if explicit mode is used"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("axes_mapping input should be provided if explicit mode is used")); } catch (...) 
{ @@ -663,98 +685,98 @@ TEST(type_prop, broadcast_v3_not_provided_axes_input_for_explicit_mode) { } TEST(type_prop, broadcast_v3_shape) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {1, 4}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 4, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {2}, {1, 4}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 4, 4})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{2}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::f32); + ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{1, 4, 4})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{2}))); } TEST(type_prop, broadcast_v3_shape_2) { - const auto arg = make_shared(element::f32, Shape{3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {2, 1, 6}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{3, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {3}, {2, 1, 6}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{2, 3, 6})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::f32); + ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{2, 3, 6})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_shape_3) { - const auto arg = make_shared(element::f32, Shape{2, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {2, 4}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{2, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {2}, {2, 4}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{2, 4})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{1}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::f32); + ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{2, 4})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{1}))); } TEST(type_prop, broadcast_v3_shape_4) { - const auto arg = make_shared(element::f32, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {3, 1}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 3, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {2}, {3, 1}); + const auto broadcast_spec = 
ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 3, 1})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::f32); + ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{1, 3, 1})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{}))); } TEST(type_prop, broadcast_v3_shape_5) { - const auto arg = make_shared(element::f32, Shape{16, 1, 1}); - const auto shape = op::Constant::create(element::i64, {4}, {1, 1, 50, 50}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{16, 1, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {4}, {1, 1, 50, 50}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 16, 50, 50})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2, 3}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::f32); + ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{1, 16, 50, 50})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{0, 2, 3}))); } TEST(type_prop, broadcast_v3_shape_6) { - const auto arg = make_shared(element::f32, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {3, 1, 3}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 3, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {3}, {3, 1, 3}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{3, 3, 3})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::f32); + ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{3, 3, 3})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_shape_6_type_infer) { - const auto arg = make_shared(element::u16, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {3, 1, 3}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::u16, ov::Shape{1, 3, 1}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {3}, {3, 1, 3}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::u16); - ASSERT_EQ(broadcast_v3->get_shape(), (Shape{3, 3, 3})); - ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); + ASSERT_EQ(broadcast_v3->get_element_type(), ov::element::u16); + 
ASSERT_EQ(broadcast_v3->get_shape(), (ov::Shape{3, 3, 3})); + ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, ov::AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_incorrect_target_shape) { - const auto arg = make_shared(element::f32, Shape{4, 3, 2}); - const auto shape = op::Constant::create(element::i64, {3}, {8, 6, 4}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{4, 3, 2}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {3}, {8, 6, 4}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; try { - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); FAIL() << "Not applicable breadcast exception not thrown"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Broadcast incorrect target shape. Expecting either 1 or 4. Got 8")); } catch (...) { @@ -763,14 +785,14 @@ TEST(type_prop, broadcast_v3_incorrect_target_shape) { } TEST(type_prop, broadcast_v3_incorrect_target_shape_2) { - const auto arg = make_shared(element::f32, Shape{1, 1, 2}); - const auto shape = op::Constant::create(element::i64, {2}, {2, 3}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::Shape{1, 1, 2}); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {2}, {2, 3}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; try { - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); FAIL() << "Not applicable breadcast exception not thrown"; - } catch (const NodeValidationFailure& error) { + } catch (const ov::NodeValidationFailure& error) { EXPECT_HAS_SUBSTRING(error.what(), std::string("Broadcast incorrect target shape. Expecting either 1 or 2. Got 3")); } catch (...) 
{ @@ -779,203 +801,207 @@ TEST(type_prop, broadcast_v3_incorrect_target_shape_2) { } TEST(type_prop, broadcast_v3_output_rank_not_deduced) { - const auto arg = make_shared(element::f32, PartialShape::dynamic()); - const auto shape = make_shared(element::i64, PartialShape::dynamic(1)); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::PartialShape::dynamic()); + const auto shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (PartialShape::dynamic())); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (ov::PartialShape::dynamic())); } TEST(type_prop, broadcast_v3_output_rank_deduced_from_arg) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); - const auto shape = op::Constant::create(element::i64, {3}, {8, 6, 4}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {3}, {8, 6, 4}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), 8, 6, 4})); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (ov::PartialShape{ov::Dimension::dynamic(), 8, 6, 4})); } TEST(type_prop, broadcast_v3_output_rank_deduced_from_new_shape_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); - const auto shape = op::Constant::create(element::i64, {5}, {8, 6, 1, 5, 1}); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); + const auto shape = ov::op::v0::Constant::create(ov::element::i64, {5}, {8, 6, 1, 5, 1}); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 5); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), - (PartialShape{8, 6, Dimension::dynamic(), 5, Dimension::dynamic()})); + (ov::PartialShape{8, 6, ov::Dimension::dynamic(), 5, ov::Dimension::dynamic()})); } TEST(type_prop, broadcast_v3_bidirectional_dynamic_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic()); + const auto arg = make_shared(ov::element::f32, ov::PartialShape::dynamic()); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); + auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); + 
target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); + broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - const auto target_shape_const = op::Constant::create(element::i64, {3}, {2, 4, 6}); - broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); + const auto target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {3}, {2, 4, 6}); + broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, broadcast_v3_bidirectional_static_rank_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); + const auto arg = make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); + auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); + target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); + broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - const auto target_shape_const = op::Constant::create(element::i64, {3}, {2, 4, 6}); - broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); + const auto target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {3}, {2, 4, 6}); + broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).is_dynamic()); } TEST(type_prop, broadcast_v3_bidirectional_static_shape_input) { - const auto arg = make_shared(element::f32, PartialShape{1, 2, 3, 1}); + const auto arg = make_shared(ov::element::f32, ov::PartialShape{1, 2, 3, 1}); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); + auto target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic()); + auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); + target_shape = make_shared(ov::element::i64, ov::PartialShape::dynamic(1)); + broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - auto target_shape_const = op::Constant::create(element::i64, {4}, {2, 2, 3, 2}); - broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); + auto target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {4}, {2, 2, 3, 2}); + broadcast_v3 = make_shared(arg, target_shape_const, 
"BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).is_static()); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (PartialShape{2, 2, 3, 2})); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (ov::PartialShape{2, 2, 3, 2})); - target_shape_const = op::Constant::create(element::i64, {4}, {5, 2, 3, 7}); - broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); + target_shape_const = ov::op::v0::Constant::create(ov::element::i64, {4}, {5, 2, 3, 7}); + broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).is_static()); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (PartialShape{5, 2, 3, 7})); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (ov::PartialShape{5, 2, 3, 7})); } TEST(type_prop, broadcast_v3_bidirectional_partially_dynamic_input) { - const auto target_shape = op::Constant::create(element::i64, Shape{4}, vector{1, 1, 50, 50}); + const auto target_shape = + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, vector{1, 1, 50, 50}); - auto data = make_shared(element::f32, PartialShape{16, 1, Dimension::dynamic()}); - auto bc = make_shared(data, target_shape, "BIDIRECTIONAL"); + auto data = make_shared(ov::element::f32, ov::PartialShape{16, 1, ov::Dimension::dynamic()}); + auto bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); + ASSERT_EQ(bc->get_output_partial_shape(0), (ov::PartialShape{1, 16, 50, 50})); - data = make_shared(element::f32, PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()}); - bc = make_shared(data, target_shape, "BIDIRECTIONAL"); + data = make_shared(ov::element::f32, + ov::PartialShape{ov::Dimension::dynamic(), 1, ov::Dimension::dynamic()}); + bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 50, 50})); + ASSERT_EQ(bc->get_output_partial_shape(0), (ov::PartialShape{1, ov::Dimension::dynamic(), 50, 50})); - data = make_shared(element::f32, PartialShape{16, Dimension::dynamic(), Dimension::dynamic()}); - bc = make_shared(data, target_shape, "BIDIRECTIONAL"); + data = make_shared(ov::element::f32, + ov::PartialShape{16, ov::Dimension::dynamic(), ov::Dimension::dynamic()}); + bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); + ASSERT_EQ(bc->get_output_partial_shape(0), (ov::PartialShape{1, 16, 50, 50})); - data = make_shared(element::f32, - PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - bc = make_shared(data, target_shape, "BIDIRECTIONAL"); + data = make_shared( + ov::element::f32, + ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 
ov::Dimension::dynamic()}); + bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); - ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 50, 50})); + ASSERT_EQ(bc->get_output_partial_shape(0), (ov::PartialShape{1, ov::Dimension::dynamic(), 50, 50})); } TEST(type_prop, broadcast_i32_shape_value) { - const auto arg = make_shared(element::f32, PartialShape({5, -1})); - const auto shape = make_shared(arg, element::i64); - const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; + const auto arg = make_shared(ov::element::f32, ov::PartialShape({5, -1})); + const auto shape = make_shared(arg, ov::element::i64); + const auto broadcast_spec = ov::op::BroadcastType::BIDIRECTIONAL; - const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); + const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), PartialShape({5, -1})); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), ov::PartialShape({5, -1})); // shape type resetting - shape->set_output_type(element::i32); + shape->set_output_type(ov::element::i32); arg->revalidate_and_infer_types(); shape->revalidate_and_infer_types(); broadcast_v3->revalidate_and_infer_types(); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), PartialShape({5, -1})); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), ov::PartialShape({5, -1})); // broadcast type resetting - broadcast_v3->set_broadcast_spec(op::BroadcastType::NUMPY); + broadcast_v3->set_broadcast_spec(ov::op::BroadcastType::NUMPY); arg->revalidate_and_infer_types(); shape->revalidate_and_infer_types(); broadcast_v3->revalidate_and_infer_types(); - ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), PartialShape({5, -1})); + ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), ov::PartialShape({5, -1})); } TEST(type_prop, broadcast_v3_default_constructor) { - auto param = make_shared(element::f32, Shape{5, 2, 3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 3, 6}); + auto param = make_shared(ov::element::f32, ov::Shape{5, 2, 3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {1, 3, 6}); - auto op = make_shared(); + auto op = make_shared(); - EXPECT_EQ(op->get_broadcast_spec().m_type, op::BroadcastType::NUMPY); + EXPECT_EQ(op->get_broadcast_spec().m_type, ov::op::BroadcastType::NUMPY); - op->set_broadcast_spec(op::BroadcastType::BIDIRECTIONAL); - EXPECT_EQ(op->get_broadcast_spec().m_type, op::BroadcastType::BIDIRECTIONAL); + op->set_broadcast_spec(ov::op::BroadcastType::BIDIRECTIONAL); + EXPECT_EQ(op->get_broadcast_spec().m_type, ov::op::BroadcastType::BIDIRECTIONAL); op->set_argument(0, param); op->set_argument(1, target_shape); op->validate_and_infer_types(); - EXPECT_EQ(op->get_element_type(), element::f32); - EXPECT_EQ(op->get_shape(), (Shape{5, 2, 3, 6})); + EXPECT_EQ(op->get_element_type(), ov::element::f32); + EXPECT_EQ(op->get_shape(), (ov::Shape{5, 2, 3, 6})); } TEST(type_prop, broadcast_v3_bidirectional_data_bigger_rank_numpy) { - auto param = make_shared(element::f32, Shape{5, 2, 3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {4, 3, 6}); + auto param = make_shared(ov::element::f32, ov::Shape{5, 2, 3, 1}); + auto target_shape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{3}, {4, 3, 6}); - OV_EXPECT_THROW(auto b = make_shared(param, 
target_shape), - NodeValidationFailure, + OV_EXPECT_THROW(auto b = make_shared(param, target_shape), + ov::NodeValidationFailure, HasSubstr("Broadcast target_shape has smaller rank")); } TEST(type_prop, broadcast_v3_labels_in0_dynamic_mixed_dims_bidirectional) { // All dimensions of A have labels, B without labels - PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; ov::TensorLabel expected_labels{10, 11, ov::no_label, 13, ov::no_label, 15, 16, 17, ov::no_label, 19}; set_shape_labels(pshape_a, {10, 11, 12, 13, 14, 15, 16, 17, 18, 19}); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); + auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); const auto out_shape = op->get_output_partial_shape(0); @@ -985,19 +1011,19 @@ TEST(type_prop, broadcast_v3_labels_in0_dynamic_mixed_dims_bidirectional) { TEST(type_prop, broadcast_v3_labels_in1_dynamic_mixed_dims_bidirectional) { // All dimensions of B have labels, A without labels - PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; ov::TensorLabel expected_labels{10, 11, 12, ov::no_label, 14, ov::no_label, 16, 17, 18, ov::no_label}; set_shape_labels(pshape_b, {10, 11, 12, 13, 14, 15, 16, 17, 18, 19}); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); + auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); const auto out_shape = op->get_output_partial_shape(0); @@ -1007,20 +1033,20 @@ TEST(type_prop, broadcast_v3_labels_in1_dynamic_mixed_dims_bidirectional) { TEST(type_prop, broadcast_v3_labels_different_dynamic_mixed_dims_broadcast_bidirectional) { // Both params have dimensions with different labels - PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 
9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; ov::TensorLabel expected_labels{ov::no_label, 21, 22, 13, 24, 15, ov::no_label, ov::no_label, 28, 19}; set_shape_labels(pshape_a, {10, 11, 12, 13, 14, 15, 16, 17, 18, 19}); set_shape_labels(pshape_b, {20, 21, 22, 23, 24, 25, 26, 27, 28, 29}); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); + auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); const auto out_shape = op->get_output_partial_shape(0); @@ -1030,20 +1056,20 @@ TEST(type_prop, broadcast_v3_labels_different_dynamic_mixed_dims_broadcast_bidir TEST(type_prop, broadcast_v3_labels_same_dynamic_mixed_dims_broadcast_bidirectional) { // Both params have dimensions with the same labels - PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, {4, 8}, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 9}, 1, {3, 9}, -1, {1, 9}, -1, {3, 19}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 8}, {3, 9}, {4, 8}, -1, -1, {3, 19}, {4, 18}}; ov::TensorLabel expected_labels{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}; set_shape_labels(pshape_a, expected_labels); set_shape_labels(pshape_b, expected_labels); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); + auto op = make_shared(data, shape_of, "BIDIRECTIONAL"); const auto out_shape = op->get_output_partial_shape(0); @@ -1052,33 +1078,33 @@ TEST(type_prop, broadcast_v3_labels_same_dynamic_mixed_dims_broadcast_bidirectio } TEST(type_prop, broadcast_v3_in0_interval_in1_param_rank_bigger_bidirectional) { - PartialShape pshape_a{{4, 8}, 1}; - auto data = make_shared(element::i32, pshape_a); - auto target_shape_param = make_shared(element::i32, Shape{3}); - auto broadcast = make_shared(data, target_shape_param, op::BroadcastType::BIDIRECTIONAL); + ov::PartialShape pshape_a{{4, 8}, 1}; + auto data = make_shared(ov::element::i32, pshape_a); + auto target_shape_param = make_shared(ov::element::i32, ov::Shape{3}); + auto broadcast = make_shared(data, target_shape_param, ov::op::BroadcastType::BIDIRECTIONAL); - EXPECT_EQ(broadcast->get_output_partial_shape(0), (PartialShape{-1, {4, 8}, -1})); + EXPECT_EQ(broadcast->get_output_partial_shape(0), (ov::PartialShape{-1, {4, 8}, -1})); } TEST(type_prop, broadcast_v3_in0_interval_in1_param_rank_smaller_bidirectional) { - PartialShape pshape_a{-1, 2, {1, 10}, {4, 8}, 1}; - auto data = make_shared(element::i32, pshape_a); - auto target_shape_param 
= make_shared(element::i32, Shape{3}); - auto broadcast = make_shared(data, target_shape_param, op::BroadcastType::BIDIRECTIONAL); + ov::PartialShape pshape_a{-1, 2, {1, 10}, {4, 8}, 1}; + auto data = make_shared(ov::element::i32, pshape_a); + auto target_shape_param = make_shared(ov::element::i32, ov::Shape{3}); + auto broadcast = make_shared(data, target_shape_param, ov::op::BroadcastType::BIDIRECTIONAL); - EXPECT_EQ(broadcast->get_output_partial_shape(0), (PartialShape{-1, 2, -1, {4, 8}, -1})); + EXPECT_EQ(broadcast->get_output_partial_shape(0), (ov::PartialShape{-1, 2, -1, {4, 8}, -1})); } TEST(type_prop, broadcast_v3_labels_in0_dims_in1_param_bidirectional) { - PartialShape pshape_a{-1, 2, 1, {4, 8}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, {4, 8}, {1, 10}}; - PartialShape expected_shape{-1, 2, -1, {4, 8}, -1}; + ov::PartialShape expected_shape{-1, 2, -1, {4, 8}, -1}; ov::TensorLabel expected_labels{10, 11, 12, 13, 14}; set_shape_labels(pshape_a, expected_labels); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape_param = std::make_shared(element::i32, Shape{5}); - auto broadcast = make_shared(data, target_shape_param, op::BroadcastType::BIDIRECTIONAL); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape_param = std::make_shared(ov::element::i32, ov::Shape{5}); + auto broadcast = make_shared(data, target_shape_param, ov::op::BroadcastType::BIDIRECTIONAL); const auto& out_shape = broadcast->get_output_partial_shape(0); @@ -1088,17 +1114,17 @@ TEST(type_prop, broadcast_v3_labels_in0_dims_in1_param_bidirectional) { TEST(type_prop, broadcast_v3_non_broadcastable_dims_numpy) { // Numpy mode for v3::Broadcast mode is one directional - PartialShape pshape_a{{4, 8}, {2, 4}}; - PartialShape pshape_b{{1}, {5, 6}}; + ov::PartialShape pshape_a{{4, 8}, {2, 4}}; + ov::PartialShape pshape_b{{1}, {5, 6}}; // No validation for non-broadcastable dimensions pair - PartialShape expected_shape = {1, {5, 6}}; + ov::PartialShape expected_shape = {1, {5, 6}}; - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "NUMPY"); + auto op = make_shared(data, shape_of, "NUMPY"); const auto out_shape = op->get_output_partial_shape(0); EXPECT_EQ(out_shape, expected_shape); @@ -1107,18 +1133,18 @@ TEST(type_prop, broadcast_v3_non_broadcastable_dims_numpy) { TEST(type_prop, broadcast_v3_labels_in0_dynamic_mixed_dims_numpy) { // Numpy mode for v3::Broadcast mode is one directional // All dimensions of A have labels, B without labels - PartialShape pshape_a{-1, 2, 1, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; set_shape_labels(pshape_a, {10, 11, 12, 13, 14, 15, 16, 17, 18}); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = 
make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "NUMPY"); + auto op = make_shared(data, shape_of, "NUMPY"); const auto out_shape = op->get_output_partial_shape(0); EXPECT_EQ(out_shape, expected_shape); @@ -1129,20 +1155,20 @@ TEST(type_prop, broadcast_v3_labels_in0_dynamic_mixed_dims_numpy) { TEST(type_prop, broadcast_v3_labels_in1_dynamic_mixed_dims_numpy) { // Numpy mode for v3::Broadcast mode is one directional // All dimensions of B have labels, A without labels - PartialShape pshape_a{-1, 2, 1, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; // Output shape is a copy of the target shape, `B` labels are propagated ov::TensorLabel expected_labels{10, 11, 12, 13, 14, 15, 16, 17, 18}; set_shape_labels(pshape_b, expected_labels); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "NUMPY"); + auto op = make_shared(data, shape_of, "NUMPY"); const auto out_shape = op->get_output_partial_shape(0); @@ -1153,21 +1179,21 @@ TEST(type_prop, broadcast_v3_labels_in1_dynamic_mixed_dims_numpy) { TEST(type_prop, broadcast_v3_labels_both_inputs_dynamic_mixed_dims_numpy) { // Numpy mode for v3::Broadcast mode is one directional // All dimensions of A and B have labels - PartialShape pshape_a{-1, 2, 1, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; - PartialShape pshape_b{-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; + ov::PartialShape pshape_a{-1, 2, 1, -1, {4, 8}, -1, {1, 8}, {1, 10}, {4, 18}}; + ov::PartialShape pshape_b{-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; - PartialShape expected_shape = {-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; + ov::PartialShape expected_shape = {-1, 2, {3, 9}, {4, 10}, -1, {5, 11}, -1, {6, 20}, {1, 10}}; // Output shape is a copy of the target shape, `B` labels are propagated ov::TensorLabel expected_labels{20, 21, 22, 23, 24, 25, 26, 27, 28}; set_shape_labels(pshape_a, {10, 11, 12, 13, 14, 15, 16, 17, 18}); set_shape_labels(pshape_b, {20, 21, 22, 23, 24, 25, 26, 27, 28}); - auto data = std::make_shared(element::f32, pshape_a); - auto target_shape = std::make_shared(element::f32, pshape_b); - auto shape_of = make_shared(target_shape); + auto data = std::make_shared(ov::element::f32, pshape_a); + auto target_shape = std::make_shared(ov::element::f32, pshape_b); + auto shape_of = make_shared(target_shape); - auto op = make_shared(data, shape_of, "NUMPY"); + auto op = make_shared(data, shape_of, "NUMPY"); const auto out_shape = op->get_output_partial_shape(0); @@ -1176,20 +1202,21 @@ TEST(type_prop, broadcast_v3_labels_both_inputs_dynamic_mixed_dims_numpy) { } TEST(type_prop, 
broadcast_v3_labels_dynamic_mixed_dims_explicit) {
-    PartialShape pshape_a{2, {6, 8}, -1};
-    PartialShape pshape_b{2, -1, {6, 8}, -1, 5};
+    ov::PartialShape pshape_a{2, {6, 8}, -1};
+    ov::PartialShape pshape_b{2, -1, {6, 8}, -1, 5};

-    PartialShape expected_shape = {2, -1, {6, 8}, -1, 5};
+    ov::PartialShape expected_shape = {2, -1, {6, 8}, -1, 5};
     ov::TensorLabel expected_labels{21, 22, 23, 24, 25};
     set_shape_labels(pshape_b, {21, 22, 23, 24, 25});

-    auto axis_map = std::make_shared(element::i32, Shape{3}, std::vector{0, 2, 3});
+    auto axis_map =
+        std::make_shared(ov::element::i32, ov::Shape{3}, std::vector{0, 2, 3});

-    auto data = std::make_shared(element::f32, pshape_a);
-    auto target_shape = std::make_shared(element::f32, pshape_b);
-    auto shape_of = make_shared(target_shape);
+    auto data = std::make_shared(ov::element::f32, pshape_a);
+    auto target_shape = std::make_shared(ov::element::f32, pshape_b);
+    auto shape_of = make_shared(target_shape);

-    auto op = make_shared(data, shape_of, axis_map, "EXPLICIT");
+    auto op = make_shared(data, shape_of, axis_map, "EXPLICIT");

     const auto out_shape = op->get_output_partial_shape(0);

@@ -1200,25 +1227,25 @@ TEST(type_prop, broadcast_v3_labels_dynamic_mixed_dims_explicit) {
 TEST(type_prop, broadcast_v3_eval_labels_static_dims_numpy) {
     // Numpy mode for v3::Broadcast mode is one directional
     // All dimensions of A have labels, B without labels
-    PartialShape pshape_a{1, 1};
-    PartialShape pshape_b{2, 3};
-    PartialShape pshape_c{1, 3};
+    ov::PartialShape pshape_a{1, 1};
+    ov::PartialShape pshape_b{2, 3};
+    ov::PartialShape pshape_c{1, 3};

-    PartialShape expected_shape = {2, 3};
+    ov::PartialShape expected_shape = {2, 3};
     ov::TensorLabel expected_labels{22, 23};
     set_shape_labels(pshape_b, {22, 23});

-    auto a = std::make_shared(element::f32, pshape_a);
-    auto b = std::make_shared(element::f32, pshape_b);
-    auto shape_of_a = make_shared(a);
-    auto shape_of_b = make_shared(b);
+    auto a = std::make_shared(ov::element::f32, pshape_a);
+    auto b = std::make_shared(ov::element::f32, pshape_b);
+    auto shape_of_a = make_shared(a);
+    auto shape_of_b = make_shared(b);

-    auto broadcast_a = make_shared(a, shape_of_b, "NUMPY");
-    auto shape_of_broadcast_a = make_shared(broadcast_a);
+    auto broadcast_a = make_shared(a, shape_of_b, "NUMPY");
+    auto shape_of_broadcast_a = make_shared(broadcast_a);

-    auto c = std::make_shared(element::f32, pshape_c);
-    auto broadcast_c = make_shared(c, shape_of_broadcast_a, "NUMPY");
+    auto c = std::make_shared(ov::element::f32, pshape_c);
+    auto broadcast_c = make_shared(c, shape_of_broadcast_a, "NUMPY");

     const auto out_shape = broadcast_c->get_output_partial_shape(0);

@@ -1227,27 +1254,27 @@ TEST(type_prop, broadcast_v3_eval_labels_static_dims_numpy) {
 }

 TEST(type_prop, broadcast_v3_eval_labels_static_dims_bidirectional) {
-    PartialShape pshape_a{1, 3};
-    PartialShape pshape_b{2, 1};
-    PartialShape pshape_c{1, 1};
+    ov::PartialShape pshape_a{1, 3};
+    ov::PartialShape pshape_b{2, 1};
+    ov::PartialShape pshape_c{1, 1};

-    PartialShape expected_shape = {2, 3};
+    ov::PartialShape expected_shape = {2, 3};
     ov::TensorLabel expected_labels{22, 13};
     set_shape_labels(pshape_a, {12, 13});
     set_shape_labels(pshape_b, {22, 23});
     set_shape_labels(pshape_c, {33, 33});

-    auto a = std::make_shared(element::f32, pshape_a);
-    auto b = std::make_shared(element::f32, pshape_b);
-    auto shape_of_a = make_shared(a);
-    auto shape_of_b = make_shared(b);
+    auto a = std::make_shared(ov::element::f32, pshape_a);
+    auto b = std::make_shared(ov::element::f32, pshape_b);
+    auto shape_of_a = make_shared(a);
+    auto shape_of_b = make_shared(b);

-    auto broadcast_a = make_shared(a, shape_of_b, "BIDIRECTIONAL");
-    auto shape_of_broadcast_a = make_shared(broadcast_a);
+    auto broadcast_a = make_shared(a, shape_of_b, "BIDIRECTIONAL");
+    auto shape_of_broadcast_a = make_shared(broadcast_a);

-    auto c = std::make_shared(element::f32, pshape_c);
-    auto broadcast_c = make_shared(c, shape_of_broadcast_a, "BIDIRECTIONAL");
+    auto c = std::make_shared(ov::element::f32, pshape_c);
+    auto broadcast_c = make_shared(c, shape_of_broadcast_a, "BIDIRECTIONAL");

     const auto out_shape = broadcast_c->get_output_partial_shape(0);

@@ -1256,23 +1283,27 @@ TEST(type_prop, broadcast_v3_eval_labels_static_dims_bidirectional) {
 }

 TEST(type_prop, broadcast_v3_bidirectional_tricky_partial_value_case_and_equal_partial_value_propagation) {
-    PartialShape pshape_a{{0, 10}, 1, 4};
-    PartialShape pshape_b{{0, 10}, 1};
+    ov::PartialShape pshape_a{{0, 10}, 1, 4};
+    ov::PartialShape pshape_b{{0, 10}, 1};

-    PartialShape expected_shape = PartialShape{{0, 10}, 1, 4};
+    ov::PartialShape expected_shape = ov::PartialShape{{0, 10}, 1, 4};

-    auto a = std::make_shared(element::f32, pshape_a);
-    auto b = std::make_shared(element::f32, pshape_b);
-    auto shape_of_b = make_shared(b);
-    auto concat =
-        make_shared(ov::OutputVector{shape_of_b, op::v0::Constant::create(element::i64, {1}, {4})}, 0);
-    auto equal = make_shared(concat, op::v0::Constant::create(element::i64, {3}, {-1, -1, -1}));
-    auto select = make_shared(equal, op::Constant::create(element::i64, {3}, {1, 1, 1}), concat);
+    auto a = std::make_shared(ov::element::f32, pshape_a);
+    auto b = std::make_shared(ov::element::f32, pshape_b);
+    auto shape_of_b = make_shared(b);
+    auto concat = make_shared(
+        ov::OutputVector{shape_of_b, ov::op::v0::Constant::create(ov::element::i64, {1}, {4})},
+        0);
+    auto equal =
+        make_shared(concat, ov::op::v0::Constant::create(ov::element::i64, {3}, {-1, -1, -1}));
+    auto select =
+        make_shared(equal, ov::op::v0::Constant::create(ov::element::i64, {3}, {1, 1, 1}), concat);

-    PartialShape shape;
-    auto broadcast_a = make_shared(a, select, "BIDIRECTIONAL");
+    ov::PartialShape shape;
+    auto broadcast_a = make_shared(a, select, "BIDIRECTIONAL");

     const auto out_shape = broadcast_a->get_output_partial_shape(0);

+    OPENVINO_SUPPRESS_DEPRECATED_START
     EXPECT_EQ(out_shape, expected_shape);
     {
         auto constant = ov::get_constant_from_source(equal->output(0));
@@ -1281,14 +1312,15 @@ TEST(type_prop, broadcast_v3_bidirectional_tricky_partial_value_case_and_equal_p
         EXPECT_EQ(calculated, expected);
     }
     {
-        equal = make_shared(concat, op::v0::Constant::create(element::i64, {3}, {5, 1, 4}));
+        equal = make_shared(concat, ov::op::v0::Constant::create(ov::element::i64, {3}, {5, 1, 4}));
         EXPECT_TRUE(ov::get_constant_from_source(equal->output(0)) == nullptr);
     }
     {
-        equal = make_shared(concat, op::v0::Constant::create(element::i64, {3}, {11, 1, 4}));
+        equal = make_shared(concat, ov::op::v0::Constant::create(ov::element::i64, {3}, {11, 1, 4}));
         auto constant = ov::get_constant_from_source(equal->output(0));
         EXPECT_TRUE(constant != nullptr);
         std::vector expected{false, true, true}, calculated = constant->get_vector();
         EXPECT_EQ(calculated, expected);
     }
+    OPENVINO_SUPPRESS_DEPRECATED_END
 }
diff --git a/src/core/tests/type_prop/bucketize.cpp b/src/core/tests/type_prop/bucketize.cpp
index 1a10b8ea828..7608f89cae1 100644
--- a/src/core/tests/type_prop/bucketize.cpp
+++ b/src/core/tests/type_prop/bucketize.cpp
@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include <gtest/gtest.h>
+
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
 #include "openvino/opsets/opset11.hpp"

 using namespace std;
diff --git a/src/core/tests/type_prop/divide.cpp b/src/core/tests/type_prop/divide.cpp
index 364dfae06ba..9a4ffdd6e09 100644
--- a/src/core/tests/type_prop/divide.cpp
+++ b/src/core/tests/type_prop/divide.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/divide.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Divide>;
+using Type = ::testing::Types<ov::op::v1::Divide>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_divide, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/floor_mod.cpp b/src/core/tests/type_prop/floor_mod.cpp
index 75761dcc7e4..6189b10d112 100644
--- a/src/core/tests/type_prop/floor_mod.cpp
+++ b/src/core/tests/type_prop/floor_mod.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/floor_mod.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::FloorMod>;
+using Type = ::testing::Types<ov::op::v1::FloorMod>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_floormod, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/maximum.cpp b/src/core/tests/type_prop/maximum.cpp
index feef491e8da..d08df453dab 100644
--- a/src/core/tests/type_prop/maximum.cpp
+++ b/src/core/tests/type_prop/maximum.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/maximum.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Maximum>;
+using Type = ::testing::Types<ov::op::v1::Maximum>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_maximum, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/minimum.cpp b/src/core/tests/type_prop/minimum.cpp
index c038b77828e..52b3554eaca 100644
--- a/src/core/tests/type_prop/minimum.cpp
+++ b/src/core/tests/type_prop/minimum.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/minimum.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Minimum>;
+using Type = ::testing::Types<ov::op::v1::Minimum>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_minimum, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/mod.cpp b/src/core/tests/type_prop/mod.cpp
index f5f4196706d..b1dbab11eea 100644
--- a/src/core/tests/type_prop/mod.cpp
+++ b/src/core/tests/type_prop/mod.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/mod.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Mod>;
+using Type = ::testing::Types<ov::op::v1::Mod>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_mod, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/multiply.cpp b/src/core/tests/type_prop/multiply.cpp
index b57e370e8c4..3d68444d563 100644
--- a/src/core/tests/type_prop/multiply.cpp
+++ b/src/core/tests/type_prop/multiply.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/multiply.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Multiply>;
+using Type = ::testing::Types<ov::op::v1::Multiply>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_multiply, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/power.cpp b/src/core/tests/type_prop/power.cpp
index 4e73fb672ba..41e05f332c7 100644
--- a/src/core/tests/type_prop/power.cpp
+++ b/src/core/tests/type_prop/power.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/power.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Power>;
+using Type = ::testing::Types<ov::op::v1::Power>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_power, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/squared_difference.cpp b/src/core/tests/type_prop/squared_difference.cpp
index 82a6025146e..37b85df7ce0 100644
--- a/src/core/tests/type_prop/squared_difference.cpp
+++ b/src/core/tests/type_prop/squared_difference.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/squared_difference.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v0::SquaredDifference>;
+using Type = ::testing::Types<ov::op::v0::SquaredDifference>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_squared_difference, ArithmeticOperator, Type);
diff --git a/src/core/tests/type_prop/subtract.cpp b/src/core/tests/type_prop/subtract.cpp
index 0d3a22085ce..836b7997a18 100644
--- a/src/core/tests/type_prop/subtract.cpp
+++ b/src/core/tests/type_prop/subtract.cpp
@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#include "openvino/op/subtract.hpp"
+
 #include "arithmetic_ops.hpp"

-using Type = ::testing::Types<ngraph::op::v1::Subtract>;
+using Type = ::testing::Types<ov::op::v1::Subtract>;

 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_subtract, ArithmeticOperator, Type);