Removed legacy headers from some core tests (#19328)

* Removed legacy headers from some core tests

* Fixed build
Ilya Churaev 2023-08-24 06:55:21 +04:00 committed by GitHub
parent 99cc3624b7
commit b77e47970d
44 changed files with 2642 additions and 2295 deletions
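Reviewer note: the pattern repeated across these 44 files is the swap from legacy nGraph includes and the ngraph namespace to the fine-grained OpenVINO 2.0 headers with explicit ov:: qualification. A minimal before/after sketch of the style (illustrative only, not taken from any one file):

    // Legacy style being removed:
    //   #include "ngraph/ngraph.hpp"
    //   using namespace ngraph;
    //   auto p = std::make_shared<op::Parameter>(element::f32, Shape{1, 3});

    // Replacement style used throughout this commit:
    #include <memory>

    #include "openvino/op/parameter.hpp"

    std::shared_ptr<ov::op::v0::Parameter> make_input() {
        // Ops are addressed by their versioned namespace and per-op header.
        return std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    }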


@@ -2,12 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <ngraph/except.hpp>
-#include <openvino/op/abs.hpp>
-#include <openvino/op/constant.hpp>
-#include <openvino/opsets/opset.hpp>
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
+#include "openvino/op/abs.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset.hpp"
 #ifdef SELECTIVE_BUILD_ANALYZER
 # define SELECTIVE_BUILD_ANALYZER_ON


@@ -2,12 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <ngraph/except.hpp>
-#include <openvino/op/abs.hpp>
-#include <openvino/op/constant.hpp>
-#include <openvino/opsets/opset.hpp>
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
+#include "openvino/op/abs.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset.hpp"
 #ifdef SELECTIVE_BUILD_ANALYZER
 # define SELECTIVE_BUILD_ANALYZER_ON


@@ -2,12 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <ngraph/except.hpp>
-#include <openvino/op/abs.hpp>
-#include <openvino/op/constant.hpp>
-#include <openvino/opsets/opset.hpp>
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
+#include "openvino/op/abs.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/opsets/opset.hpp"
 #ifdef SELECTIVE_BUILD_ANALYZER
 # define SELECTIVE_BUILD_ANALYZER_ON
@@ -33,7 +32,7 @@ TEST(conditional_compilation, disabled_op_scope) {
     EXPECT_EQ(n, 42);
     // Simple Scope1 is disabled and throws exception
-    ASSERT_THROW(OV_OP_SCOPE(Scope1), ngraph::ngraph_error);
+    ASSERT_THROW(OV_OP_SCOPE(Scope1), ov::Exception);
 #undef ov_op_Scope0
 }
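Reviewer note on the hunk above: the disabled-scope assertion now expects the base ov::Exception instead of the removed ngraph::ngraph_error. A self-contained sketch of the same assertion pattern, with a hypothetical helper standing in for a disabled OV_OP_SCOPE region:

    #include <gtest/gtest.h>

    #include "openvino/core/except.hpp"

    // Hypothetical helper that fails the way a disabled scope does:
    // OPENVINO_THROW raises ov::Exception, the type the test now expects.
    static void disabled_scope() {
        OPENVINO_THROW("Scope is disabled in this selective build");
    }

    TEST(conditional_compilation_sketch, disabled_scope_throws_ov_exception) {
        ASSERT_THROW(disabled_scope(), ov::Exception);
    }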


@@ -2,10 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include <openvino/pass/manager.hpp>
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
 #include "openvino/frontend/extension/decoder_transformation.hpp"
+#include "openvino/pass/manager.hpp"
 using namespace ov::frontend;


@@ -2,15 +2,14 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
 #include <memory>
-#include <openvino/frontend/exception.hpp>
-#include <openvino/frontend/manager.hpp>
 #include "common_test_utils/file_utils.hpp"
-#include "gmock/gmock.h"
-#include "gtest/gtest.h"
-#include "ngraph/file_util.hpp"
-#include "ngraph/util.hpp"
+#include "openvino/frontend/exception.hpp"
+#include "openvino/frontend/manager.hpp"
 #include "openvino/util/file_util.hpp"
 using namespace ov::frontend;
@@ -156,9 +155,9 @@ TEST(FrontEndManagerTest, testDefaultInputModel) {
     ASSERT_ANY_THROW(im->cut_and_add_new_output(nullptr, ""));
     ASSERT_ANY_THROW(im->add_output(nullptr));
     ASSERT_ANY_THROW(im->remove_output(nullptr));
-    ASSERT_ANY_THROW(im->set_partial_shape(nullptr, ngraph::Shape{}));
+    ASSERT_ANY_THROW(im->set_partial_shape(nullptr, ov::Shape{}));
     ASSERT_ANY_THROW(im->get_partial_shape(nullptr));
-    ASSERT_ANY_THROW(im->set_element_type(nullptr, ngraph::element::Type{}));
+    ASSERT_ANY_THROW(im->set_element_type(nullptr, ov::element::Type{}));
     ASSERT_ANY_THROW(im->set_tensor_value(nullptr, nullptr));
     ASSERT_ANY_THROW(im->set_tensor_partial_value(nullptr, nullptr, nullptr));
 }


@@ -2,7 +2,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
-#include "ngraph/visibility.hpp"
 #include "openvino/frontend/exception.hpp"
 #include "openvino/frontend/manager.hpp"
 #include "openvino/frontend/visibility.hpp"
@@ -10,7 +9,6 @@
 #define MOCK_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS
-using namespace ngraph;
 using namespace ov::frontend;
 class InputModelMock : public InputModel {
@@ -102,16 +100,16 @@ public:
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
     }
-    void set_partial_shape(const Place::Ptr& place, const PartialShape& shape) override {
+    void set_partial_shape(const Place::Ptr& place, const ov::PartialShape& shape) override {
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
     }
-    PartialShape get_partial_shape(const Place::Ptr& place) const override {
+    ov::PartialShape get_partial_shape(const Place::Ptr& place) const override {
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
         return {};
     }
-    void set_element_type(const Place::Ptr& place, const element::Type& type) override {
+    void set_element_type(const Place::Ptr& place, const ov::element::Type& type) override {
         FRONT_END_GENERAL_CHECK(!m_throw, "Test exception");
     }
@@ -185,13 +183,14 @@ public:
     std::shared_ptr<ov::Model> convert(const InputModel::Ptr& model) const override {
         FRONT_END_GENERAL_CHECK(!m_throw_next, "Test exception");
-        auto shape = Shape{1, 2, 300, 300};
+        auto shape = ov::Shape{1, 2, 300, 300};
         auto param = std::make_shared<ov::opset8::Parameter>(ov::element::f32, shape);
         std::vector<float> data(ov::shape_size(shape), 1.f);
         auto constant = ov::opset8::Constant::create(ov::element::f32, shape, data);
         auto op = std::make_shared<ov::opset8::Add>(param, constant);
         auto res = std::make_shared<ov::opset8::Result>(op);
-        auto ov_model = std::make_shared<ov::Model>(ResultVector({res}), ParameterVector({param}), "mock1_model");
+        auto ov_model =
+            std::make_shared<ov::Model>(ov::ResultVector({res}), ov::ParameterVector({param}), "mock1_model");
         ov_model->get_rt_info()["mock_test"] = std::string(1024, 't');
         return ov_model;
     }


@@ -4,7 +4,8 @@
 #include "openvino/frontend/extension/progress_reporter.hpp"
-#include "gtest/gtest.h"
+#include <gtest/gtest.h>
 #include "openvino/frontend/exception.hpp"
 using namespace ov::frontend;


@@ -29,7 +29,7 @@ protected:
 };
 namespace {
-std::shared_ptr<ngraph::Function> CreateTestFunction(const std::string& name, const ngraph::PartialShape& ps) {
+std::shared_ptr<ov::Model> create_test_model(const std::string& name, const ov::PartialShape& ps) {
     const auto param = std::make_shared<ov::opset8::Parameter>(ov::element::f16, ps);
     const auto convert = std::make_shared<ov::opset8::Convert>(param, ov::element::f32);
     const auto result = std::make_shared<ov::opset8::Result>(convert);
@@ -38,7 +38,7 @@ std::shared_ptr<ngraph::Function> CreateTestFunction(const std::string& name, co
 }  // namespace
 TEST_F(SerializationCleanupTest, SerializationShouldWork) {
-    const auto f = CreateTestFunction("StaticFunction", ngraph::PartialShape{2, 2});
+    const auto f = create_test_model("StaticFunction", ov::PartialShape{2, 2});
     ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(f);
@@ -48,7 +48,7 @@ TEST_F(SerializationCleanupTest, SerializationShouldWork) {
 }
 TEST_F(SerializationCleanupTest, SerializationShouldWorkWithDynamicFunction) {
-    const auto f = CreateTestFunction("DynamicFunction", ngraph::PartialShape{ngraph::Dimension()});
+    const auto f = create_test_model("DynamicFunction", ov::PartialShape{ov::Dimension()});
     ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(f);


@@ -44,9 +44,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsI32) {
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -61,9 +61,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsI64) {
     auto A = ov::opset8::Constant::create(ov::element::i64, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::i64, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -78,9 +78,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsFP16) {
     auto A = ov::opset8::Constant::create(ov::element::f16, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::f16, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -95,9 +95,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsFP32) {
     auto A = ov::opset8::Constant::create(ov::element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto B = ov::opset8::Constant::create(ov::element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -113,9 +113,9 @@ TEST_F(SerializationConstantCompressionTest, NonIdenticalConstantsI64) {
     auto A = ov::opset8::Constant::create(ov::element::i64, shape, {2, 2});
     auto B = ov::opset8::Constant::create(ov::element::i64, shape, {0, 128});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -132,9 +132,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsTimesTwo) {
     auto C = ov::opset8::Constant::create(ov::element::i32, shape, {0, 3, 1, 2, 5, 6, 25, 3});
     auto D = ov::opset8::Constant::create(ov::element::i32, shape, {0, 3, 1, 2, 5, 6, 25, 3});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B, C, D}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B, C, D}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -153,9 +153,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsTimesTwoMultipleO
     auto E = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto F = ov::opset8::Constant::create(ov::element::i32, shape, {0, 3, 1, 2, 5, 6, 25, 3});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B, C, D, E, F}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B, C, D, E, F}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -170,9 +170,9 @@ TEST_F(SerializationConstantCompressionTest, NonIdenticalConstants) {
    auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
    auto B = ov::opset8::Constant::create(ov::element::i32, shape, {2, 2, 3, 4, 5, 6, 7, 8});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -187,9 +187,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsDifferentTypesI32
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 0, 2, 0, 3, 0, 4, 0});
     auto B = ov::opset8::Constant::create(ov::element::i64, ov::Shape({1, 2, 2}), {1, 2, 3, 4});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
@@ -204,9 +204,9 @@ TEST_F(SerializationConstantCompressionTest, IdenticalConstantsDifferentTypesI32
     auto A = ov::opset8::Constant::create(ov::element::i32, shape, {1, 2});
     auto B = ov::opset8::Constant::create(ov::element::i8, ov::Shape({1, 2, 4}), {1, 0, 0, 0, 2, 0, 0, 0});
-    auto ngraph_a = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
-    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(ngraph_a);
+    auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});
+    ov::pass::Serialize(m_out_xml_path_1, m_out_bin_path_1).run_on_model(model);
     std::ifstream xml_1(m_out_xml_path_1, std::ios::binary);
     std::ifstream bin_1(m_out_bin_path_1, std::ios::binary);
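Reviewer note: the elided tail of each of these tests compares the serialized .bin contents against the constants' byte size to confirm that identical constants are stored only once. A rough sketch of the overall pattern (the file names and the final size check are illustrative, not copied from the tests):

    #include <fstream>
    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/op/constant.hpp"
    #include "openvino/pass/serialize.hpp"

    // Two identical constants should produce a single payload in the .bin file.
    size_t serialize_identical_constants_sketch() {
        const auto shape = ov::Shape{2, 2, 2};
        auto A = ov::op::v0::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
        auto B = ov::op::v0::Constant::create(ov::element::i32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
        auto model = std::make_shared<ov::Model>(ov::NodeVector{A, B}, ov::ParameterVector{});

        ov::pass::Serialize("sketch.xml", "sketch.bin").run_on_model(model);

        // With constant deduplication the expectation is roughly:
        //   bin size == A->get_byte_size()   (one copy, not two)
        std::ifstream bin("sketch.bin", std::ios::binary | std::ios::ate);
        return static_cast<size_t>(bin.tellg());
    }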


@@ -3,12 +3,13 @@
 //
 #include <gtest/gtest.h>
-#include <ie_iextension.h>
 #include "common_test_utils/common_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
-#include "ngraph/pass/serialize.hpp"
+#include "common_test_utils/graph_comparator.hpp"
+#include "ie_iextension.h"
+#include "openvino/pass/manager.hpp"
+#include "openvino/pass/serialize.hpp"
 #include "openvino/runtime/core.hpp"
 class CustomOpsSerializationTest : public ::testing::Test {


@@ -4,12 +4,12 @@
 #include <gtest/gtest.h>
-#include <common_test_utils/file_utils.hpp>
-#include "common_test_utils/ngraph_test_utils.hpp"
-#include "ngraph/pass/serialize.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "common_test_utils/file_utils.hpp"
+#include "common_test_utils/test_common.hpp"
 #include "openvino/frontend/manager.hpp"
 #include "openvino/opsets/opset8.hpp"
+#include "openvino/pass/manager.hpp"
 #include "transformations/rt_info/attributes.hpp"
 class RTInfoSerializationTest : public ov::test::TestsCommon {


@@ -11,8 +11,6 @@
 #include "common_test_utils/file_utils.hpp"
 #include "common_test_utils/graph_comparator.hpp"
 #include "common_test_utils/test_common.hpp"
-#include "ngraph/pass/manager.hpp"
-#include "ngraph/pass/serialize.hpp"
 #include "openvino/util/file_util.hpp"
 #include "read_ir.hpp"


@@ -7,7 +7,7 @@
 #include "common_test_utils/common_utils.hpp"
 #include "common_test_utils/data_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
-#include "common_test_utils/ngraph_test_utils.hpp"
+#include "common_test_utils/graph_comparator.hpp"
 #include "gtest/gtest.h"
 #include "ie_blob.h"
 #include "ie_core.hpp"


@@ -29,10 +29,9 @@ protected:
 };
 TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) {
-    std::shared_ptr<ngraph::Function> function;
+    std::shared_ptr<ov::Model> model;
     {
-        auto parameter =
-            std::make_shared<ov::opset8::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
+        auto parameter = std::make_shared<ov::opset8::Parameter>(ov::element::Type_t::f32, ov::Shape{1, 3, 10, 10});
         parameter->set_friendly_name("parameter");
         parameter->get_output_tensor(0).set_names({"input"});
         auto relu_prev = std::make_shared<ov::opset8::Relu>(parameter);
@@ -41,18 +40,18 @@ TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) {
         auto relu = std::make_shared<ov::opset8::Relu>(relu_prev);
         relu->set_friendly_name("relu");
         relu->get_output_tensor(0).set_names({"relu,t", "identity"});
-        const ngraph::ResultVector results{std::make_shared<ov::opset8::Result>(relu)};
+        const ov::ResultVector results{std::make_shared<ov::opset8::Result>(relu)};
         results[0]->set_friendly_name("out");
-        ngraph::ParameterVector params{parameter};
-        function = std::make_shared<ngraph::Function>(results, params, "TensorNames");
+        ov::ParameterVector params{parameter};
+        model = std::make_shared<ov::Model>(results, params, "TensorNames");
     }
-    ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(function);
+    ov::pass::Serialize(m_out_xml_path, m_out_bin_path).run_on_model(model);
     auto result = ov::test::readModel(m_out_xml_path, m_out_bin_path);
     const auto fc = FunctionsComparator::with_default()
                         .enable(FunctionsComparator::ATTRIBUTES)
                         .enable(FunctionsComparator::CONST_VALUES);
-    const auto res = fc.compare(result, function);
+    const auto res = fc.compare(result, model);
     EXPECT_TRUE(res.valid) << res.message;
 }


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Abs>;
+using Type = ::testing::Types<ov::op::v0::Abs>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_abs, UnaryOperator, Type);
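Reviewer note: this one-liner, like the acos/acosh/asin/asinh/atan/atanh files further down, only feeds an op type into the shared UnaryOperator typed suite from unary_ops.hpp, which is not part of this diff. A simplified sketch of how such a typed type-prop suite is usually structured (the fixture below is hypothetical, not the real unary_ops.hpp):

    #include <gtest/gtest.h>

    #include <memory>

    #include "openvino/op/abs.hpp"
    #include "openvino/op/parameter.hpp"

    // Hypothetical simplified fixture standing in for unary_ops.hpp.
    template <typename T>
    class UnaryOperatorSketch : public ::testing::Test {};

    TYPED_TEST_SUITE_P(UnaryOperatorSketch);

    TYPED_TEST_P(UnaryOperatorSketch, static_shape_is_propagated) {
        const auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 4});
        const auto op = std::make_shared<TypeParam>(input);
        // Unary ops keep the element type and shape of their single input.
        EXPECT_EQ(op->get_element_type(), ov::element::f32);
        EXPECT_EQ(op->get_shape(), (ov::Shape{2, 4}));
    }

    REGISTER_TYPED_TEST_SUITE_P(UnaryOperatorSketch, static_shape_is_propagated);

    // Each per-op file then only has to do the equivalent of:
    using SketchTypes = ::testing::Types<ov::op::v0::Abs>;
    INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_abs_sketch, UnaryOperatorSketch, SketchTypes);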


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Acos>;
+using Type = ::testing::Types<ov::op::v0::Acos>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_acos, UnaryOperator, Type);


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Acosh>;
+using Type = ::testing::Types<ov::op::v3::Acosh>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_acosh, UnaryOperator, Type);


@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include <gtest/gtest.h>
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/opsets/opset10.hpp"
 using namespace std;


@@ -2,9 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include <gtest/gtest.h>
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/opsets/opset10.hpp"
 using namespace std;


@@ -2,8 +2,10 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/add.hpp"
 #include "arithmetic_ops.hpp"
-using Type = ::testing::Types<ngraph::op::v1::Add>;
+using Type = ::testing::Types<ov::op::v1::Add>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_add, ArithmeticOperator, Type);

File diff suppressed because it is too large.


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Asin>;
+using Type = ::testing::Types<ov::op::v0::Asin>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_asin, UnaryOperator, Type);


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Asinh>;
+using Type = ::testing::Types<ov::op::v3::Asinh>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_asinh, UnaryOperator, Type);


@@ -2,23 +2,24 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/assign.hpp"
+#include <gtest/gtest.h>
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
-#include "ngraph/op/util/variable.hpp"
-#include "ngraph/opsets/opset5.hpp"
-#include "ngraph/opsets/opset6.hpp"
+#include "openvino/core/model.hpp"
+#include "openvino/op/read_value.hpp"
+#include "openvino/op/util/variable.hpp"
 using namespace std;
-using namespace ngraph;
 TEST(type_prop, assign_variable_not_found) {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 64, 64});
+    auto A = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2, 64, 64});
     try {
-        auto space_to_depth = make_shared<opset5::Assign>(A, "variable_id");
+        auto space_to_depth = make_shared<ov::op::v3::Assign>(A, "variable_id");
         // Should have thrown, so fail if it didn't
         FAIL() << "Should not find variable with variable_id";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), std::string("Can't find variable with id = variable_id"));
     } catch (...) {
         FAIL() << "Deduced type check failed for unexpected reason";
@@ -26,45 +27,51 @@ TEST(type_prop, assign_variable_not_found) {
 }
 TEST(type_prop, assign_deduce) {
-    auto input = make_shared<op::Parameter>(element::f32, Shape{1, 2, 64, 64});
-    auto read_value = make_shared<opset5::ReadValue>(input, "variable_id");
-    auto assign = make_shared<opset5::Assign>(read_value, "variable_id");
+    auto input = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 2, 64, 64});
+    auto read_value = make_shared<ov::op::v3::ReadValue>(input, "variable_id");
+    auto assign = make_shared<ov::op::v3::Assign>(read_value, "variable_id");
-    ASSERT_EQ(assign->get_element_type(), element::f32);
-    ASSERT_EQ(assign->get_shape(), (Shape{1, 2, 64, 64}));
+    ASSERT_EQ(assign->get_element_type(), ov::element::f32);
+    ASSERT_EQ(assign->get_shape(), (ov::Shape{1, 2, 64, 64}));
 }
 TEST(type_prop, assign_read_value_new_shape) {
-    auto input = make_shared<op::Parameter>(element::f16, Shape{4, 3, 2, 1});
-    auto variable = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID"});
-    auto read_value = make_shared<opset6::ReadValue>(input, variable);
-    auto assign = make_shared<opset6::Assign>(read_value, variable);
+    auto input = make_shared<ov::op::v0::Parameter>(ov::element::f16, ov::Shape{4, 3, 2, 1});
+    auto variable = std::make_shared<ov::op::util::Variable>(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID"});
+    auto read_value = make_shared<ov::op::v6::ReadValue>(input, variable);
+    auto assign = make_shared<ov::op::v6::Assign>(read_value, variable);
-    ASSERT_EQ(assign->get_element_type(), element::f16);
-    ASSERT_EQ(assign->get_shape(), (Shape{4, 3, 2, 1}));
+    ASSERT_EQ(assign->get_element_type(), ov::element::f16);
+    ASSERT_EQ(assign->get_shape(), (ov::Shape{4, 3, 2, 1}));
-    auto f = std::make_shared<Function>(ResultVector{}, SinkVector{assign}, ParameterVector{input});
+    auto m = std::make_shared<ov::Model>(ov::ResultVector{}, ov::SinkVector{assign}, ov::ParameterVector{input});
     input->set_partial_shape({3, {4, 5}, 8});
-    f->validate_nodes_and_infer_types();
+    m->validate_nodes_and_infer_types();
-    ASSERT_EQ(assign->get_element_type(), element::f16);
-    ASSERT_EQ(assign->get_output_partial_shape(0), (PartialShape{3, {4, 5}, 8}));
-    ASSERT_EQ(variable->get_info().data_type, element::f16);
-    ASSERT_EQ(variable->get_info().data_shape, (PartialShape{3, {4, 5}, 8}));
+    ASSERT_EQ(assign->get_element_type(), ov::element::f16);
+    ASSERT_EQ(assign->get_output_partial_shape(0), (ov::PartialShape{3, {4, 5}, 8}));
+    ASSERT_EQ(variable->get_info().data_type, ov::element::f16);
+    ASSERT_EQ(variable->get_info().data_shape, (ov::PartialShape{3, {4, 5}, 8}));
 }
 TEST(type_prop, variable_comparison) {
-    auto variable1 = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID"});
-    auto variable2 = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID"});
-    auto variable3 = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::dynamic, "ID1"});
-    auto variable4 = std::make_shared<Variable>(VariableInfo{PartialShape::dynamic(), element::f32, "ID"});
-    auto variable5 = std::make_shared<Variable>(VariableInfo{Shape{1}, element::dynamic, "ID"});
+    auto variable1 = std::make_shared<ov::op::util::Variable>(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID"});
+    auto variable2 = std::make_shared<ov::op::util::Variable>(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID"});
+    auto variable3 = std::make_shared<ov::op::util::Variable>(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "ID1"});
+    auto variable4 = std::make_shared<ov::op::util::Variable>(
+        ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::f32, "ID"});
+    auto variable5 =
+        std::make_shared<ov::op::util::Variable>(ov::op::util::VariableInfo{ov::Shape{1}, ov::element::dynamic, "ID"});
     ASSERT_TRUE(variable1->get_info() == variable2->get_info());
     ASSERT_FALSE(variable1->get_info() == variable3->get_info());
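Reviewer note: for context on the ov::op::util::Variable migration above, a minimal sketch of the v6 ReadValue/Assign pattern in the new API (an illustrative stateful model, not code from the test):

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/op/assign.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/read_value.hpp"
    #include "openvino/op/util/variable.hpp"

    std::shared_ptr<ov::Model> make_stateful_sketch() {
        auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 8});
        // One Variable object is shared by the ReadValue/Assign pair.
        auto variable = std::make_shared<ov::op::util::Variable>(
            ov::op::util::VariableInfo{ov::PartialShape::dynamic(), ov::element::dynamic, "state"});
        auto read = std::make_shared<ov::op::v6::ReadValue>(input, variable);
        auto assign = std::make_shared<ov::op::v6::Assign>(read, variable);
        // Assign is registered as a sink so the state update is kept in the graph.
        return std::make_shared<ov::Model>(ov::ResultVector{},
                                           ov::SinkVector{assign},
                                           ov::ParameterVector{input});
    }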


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Atan>;
+using Type = ::testing::Types<ov::op::v0::Atan>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_atan, UnaryOperator, Type);


@@ -4,6 +4,6 @@
 #include "unary_ops.hpp"
-using Type = ::testing::Types<ngraph::op::Atanh>;
+using Type = ::testing::Types<ov::op::v3::Atanh>;
 INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_atanh, UnaryOperator, Type);


@@ -4,8 +4,9 @@
 #include "ov_ops/augru_cell.hpp"
+#include <gtest/gtest.h>
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/core/attribute_visitor.hpp"
 #include "openvino/opsets/opset9.hpp"
@@ -145,42 +146,42 @@ TEST(type_prop, augru_cell_invalid_input_rank) {
     // Invalid rank for W tensor.
     auto W = make_shared<opset9::Parameter>(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared<op::internal::AUGRUCell>(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
         << "AUGRUCell node was created with invalid data.";
     // Invalid rank for X tensor.
     W = make_shared<opset9::Parameter>(element::f32, PartialShape{gates_count * hidden_size, input_size});
     X = make_shared<opset9::Parameter>(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared<op::internal::AUGRUCell>(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
     // Invalid rank for H_t tensor.
     X = make_shared<opset9::Parameter>(element::f32, PartialShape{batch_size, input_size});
     H_t = make_shared<opset9::Parameter>(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared<op::internal::AUGRUCell>(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
     // Invalid rank for R tensor.
     H_t = make_shared<opset9::Parameter>(element::f32, PartialShape{batch_size, hidden_size});
     R = make_shared<opset9::Parameter>(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared<op::internal::AUGRUCell>(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
     // Invalid rank for B tensor.
     R = make_shared<opset9::Parameter>(element::f32, PartialShape{gates_count * hidden_size, input_size});
     B = make_shared<opset9::Parameter>(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared<op::internal::AUGRUCell>(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
        << "AUGRUCell node was created with invalid data.";
     // Invalid rank for A tensor.
     B = make_shared<opset9::Parameter>(element::f32, PartialShape{gates_count * hidden_size});
     A = make_shared<opset9::Parameter>(element::f32, PartialShape{});
     ASSERT_THROW(const auto unused = make_shared<op::internal::AUGRUCell>(X, H_t, W, R, B, A, hidden_size),
-                 ngraph::NodeValidationFailure)
+                 ov::NodeValidationFailure)
         << "AUGRUCell node was created with invalid data.";
 }


@@ -4,10 +4,12 @@
 #include "ov_ops/augru_sequence.hpp"
+#include <gtest/gtest.h>
 #include "common_test_utils/test_assertions.hpp"
 #include "common_test_utils/type_prop.hpp"
-#include "gtest/gtest.h"
 #include "openvino/core/attribute_visitor.hpp"
+#include "openvino/core/except.hpp"
 #include "openvino/opsets/opset9.hpp"
 using namespace std;
@@ -172,7 +174,7 @@ TEST(type_prop, augru_sequence_invalid_input_dimension) {
     for (size_t i = 0; i < augru_sequence->get_input_size(); i++) {
         augru_sequence = augru_seq_init(params);
         augru_sequence->set_argument(i, invalid_rank_tensor);
-        ASSERT_THROW(augru_sequence->validate_and_infer_types(), ngraph::CheckFailure)
+        ASSERT_THROW(augru_sequence->validate_and_infer_types(), ov::AssertFailure)
             << "AUGRUSequence node was created with invalid data.";
     }
 }
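Reviewer note: the assertion types above move from ngraph::CheckFailure and ngraph::NodeValidationFailure to ov::AssertFailure and ov::NodeValidationFailure. As far as I can tell from the current headers (worth double-checking against openvino/core/except.hpp), ov::NodeValidationFailure derives from ov::AssertFailure, which derives from ov::Exception, so catching a base type still catches validation errors raised by validate_and_infer_types(). A tiny sketch:

    #include <exception>

    #include "openvino/core/except.hpp"

    // True for any OpenVINO check failure, including node validation failures,
    // assuming the hierarchy described above.
    bool is_assert_failure(const std::exception& e) {
        return dynamic_cast<const ov::AssertFailure*>(&e) != nullptr;
    }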

File diff suppressed because it is too large.


@@ -2,37 +2,37 @@
 // SPDX-License-Identifier: Apache-2.0
 //
+#include "openvino/op/batch_norm.hpp"
 #include "common_test_utils/type_prop.hpp"
 #include "gtest/gtest.h"
-#include "ngraph/ngraph.hpp"
 using namespace std;
-using namespace ngraph;
 struct BatchNormInferInputs {
-    element::Type in_et;
-    PartialShape in_shape;
+    ov::element::Type in_et;
+    ov::PartialShape in_shape;
     std::string in_name;
 };
 struct BatchNormInferParams {
-    element::Type data_batch_et;
-    PartialShape data_batch_ps;
+    ov::element::Type data_batch_et;
+    ov::PartialShape data_batch_ps;
     std::vector<BatchNormInferInputs> inputs;
     double epsilon;
 };
 template <class T>
-std::shared_ptr<Node> makeBatchNormOp(const BatchNormInferParams& p) {
+std::shared_ptr<ov::Node> makeBatchNormOp(const BatchNormInferParams& p) {
     if (p.inputs.size() != 4) {
         throw runtime_error("BatchNormInference requires 4 additional inputs for batch"
                             "normalization transformation");
     }
-    auto data_batch = make_shared<op::Parameter>(p.data_batch_et, p.data_batch_ps);
-    auto gamma = make_shared<op::Parameter>(p.inputs[0].in_et, p.inputs[0].in_shape);
-    auto beta = make_shared<op::Parameter>(p.inputs[1].in_et, p.inputs[1].in_shape);
-    auto mean = make_shared<op::Parameter>(p.inputs[2].in_et, p.inputs[2].in_shape);
-    auto variance = make_shared<op::Parameter>(p.inputs[3].in_et, p.inputs[3].in_shape);
+    auto data_batch = make_shared<ov::op::v0::Parameter>(p.data_batch_et, p.data_batch_ps);
+    auto gamma = make_shared<ov::op::v0::Parameter>(p.inputs[0].in_et, p.inputs[0].in_shape);
+    auto beta = make_shared<ov::op::v0::Parameter>(p.inputs[1].in_et, p.inputs[1].in_shape);
+    auto mean = make_shared<ov::op::v0::Parameter>(p.inputs[2].in_et, p.inputs[2].in_shape);
+    auto variance = make_shared<ov::op::v0::Parameter>(p.inputs[3].in_et, p.inputs[3].in_shape);
     return make_shared<T>(data_batch, gamma, beta, mean, variance, p.epsilon);
 }
@@ -42,13 +42,13 @@ class BatchNormTest : public ::testing::Test {};
 TYPED_TEST_SUITE_P(BatchNormTest);
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_2) {
-    PartialShape data_batch_shape{10, 100};
-    element::Type inputs_et = element::f32;
-    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape{100}, "gamma"},
-                                                   {inputs_et, PartialShape{100}, "beta"},
-                                                   {inputs_et, PartialShape{100}, "mean"},
-                                                   {inputs_et, PartialShape{100}, "variance"}};
+    ov::PartialShape data_batch_shape{10, 100};
+    ov::element::Type inputs_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape{100}, "gamma"},
+                                                   {inputs_et, ov::PartialShape{100}, "beta"},
+                                                   {inputs_et, ov::PartialShape{100}, "mean"},
+                                                   {inputs_et, ov::PartialShape{100}, "variance"}};
     double epsilon = 0.001;
@@ -62,13 +62,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_2) {
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_4) {
-    PartialShape data_batch_shape{1, 10, 224, 224};
-    element::Type inputs_et = element::f16;
-    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape{10}, "gamma"},
-                                                   {inputs_et, PartialShape{10}, "beta"},
-                                                   {inputs_et, PartialShape{10}, "mean"},
-                                                   {inputs_et, PartialShape{10}, "variance"}};
+    ov::PartialShape data_batch_shape{1, 10, 224, 224};
+    ov::element::Type inputs_et = ov::element::f16;
+    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape{10}, "gamma"},
+                                                   {inputs_et, ov::PartialShape{10}, "beta"},
+                                                   {inputs_et, ov::PartialShape{10}, "mean"},
+                                                   {inputs_et, ov::PartialShape{10}, "variance"}};
     double epsilon = 0.001;
@@ -82,13 +82,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_basic_data_batch_rank_4) {
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_inputs_rank_dynamic) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type inputs_et = element::f32;
-    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                                                   {inputs_et, PartialShape::dynamic(), "beta"},
-                                                   {inputs_et, PartialShape::dynamic(), "mean"},
-                                                   {inputs_et, PartialShape::dynamic(), "variance"}};
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "variance"}};
     double epsilon = 0.001;
@@ -101,13 +101,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_inputs_rank_dynamic) {
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_channel_inputs_rank_dynamic) {
-    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()};
-    element::Type inputs_et = element::f32;
-    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                                                   {inputs_et, PartialShape::dynamic(), "beta"},
-                                                   {inputs_et, PartialShape::dynamic(), "mean"},
-                                                   {inputs_et, PartialShape::dynamic(), "variance"}};
+    ov::PartialShape data_batch_shape{64, ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "variance"}};
     double epsilon = 0.001;
@@ -117,17 +117,17 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_channel_
     ASSERT_EQ(bn->get_output_size(), 1);
     ASSERT_EQ(bn->get_output_element_type(0), inputs_et);
     ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(
-        PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}));
+        ov::PartialShape{64, ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}));
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_dynamic_some_channel_inputs_rank_static) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
-    std::vector<BatchNormInferInputs> inputs = {{input_et, PartialShape{Dimension::dynamic()}, "gamma"},
-                                                {input_et, PartialShape::dynamic(), "beta"},
-                                                {input_et, PartialShape{Dimension::dynamic()}, "mean"},
-                                                {input_et, PartialShape::dynamic(), "variance"}};
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> inputs = {{input_et, ov::PartialShape{ov::Dimension::dynamic()}, "gamma"},
+                                                {input_et, ov::PartialShape::dynamic(), "beta"},
+                                                {input_et, ov::PartialShape{ov::Dimension::dynamic()}, "mean"},
+                                                {input_et, ov::PartialShape::dynamic(), "variance"}};
     double epsilon = 0.001;
@@ -140,13 +140,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_dynamic_some_ch
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_some_channel_inputs_rank_static) {
-    PartialShape data_batch_shape{64, Dimension::dynamic(), Dimension::dynamic(), 224};
-    element::Type input_et = element::f32;
-    std::vector<BatchNormInferInputs> inputs = {{input_et, PartialShape{3}, "gamma"},
-                                                {input_et, PartialShape::dynamic(), "beta"},
-                                                {input_et, PartialShape{3}, "mean"},
-                                                {input_et, PartialShape{Dimension::dynamic()}, "variance"}};
+    ov::PartialShape data_batch_shape{64, ov::Dimension::dynamic(), ov::Dimension::dynamic(), 224};
+    ov::element::Type input_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> inputs = {{input_et, ov::PartialShape{3}, "gamma"},
+                                                {input_et, ov::PartialShape::dynamic(), "beta"},
+                                                {input_et, ov::PartialShape{3}, "mean"},
+                                                {input_et, ov::PartialShape{ov::Dimension::dynamic()}, "variance"}};
     double epsilon = 0.001;
@@ -155,22 +155,22 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_data_batch_rank_static_some_cha
     ASSERT_EQ(bn->get_output_size(), 1);
     ASSERT_EQ(bn->get_output_element_type(0), input_et);
-    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(PartialShape{64, 3, Dimension::dynamic(), 224}));
+    ASSERT_TRUE(bn->get_output_partial_shape(0).same_scheme(ov::PartialShape{64, 3, ov::Dimension::dynamic(), 224}));
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types) {
-    PartialShape data_batch_shape{10, 100};
-    const std::vector<element::Type> inputs_et{element::i32, element::u32, element::boolean};
+    ov::PartialShape data_batch_shape{10, 100};
+    const std::vector<ov::element::Type> inputs_et{ov::element::i32, ov::element::u32, ov::element::boolean};
     double eps = 0.001;
     std::vector<BatchNormInferParams> bn_tests;
     for (const auto& et : inputs_et) {
-        std::vector<BatchNormInferInputs> ch_inputs = {{et, PartialShape{100}, "gamma"},
-                                                       {et, PartialShape{100}, "beta"},
-                                                       {et, PartialShape{100}, "mean"},
-                                                       {et, PartialShape{100}, "variance"}};
+        std::vector<BatchNormInferInputs> ch_inputs = {{et, ov::PartialShape{100}, "gamma"},
+                                                       {et, ov::PartialShape{100}, "beta"},
+                                                       {et, ov::PartialShape{100}, "mean"},
+                                                       {et, ov::PartialShape{100}, "variance"}};
         bn_tests.push_back(BatchNormInferParams{et, data_batch_shape, ch_inputs, eps});
     }
@@ -179,7 +179,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types) {
     try {
         auto bn = makeBatchNormOp<TypeParam>(params);
         FAIL() << "Invalid input element types not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "Input element types must be floating-point");
     } catch (...) {
         FAIL() << "Input element types check failed for unexpected reason";
@@ -189,23 +189,23 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_inputs_element_types) {
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_types) {
     // Data batch input element type and shape
-    const element::Type data_batch_et = element::f32;
-    const PartialShape data_batch_ps{10, 200};
+    const ov::element::Type data_batch_et = ov::element::f32;
+    const ov::PartialShape data_batch_ps{10, 200};
     // Invalid combination of element types of gamma/beta/mean/variance inputs
-    vector<BatchNormInferInputs> bn_ch_inputs = {{element::f32, PartialShape{200}, "gamma"},
-                                                 {element::f32, PartialShape{200}, "beta"},
-                                                 {element::f32, PartialShape{200}, "mean"},
-                                                 {element::f32, PartialShape{200}, "variance"}};
+    vector<BatchNormInferInputs> bn_ch_inputs = {{ov::element::f32, ov::PartialShape{200}, "gamma"},
+                                                 {ov::element::f32, ov::PartialShape{200}, "beta"},
+                                                 {ov::element::f32, ov::PartialShape{200}, "mean"},
+                                                 {ov::element::f32, ov::PartialShape{200}, "variance"}};
     const double epsilon = 0.001;
     std::vector<BatchNormInferParams> bn_params;
-    bn_params.push_back(BatchNormInferParams{element::f16, data_batch_ps, bn_ch_inputs, epsilon});
+    bn_params.push_back(BatchNormInferParams{ov::element::f16, data_batch_ps, bn_ch_inputs, epsilon});
     for (size_t i = 0; i < bn_ch_inputs.size(); i++) {
         std::vector<BatchNormInferInputs> inputs = bn_ch_inputs;
-        (inputs[i]).in_et = element::f16;
+        (inputs[i]).in_et = ov::element::f16;
         bn_params.push_back(BatchNormInferParams{data_batch_et, data_batch_ps, inputs, epsilon});
     }
@@ -214,7 +214,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_typ
     try {
         auto bn = makeBatchNormOp<TypeParam>(bn_p);
         FAIL() << "Incompatible input element types not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "Input element types do not match");
     } catch (...) {
         FAIL() << "Input element types check failed for unexpected reason";
@@ -223,13 +223,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_inputs_element_typ
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank) {
-    PartialShape data_batch_shape{Dimension::dynamic()};
-    element::Type inputs_et = element::f32;
-    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"},
-                                                   {inputs_et, PartialShape::dynamic(), "beta"},
-                                                   {inputs_et, PartialShape::dynamic(), "mean"},
-                                                   {inputs_et, PartialShape::dynamic(), "variance"}};
+    ov::PartialShape data_batch_shape{ov::Dimension::dynamic()};
+    ov::element::Type inputs_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "beta"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "mean"},
+                                                   {inputs_et, ov::PartialShape::dynamic(), "variance"}};
     double epsilon = 0.001;
@@ -237,7 +237,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank)
     try {
         auto bn = makeBatchNormOp<TypeParam>(params);
         FAIL() << "Data batch input with invalid rank 1 not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "Input argument must have rank of at least 2 (input argument shape: [?])");
     } catch (...) {
         FAIL() << "Data batch input rank check failed for unexpected reason";
@@ -245,13 +245,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_data_batch_input_rank)
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_ranks) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
-    std::vector<BatchNormInferInputs> inputs = {{input_et, PartialShape{3, Dimension::dynamic()}, "gamma"},
-                                                {input_et, PartialShape::dynamic(), "beta"},
-                                                {input_et, PartialShape{Dimension::dynamic()}, "mean"},
-                                                {input_et, PartialShape::dynamic(), "variance"}};
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> inputs = {{input_et, ov::PartialShape{3, ov::Dimension::dynamic()}, "gamma"},
+                                                {input_et, ov::PartialShape::dynamic(), "beta"},
+                                                {input_et, ov::PartialShape{ov::Dimension::dynamic()}, "mean"},
+                                                {input_et, ov::PartialShape::dynamic(), "variance"}};
     double epsilon = 0.001;
@@ -259,7 +259,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_rank
     try {
         auto bn = makeBatchNormOp<TypeParam>(params);
         FAIL() << "Incompatible gamma/beta/mean/variance input ranks not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
         EXPECT_HAS_SUBSTRING(error.what(), "Shapes for gamma/beta/mean/variance do not match");
     } catch (...) {
         FAIL() << "gamma/beta/mean/variance input ranks check failed for unexpected reason";
@@ -267,13 +267,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_input_rank
 }
 TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_inputs_channel_count) {
-    PartialShape data_batch_shape{PartialShape::dynamic()};
-    element::Type input_et = element::f32;
-    std::vector<BatchNormInferInputs> inputs = {{input_et, PartialShape{3}, "gamma"},
-                                                {input_et, PartialShape::dynamic(), "beta"},
-                                                {input_et, PartialShape{4}, "mean"},
-                                                {input_et, PartialShape::dynamic(), "variance"}};
+    ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
+    ov::element::Type input_et = ov::element::f32;
+    std::vector<BatchNormInferInputs> inputs = {{input_et, ov::PartialShape{3}, "gamma"},
+                                                {input_et, ov::PartialShape::dynamic(), "beta"},
+                                                {input_et, ov::PartialShape{4}, "mean"},
+                                                {input_et, ov::PartialShape::dynamic(), "variance"}};
     double epsilon = 0.001;
@@ -281,7 +281,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_channel_inputs_cha
     try {
         auto bn = makeBatchNormOp<TypeParam>(params);
         FAIL() << "Incompatible gamma/beta/mean/variance inputs channel count not detected";
-    } catch (const NodeValidationFailure& error) {
+    } catch (const ov::NodeValidationFailure& error) {
        EXPECT_HAS_SUBSTRING(error.what(), "Shapes for gamma/beta/mean/variance do not match");
     } catch (...) {
         FAIL() << "gamma/beta/mean/variance inputs channel count check failed for unexpected reason";
@@ -289,14 +289,14 @@
} }
TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) { TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) {
PartialShape data_batch_shape{PartialShape::dynamic()}; ov::PartialShape data_batch_shape{ov::PartialShape::dynamic()};
element::Type input_et = element::f32; ov::element::Type input_et = ov::element::f32;
std::vector<BatchNormInferInputs> inputs = { std::vector<BatchNormInferInputs> inputs = {
{input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic()}, "gamma"}, {input_et, ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, "gamma"},
{input_et, PartialShape::dynamic(), "beta"}, {input_et, ov::PartialShape::dynamic(), "beta"},
{input_et, PartialShape{Dimension::dynamic(), Dimension::dynamic()}, "mean"}, {input_et, ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, "mean"},
{input_et, PartialShape::dynamic(), "variance"}}; {input_et, ov::PartialShape::dynamic(), "variance"}};
double epsilon = 0.001; double epsilon = 0.001;
@ -304,7 +304,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) {
try { try {
auto bn = makeBatchNormOp<TypeParam>(params); auto bn = makeBatchNormOp<TypeParam>(params);
FAIL() << "Invalid rank of gamma/beta/mean/variance inputs not detected"; FAIL() << "Invalid rank of gamma/beta/mean/variance inputs not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Shape for gamma/beta/mean/variance ([?,?]) does not have rank 1"); EXPECT_HAS_SUBSTRING(error.what(), "Shape for gamma/beta/mean/variance ([?,?]) does not have rank 1");
} catch (...) { } catch (...) {
FAIL() << "gamma/beta/mean/variance inputs rank check failed for unexpected reason"; FAIL() << "gamma/beta/mean/variance inputs rank check failed for unexpected reason";
@ -312,13 +312,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_channel_inputs_rank) {
} }
TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_channel_inputs_channel_count) { TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_channel_inputs_channel_count) {
PartialShape data_batch_shape{64, 4, Dimension::dynamic(), 224}; ov::PartialShape data_batch_shape{64, 4, ov::Dimension::dynamic(), 224};
element::Type input_et = element::f32; ov::element::Type input_et = ov::element::f32;
std::vector<BatchNormInferInputs> inputs = {{input_et, PartialShape{3}, "gamma"}, std::vector<BatchNormInferInputs> inputs = {{input_et, ov::PartialShape{3}, "gamma"},
{input_et, PartialShape::dynamic(), "beta"}, {input_et, ov::PartialShape::dynamic(), "beta"},
{input_et, PartialShape{3}, "mean"}, {input_et, ov::PartialShape{3}, "mean"},
{input_et, PartialShape::dynamic(), "variance"}}; {input_et, ov::PartialShape::dynamic(), "variance"}};
double epsilon = 0.001; double epsilon = 0.001;
@ -326,7 +326,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_cha
try { try {
auto bn = makeBatchNormOp<TypeParam>(params); auto bn = makeBatchNormOp<TypeParam>(params);
FAIL() << "Incompatible data batch and gamma/beta/mean/variance channel count not detected"; FAIL() << "Incompatible data batch and gamma/beta/mean/variance channel count not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"Input channel dimension (4) does not match " "Input channel dimension (4) does not match "
"shape for gamma/beta/mean/variance ([3])"); "shape for gamma/beta/mean/variance ([3])");
@ -337,13 +337,13 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_incompatible_data_batch_and_cha
} }
TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_zero) { TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_zero) {
PartialShape data_batch_shape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()}; ov::PartialShape data_batch_shape{ov::Dimension::dynamic(), 0, ov::Dimension::dynamic(), ov::Dimension::dynamic()};
element::Type inputs_et = element::f32; ov::element::Type inputs_et = ov::element::f32;
std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape::dynamic(), "gamma"}, std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape::dynamic(), "gamma"},
{inputs_et, PartialShape::dynamic(), "beta"}, {inputs_et, ov::PartialShape::dynamic(), "beta"},
{inputs_et, PartialShape::dynamic(), "mean"}, {inputs_et, ov::PartialShape::dynamic(), "mean"},
{inputs_et, PartialShape::dynamic(), "variance"}}; {inputs_et, ov::PartialShape::dynamic(), "variance"}};
double epsilon = 0.001; double epsilon = 0.001;
@ -351,7 +351,7 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_ze
try { try {
auto bn = makeBatchNormOp<TypeParam>(params); auto bn = makeBatchNormOp<TypeParam>(params);
FAIL() << "Data batch channel count zero not detected"; FAIL() << "Data batch channel count zero not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Channel count must be at least 1"); EXPECT_HAS_SUBSTRING(error.what(), "Channel count must be at least 1");
} catch (...) { } catch (...) {
FAIL() << "Data batch channel count check failed for unexpected reason"; FAIL() << "Data batch channel count check failed for unexpected reason";
@ -359,20 +359,20 @@ TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_input_channels_count_ze
} }
TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_epsilon) { TYPED_TEST_P(BatchNormTest, batch_norm_inference_invalid_epsilon) {
PartialShape data_batch_shape{10, 100}; ov::PartialShape data_batch_shape{10, 100};
element::Type inputs_et = element::f32; ov::element::Type inputs_et = ov::element::f32;
std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, PartialShape{100}, "gamma"}, std::vector<BatchNormInferInputs> ch_inputs = {{inputs_et, ov::PartialShape{100}, "gamma"},
{inputs_et, PartialShape{100}, "beta"}, {inputs_et, ov::PartialShape{100}, "beta"},
{inputs_et, PartialShape{100}, "mean"}, {inputs_et, ov::PartialShape{100}, "mean"},
{inputs_et, PartialShape{100}, "variance"}}; {inputs_et, ov::PartialShape{100}, "variance"}};
double eps_neg = -1.0; double eps_neg = -1.0;
const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, eps_neg}; const BatchNormInferParams params{inputs_et, data_batch_shape, ch_inputs, eps_neg};
try { try {
auto bn = makeBatchNormOp<TypeParam>(params); auto bn = makeBatchNormOp<TypeParam>(params);
FAIL() << "Invalid 'epsilon' attribute value not detected"; FAIL() << "Invalid 'epsilon' attribute value not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"Attribute 'epsilon' must be a floating-point value greater than or equal to zero."); "Attribute 'epsilon' must be a floating-point value greater than or equal to zero.");
} catch (...) { } catch (...) {
@ -397,5 +397,5 @@ REGISTER_TYPED_TEST_SUITE_P(BatchNormTest,
batch_norm_inference_invalid_input_channels_count_zero, batch_norm_inference_invalid_input_channels_count_zero,
batch_norm_inference_invalid_epsilon); batch_norm_inference_invalid_epsilon);
using Types = ::testing::Types<op::v0::BatchNormInference, op::v5::BatchNormInference>; using Types = ::testing::Types<ov::op::v0::BatchNormInference, ov::op::v5::BatchNormInference>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, BatchNormTest, Types); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, BatchNormTest, Types);


@ -2,14 +2,18 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/batch_to_space.hpp"
#include <gtest/gtest.h>
#include <array> #include <array>
#include "common_test_utils/type_prop.hpp" #include "common_test_utils/type_prop.hpp"
#include "gtest/gtest.h" #include "ngraph/util.hpp"
#include "ngraph/ngraph.hpp" #include "openvino/op/constant.hpp"
#include "openvino/op/space_to_batch.hpp"
using namespace std; using namespace std;
using namespace ngraph;
using namespace testing; using namespace testing;
namespace { namespace {
@ -19,33 +23,34 @@ constexpr size_t crops_begin_input_idx = 2;
constexpr size_t crops_end_input_idx = 3; constexpr size_t crops_end_input_idx = 3;
constexpr size_t batch_to_space_required_inputs = 4; constexpr size_t batch_to_space_required_inputs = 4;
struct InputInfo { struct InputInfo {
element::Type in_et; ov::element::Type in_et;
PartialShape in_pshape; ov::PartialShape in_pshape;
}; };
using BatchToSpaceInputParams = std::array<InputInfo, batch_to_space_required_inputs>; using BatchToSpaceInputParams = std::array<InputInfo, batch_to_space_required_inputs>;
std::shared_ptr<Node> makeBatchToSpaceOp(const BatchToSpaceInputParams& p) { std::shared_ptr<ov::Node> makeBatchToSpaceOp(const BatchToSpaceInputParams& p) {
if (p.size() != batch_to_space_required_inputs) { if (p.size() != batch_to_space_required_inputs) {
throw runtime_error("BatchToSpace requires 4 inputs"); throw runtime_error("BatchToSpace requires 4 inputs");
} }
auto data = make_shared<op::Parameter>(p.at(data_input_idx).in_et, p.at(data_input_idx).in_pshape); auto data = make_shared<ov::op::v0::Parameter>(p.at(data_input_idx).in_et, p.at(data_input_idx).in_pshape);
auto block_shape = auto block_shape =
make_shared<op::Parameter>(p.at(block_shape_input_idx).in_et, p.at(block_shape_input_idx).in_pshape); make_shared<ov::op::v0::Parameter>(p.at(block_shape_input_idx).in_et, p.at(block_shape_input_idx).in_pshape);
auto crops_begin = auto crops_begin =
make_shared<op::Parameter>(p.at(crops_begin_input_idx).in_et, p.at(crops_begin_input_idx).in_pshape); make_shared<ov::op::v0::Parameter>(p.at(crops_begin_input_idx).in_et, p.at(crops_begin_input_idx).in_pshape);
auto crops_end = make_shared<op::Parameter>(p.at(crops_end_input_idx).in_et, p.at(crops_end_input_idx).in_pshape); auto crops_end =
return make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); make_shared<ov::op::v0::Parameter>(p.at(crops_end_input_idx).in_et, p.at(crops_end_input_idx).in_pshape);
return make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
} }
} // namespace } // namespace
TEST(type_prop, batch_to_space_incompatible_input_element_types) { TEST(type_prop, batch_to_space_incompatible_input_element_types) {
element::Type float_et = element::f32; ov::element::Type float_et = ov::element::f32;
element::Type integer64_et = element::i64; ov::element::Type integer64_et = ov::element::i64;
element::Type integer32_et = element::i32; ov::element::Type integer32_et = ov::element::i32;
Shape data_sshape{10, 26}; ov::Shape data_sshape{10, 26};
Shape inputs_sshape{2}; ov::Shape inputs_sshape{2};
vector<BatchToSpaceInputParams> test_cases; vector<BatchToSpaceInputParams> test_cases;
test_cases.push_back(BatchToSpaceInputParams{InputInfo{float_et, data_sshape}, test_cases.push_back(BatchToSpaceInputParams{InputInfo{float_et, data_sshape},
@ -67,7 +72,7 @@ TEST(type_prop, batch_to_space_incompatible_input_element_types) {
try { try {
auto batch_to_space = makeBatchToSpaceOp(test_case); auto batch_to_space = makeBatchToSpaceOp(test_case);
FAIL() << "Incompatible element types for block_shape/crops_begin/crops_end inputs not detected"; FAIL() << "Incompatible element types for block_shape/crops_begin/crops_end inputs not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"block_shape, crops_begin and crops_end inputs must have same element type."); "block_shape, crops_begin and crops_end inputs must have same element type.");
} catch (...) { } catch (...) {
@ -77,10 +82,10 @@ TEST(type_prop, batch_to_space_incompatible_input_element_types) {
} }
TEST(type_prop, batch_to_space_invalid_input_element_types) { TEST(type_prop, batch_to_space_invalid_input_element_types) {
element::Type float_et = element::f32; ov::element::Type float_et = ov::element::f32;
Shape data_sshape{10, 26}; ov::Shape data_sshape{10, 26};
Shape inputs_sshape{2}; ov::Shape inputs_sshape{2};
const BatchToSpaceInputParams params{InputInfo{float_et, data_sshape}, const BatchToSpaceInputParams params{InputInfo{float_et, data_sshape},
InputInfo{float_et, inputs_sshape}, InputInfo{float_et, inputs_sshape},
@ -90,7 +95,7 @@ TEST(type_prop, batch_to_space_invalid_input_element_types) {
try { try {
auto batch_to_space = makeBatchToSpaceOp(params); auto batch_to_space = makeBatchToSpaceOp(params);
FAIL() << "Invalid non-integer element type for block_shape/crops_begin/crops_end inputs not detected"; FAIL() << "Invalid non-integer element type for block_shape/crops_begin/crops_end inputs not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "block_shape and crops inputs must have integer element type."); EXPECT_HAS_SUBSTRING(error.what(), "block_shape and crops inputs must have integer element type.");
} catch (...) { } catch (...) {
FAIL() << "Element type check for block_shape/crops_begin/crops_end inputs failed for unexpected reason"; FAIL() << "Element type check for block_shape/crops_begin/crops_end inputs failed for unexpected reason";
@ -98,11 +103,11 @@ TEST(type_prop, batch_to_space_invalid_input_element_types) {
} }
TEST(type_prop, batch_to_space_invalid_data_input_rank) { TEST(type_prop, batch_to_space_invalid_data_input_rank) {
Shape data_sshape{4}; ov::Shape data_sshape{4};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape{2}; ov::Shape inputs_sshape{2};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape}, const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape},
InputInfo{inputs_et, inputs_sshape}, InputInfo{inputs_et, inputs_sshape},
@ -112,7 +117,7 @@ TEST(type_prop, batch_to_space_invalid_data_input_rank) {
try { try {
auto batch_to_space = makeBatchToSpaceOp(params); auto batch_to_space = makeBatchToSpaceOp(params);
FAIL() << "Invalid rank of data input not detected"; FAIL() << "Invalid rank of data input not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "data input must have rank greater or equal than 2."); EXPECT_HAS_SUBSTRING(error.what(), "data input must have rank greater or equal than 2.");
} catch (...) { } catch (...) {
FAIL() << "Rank check for data input failed for unexpected reason"; FAIL() << "Rank check for data input failed for unexpected reason";
@ -120,12 +125,12 @@ TEST(type_prop, batch_to_space_invalid_data_input_rank) {
} }
TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) { TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) {
Shape data_sshape{10, 26}; ov::Shape data_sshape{10, 26};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape_1D{2}; ov::Shape inputs_sshape_1D{2};
Shape inputs_sshape_2D{2, 1}; ov::Shape inputs_sshape_2D{2, 1};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
vector<BatchToSpaceInputParams> test_cases; vector<BatchToSpaceInputParams> test_cases;
test_cases.push_back(BatchToSpaceInputParams{InputInfo{data_et, data_sshape}, test_cases.push_back(BatchToSpaceInputParams{InputInfo{data_et, data_sshape},
@ -147,7 +152,7 @@ TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) {
try { try {
auto batch_to_space = makeBatchToSpaceOp(test_case); auto batch_to_space = makeBatchToSpaceOp(test_case);
FAIL() << "Incompatible shapes for block_shape/crops_begin/crops_end inputs not detected"; FAIL() << "Incompatible shapes for block_shape/crops_begin/crops_end inputs not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"block_shape, crops_begin and crops_end inputs must have the same shape."); "block_shape, crops_begin and crops_end inputs must have the same shape.");
} catch (...) { } catch (...) {
@ -157,11 +162,11 @@ TEST(type_prop, batch_to_space_incompatible_secondary_inputs_shapes) {
} }
TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) { TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) {
Shape data_sshape{10, 26}; ov::Shape data_sshape{10, 26};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape_2D{2, 1}; ov::Shape inputs_sshape_2D{2, 1};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape}, const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape},
InputInfo{inputs_et, inputs_sshape_2D}, InputInfo{inputs_et, inputs_sshape_2D},
@ -171,7 +176,7 @@ TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) {
try { try {
auto batch_to_space = makeBatchToSpaceOp(params); auto batch_to_space = makeBatchToSpaceOp(params);
FAIL() << "Invalid rank for block_shape/crops_begin/crops_end inputs not detected"; FAIL() << "Invalid rank for block_shape/crops_begin/crops_end inputs not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "block_shape and crops inputs must have rank 1."); EXPECT_HAS_SUBSTRING(error.what(), "block_shape and crops inputs must have rank 1.");
} catch (...) { } catch (...) {
FAIL() << "Rank check for block_shape/crops_begin/crops_end inputs failed for unexpected reason"; FAIL() << "Rank check for block_shape/crops_begin/crops_end inputs failed for unexpected reason";
@ -179,11 +184,11 @@ TEST(type_prop, batch_to_space_invalid_secondary_inputs_rank) {
} }
TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) { TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) {
Shape data_sshape{10, 26}; ov::Shape data_sshape{10, 26};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape{5}; ov::Shape inputs_sshape{5};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape}, const BatchToSpaceInputParams params{InputInfo{data_et, data_sshape},
InputInfo{inputs_et, inputs_sshape}, InputInfo{inputs_et, inputs_sshape},
@ -193,7 +198,7 @@ TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) {
try { try {
auto batch_to_space = makeBatchToSpaceOp(params); auto batch_to_space = makeBatchToSpaceOp(params);
FAIL() << "Incompatible shapes for data and block_shape/crops_begin/crops_end inputs not detected"; FAIL() << "Incompatible shapes for data and block_shape/crops_begin/crops_end inputs not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"block_shape and crop inputs must have same number of elements " "block_shape and crop inputs must have same number of elements "
"as data input rank."); "as data input rank.");
@ -204,21 +209,21 @@ TEST(type_prop, batch_to_space_incompatible_data_and_secondary_inputs_shapes) {
} }
TEST(type_prop, batch_to_space_invalid_block_shape_input) { TEST(type_prop, batch_to_space_invalid_block_shape_input) {
Shape data_sshape{100, 7, 13, 3}; ov::Shape data_sshape{100, 7, 13, 3};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape{4}; ov::Shape inputs_sshape{4};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
auto data = make_shared<op::Parameter>(data_et, data_sshape); auto data = make_shared<ov::op::v0::Parameter>(data_et, data_sshape);
auto block_shape = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 0});
try { try {
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
FAIL() << "Invalid elements of block_shape input not detected"; FAIL() << "Invalid elements of block_shape input not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Elements of block_shape input must be greater or equal to one."); EXPECT_HAS_SUBSTRING(error.what(), "Elements of block_shape input must be greater or equal to one.");
} catch (...) { } catch (...) {
FAIL() << "Greater than zero elements of block_shape input check failed for unexpected reason"; FAIL() << "Greater than zero elements of block_shape input check failed for unexpected reason";
@ -226,20 +231,20 @@ TEST(type_prop, batch_to_space_invalid_block_shape_input) {
} }
TEST(type_prop, batch_to_space_invalid_crops_input_values) { TEST(type_prop, batch_to_space_invalid_crops_input_values) {
Shape data_sshape{100, 7, 13, 3}; ov::Shape data_sshape{100, 7, 13, 3};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape{4}; ov::Shape inputs_sshape{4};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
try { try {
auto data = make_shared<op::Parameter>(data_et, data_sshape); auto data = make_shared<ov::op::v0::Parameter>(data_et, data_sshape);
auto block_shape = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, -1}); auto crops_begin = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, -1});
auto crops_end = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
FAIL() << "Invalid crops_begin input values not detected"; FAIL() << "Invalid crops_begin input values not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"Elements of crops_begin and crops_end inputs must be greater or equal to zero."); "Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
} catch (...) { } catch (...) {
@ -247,13 +252,13 @@ TEST(type_prop, batch_to_space_invalid_crops_input_values) {
} }
try { try {
auto data = make_shared<op::Parameter>(data_et, data_sshape); auto data = make_shared<ov::op::v0::Parameter>(data_et, data_sshape);
auto block_shape = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, -1, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, -1, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
FAIL() << "Invalid crops_end input values not detected"; FAIL() << "Invalid crops_end input values not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"Elements of crops_begin and crops_end inputs must be greater or equal to zero."); "Elements of crops_begin and crops_end inputs must be greater or equal to zero.");
} catch (...) { } catch (...) {
@ -262,19 +267,19 @@ TEST(type_prop, batch_to_space_invalid_crops_input_values) {
} }
TEST(type_prop, batch_to_space_incompatible_block_shape_input_values_with_data_shape) { TEST(type_prop, batch_to_space_incompatible_block_shape_input_values_with_data_shape) {
Shape data_sshape{80, 7, 13, 3}; ov::Shape data_sshape{80, 7, 13, 3};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape{4}; ov::Shape inputs_sshape{4};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
auto data = make_shared<op::Parameter>(data_et, data_sshape); auto data = make_shared<ov::op::v0::Parameter>(data_et, data_sshape);
auto block_shape = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 0});
try { try {
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
FAIL() << "Incompatible data shape and block_shape input values not detected"; FAIL() << "Incompatible data shape and block_shape input values not detected";
} catch (const ov::Exception& error) { } catch (const ov::Exception& error) {
EXPECT_HAS_SUBSTRING(error.what(), "[ 80, 80] must be a multiple of divisor: 50"); EXPECT_HAS_SUBSTRING(error.what(), "[ 80, 80] must be a multiple of divisor: 50");
@ -284,21 +289,21 @@ TEST(type_prop, batch_to_space_incompatible_block_shape_input_values_with_data_s
} }
TEST(type_prop, batch_to_space_invalid_crops_out_of_bounds) { TEST(type_prop, batch_to_space_invalid_crops_out_of_bounds) {
Shape data_sshape{32, 4, 1, 3}; ov::Shape data_sshape{32, 4, 1, 3};
element::Type data_et = element::f32; ov::element::Type data_et = ov::element::f32;
Shape inputs_sshape{4}; ov::Shape inputs_sshape{4};
element::Type inputs_et = element::i64; ov::element::Type inputs_et = ov::element::i64;
auto data = make_shared<op::Parameter>(data_et, data_sshape); auto data = make_shared<ov::op::v0::Parameter>(data_et, data_sshape);
auto block_shape = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 2, 2, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{1, 2, 2, 1});
auto crops_begin = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 2}); auto crops_begin = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 1, 2});
auto crops_end = make_shared<op::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 2}); auto crops_end = make_shared<ov::op::v0::Constant>(inputs_et, inputs_sshape, vector<int64_t>{0, 3, 0, 2});
try { try {
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
FAIL() << "Invalid out of bound crops values not detected"; FAIL() << "Invalid out of bound crops values not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
"crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]"); "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]");
} catch (...) { } catch (...) {
@ -307,172 +312,180 @@ TEST(type_prop, batch_to_space_invalid_crops_out_of_bounds) {
} }
TEST(type_prop, batch_to_space_output_shape_2D) { TEST(type_prop, batch_to_space_output_shape_2D) {
auto data = make_shared<op::Parameter>(element::f32, Shape{10, 26}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{10, 26});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 5}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, vector<int64_t>{1, 5});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 2}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, vector<int64_t>{0, 2});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{2}, vector<int64_t>{0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32); ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{10 / 5, 26 * 5 - 2})); ASSERT_EQ(batch_to_space->get_shape(), (ov::Shape{10 / 5, 26 * 5 - 2}));
} }
TEST(type_prop, batch_to_space_output_shape_4D) { TEST(type_prop, batch_to_space_output_shape_4D) {
auto data = make_shared<op::Parameter>(element::f32, Shape{100, 7, 13, 3}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{100, 7, 13, 3});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32); ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); ASSERT_EQ(batch_to_space->get_shape(), (ov::Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
} }
TEST(type_prop, batch_to_space_output_shape_5D) { TEST(type_prop, batch_to_space_output_shape_5D) {
auto data = make_shared<op::Parameter>(element::f32, Shape{960, 6, 13, 128, 16}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{960, 6, 13, 128, 16});
auto block_shape = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16}); auto block_shape =
auto crops_begin = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0}); make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
auto crops_end = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0}); auto crops_begin =
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32); ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
ASSERT_EQ(batch_to_space->get_shape(), (Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16})); ASSERT_EQ(batch_to_space->get_shape(), (ov::Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16}));
} }
TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_static) { TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_static) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{960, {2, 20}, {12, 14}, {100, 150}, {10, 20}}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32,
auto block_shape = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16}); ov::PartialShape{960, {2, 20}, {12, 14}, {100, 150}, {10, 20}});
auto crops_begin = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0}); auto block_shape =
auto crops_end = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0}); make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto crops_begin =
make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_output_partial_shape(0), ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
(PartialShape{960 / (6 * 5 * 16), (ov::PartialShape{960 / (6 * 5 * 16),
{2 * 6 - 2 - 2, 20 * 6 - 2 - 2}, {2 * 6 - 2 - 2, 20 * 6 - 2 - 2},
{12 * 5 - 1, 14 * 5 - 1}, {12 * 5 - 1, 14 * 5 - 1},
{100, 150}, {100, 150},
{10 * 16, 20 * 16}})); {10 * 16, 20 * 16}}));
} }
OPENVINO_SUPPRESS_DEPRECATED_START OPENVINO_SUPPRESS_DEPRECATED_START
TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_dynamic) { TEST(type_prop, batch_to_space_output_dynamic_shape_5D_when_batch_is_dynamic) {
auto data_shape = PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}}; auto data_shape = ov::PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}};
set_shape_labels(data_shape, 10); set_shape_labels(data_shape, 10);
auto data = make_shared<op::Parameter>(element::f32, data_shape); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, data_shape);
auto block_shape = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{1, 6, 5, 1, 16}); auto block_shape =
auto crops_begin = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 0, 0, 0}); make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{1, 6, 5, 1, 16});
auto crops_end = make_shared<op::Constant>(element::i32, Shape{5}, vector<int64_t>{0, 2, 1, 0, 0}); auto crops_begin =
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{0, 2, 0, 0, 0});
auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{5}, vector<int64_t>{0, 2, 1, 0, 0});
auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), EXPECT_EQ(batch_to_space->get_output_partial_shape(0),
(PartialShape{{ceil_div(959, (6 * 5 * 16)), 962 / (6 * 5 * 16)}, (ov::PartialShape{{ngraph::ceil_div(959, (6 * 5 * 16)), 962 / (6 * 5 * 16)},
{2 * 6 - 2 - 2, 34 * 6 - 2 - 2}, {2 * 6 - 2 - 2, 34 * 6 - 2 - 2},
{9 * 5 - 1, 21 * 5 - 1}, {9 * 5 - 1, 21 * 5 - 1},
{100, 162}, {100, 162},
{1 * 16, 1999 * 16}})); {1 * 16, 1999 * 16}}));
EXPECT_THAT(get_shape_labels(batch_to_space->get_output_partial_shape(0)), EXPECT_THAT(get_shape_labels(batch_to_space->get_output_partial_shape(0)),
ElementsAre(ov::no_label, ov::no_label, ov::no_label, 13, ov::no_label)); ElementsAre(ov::no_label, ov::no_label, ov::no_label, 13, ov::no_label));
} }
TEST(type_prop, batch_to_space_input_interval_shape_block_one) { TEST(type_prop, batch_to_space_input_interval_shape_block_one) {
auto data_shape = PartialShape{{959, 962}, {2, 34}, {9, 21}}; auto data_shape = ov::PartialShape{{959, 962}, {2, 34}, {9, 21}};
set_shape_labels(data_shape, 10); set_shape_labels(data_shape, 10);
auto data = make_shared<op::Parameter>(element::f32, data_shape); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, data_shape);
auto block_shape = make_shared<op::Constant>(element::i32, Shape{3}, vector<int64_t>{1, 1, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{3}, vector<int64_t>{1, 1, 1});
auto crops_begin = make_shared<op::Constant>(element::i32, Shape{3}, vector<int64_t>{0, 0, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{3}, vector<int64_t>{0, 0, 0});
auto crops_end = make_shared<op::Constant>(element::i32, Shape{3}, vector<int64_t>{0, 0, 1}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{3}, vector<int64_t>{0, 0, 1});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), EXPECT_EQ(batch_to_space->get_output_partial_shape(0),
PartialShape({{959, 962}, {2, 34}, {9 * 1 - 1, 21 * 1 - 1}})); ov::PartialShape({{959, 962}, {2, 34}, {9 * 1 - 1, 21 * 1 - 1}}));
EXPECT_THAT(get_shape_labels(batch_to_space->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label)); EXPECT_THAT(get_shape_labels(batch_to_space->get_output_partial_shape(0)), ElementsAre(10, 11, ov::no_label));
} }
TEST(type_prop, batch_to_space_and_space_to_batch) { TEST(type_prop, batch_to_space_and_space_to_batch) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{4800, 9, {11, -1}, 2}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{4800, 9, {11, -1}, 2});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 12, 100, 2}); auto block_shape =
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 38, 1}); make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{1, 12, 100, 2});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 5, 38, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 38, 1});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 5, 38, 0});
auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32); ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
ASSERT_EQ(batch_to_space->get_output_partial_shape(0), ASSERT_EQ(batch_to_space->get_output_partial_shape(0),
(PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1})); (ov::PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1}));
auto space_to_batch = make_shared<op::v1::SpaceToBatch>(batch_to_space, block_shape, crops_begin, crops_end); auto space_to_batch = make_shared<ov::op::v1::SpaceToBatch>(batch_to_space, block_shape, crops_begin, crops_end);
ASSERT_EQ(space_to_batch->get_element_type(), element::f32); ASSERT_EQ(space_to_batch->get_element_type(), ov::element::f32);
ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (PartialShape{4800, 9, {11, -1}, 2})); ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (ov::PartialShape{4800, 9, {11, -1}, 2}));
} }
TEST(type_prop, batch_to_space_dynamic_shape_static_rank) { TEST(type_prop, batch_to_space_dynamic_shape_static_rank) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic(4)); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic(4));
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32); ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
ASSERT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4)); ASSERT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape::dynamic(4));
} }
TEST(type_prop, batch_to_space_dynamic_shape_dynamic_rank) { TEST(type_prop, batch_to_space_dynamic_shape_dynamic_rank) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape::dynamic()); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
ASSERT_EQ(batch_to_space->get_element_type(), element::f32); ASSERT_EQ(batch_to_space->get_element_type(), ov::element::f32);
ASSERT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic()); ASSERT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape::dynamic());
} }
TEST(type_prop, batch_to_space_default_ctor) { TEST(type_prop, batch_to_space_default_ctor) {
auto data = make_shared<op::Parameter>(element::i16, Shape{100, 7, 13, 3}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::i16, ov::Shape{100, 7, 13, 3});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 10, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{1, 10, 5, 1});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>();
batch_to_space->set_arguments(OutputVector{data, block_shape, crops_begin, crops_end}); batch_to_space->set_arguments(ov::OutputVector{data, block_shape, crops_begin, crops_end});
batch_to_space->validate_and_infer_types(); batch_to_space->validate_and_infer_types();
EXPECT_EQ(batch_to_space->get_input_size(), 4); EXPECT_EQ(batch_to_space->get_input_size(), 4);
EXPECT_EQ(batch_to_space->get_output_size(), 1); EXPECT_EQ(batch_to_space->get_output_size(), 1);
EXPECT_EQ(batch_to_space->get_element_type(), element::i16); EXPECT_EQ(batch_to_space->get_element_type(), ov::element::i16);
EXPECT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); EXPECT_EQ(batch_to_space->get_shape(), (ov::Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3}));
} }
TEST(type_prop, batch_to_space_non_const_inputs) { TEST(type_prop, batch_to_space_non_const_inputs) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{100, 7, 13, 3});
auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4}); auto block_shape = make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::PartialShape{4});
auto crops_begin = make_shared<op::Parameter>(element::i64, PartialShape{4}); auto crops_begin = make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::PartialShape{4});
auto crops_end = make_shared<op::Parameter>(element::i64, PartialShape{4}); auto crops_end = make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::PartialShape{4});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
EXPECT_EQ(batch_to_space->get_element_type(), element::f32); EXPECT_EQ(batch_to_space->get_element_type(), ov::element::f32);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4)); EXPECT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape::dynamic(4));
} }
TEST(type_prop, batch_to_space_block_non_constant_only) { TEST(type_prop, batch_to_space_block_non_constant_only) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{100, 7, 13, 3});
auto block_shape = make_shared<op::Parameter>(element::i64, PartialShape{4}); auto block_shape = make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::PartialShape{4});
auto crops_begin = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 1, 0}); auto crops_begin = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 1, 0});
auto crops_end = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{0, 3, 0, 0}); auto crops_end = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{0, 3, 0, 0});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
EXPECT_EQ(batch_to_space->get_element_type(), element::f32); EXPECT_EQ(batch_to_space->get_element_type(), ov::element::f32);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {12, -1}, {3, -1}})); EXPECT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape({-1, {1, -1}, {12, -1}, {3, -1}}));
} }
TEST(type_prop, batch_to_space_crops_non_constant_only) { TEST(type_prop, batch_to_space_crops_non_constant_only) {
auto data = make_shared<op::Parameter>(element::f32, PartialShape{100, 7, 13, 3}); auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{100, 7, 13, 3});
auto block_shape = make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{1, 2, 5, 1}); auto block_shape = make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{4}, vector<int64_t>{1, 2, 5, 1});
auto crops_begin = make_shared<op::Parameter>(element::i64, PartialShape{4}); auto crops_begin = make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::PartialShape{4});
auto crops_end = make_shared<op::Parameter>(element::i64, PartialShape{4}); auto crops_end = make_shared<ov::op::v0::Parameter>(ov::element::i64, ov::PartialShape{4});
auto batch_to_space = make_shared<op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end); auto batch_to_space = make_shared<ov::op::v1::BatchToSpace>(data, block_shape, crops_begin, crops_end);
EXPECT_EQ(batch_to_space->get_element_type(), element::f32); EXPECT_EQ(batch_to_space->get_element_type(), ov::element::f32);
EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({10, -1, -1, -1})); EXPECT_EQ(batch_to_space->get_output_partial_shape(0), ov::PartialShape({10, -1, -1, -1}));
} }


@ -2,219 +2,224 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/binary_convolution.hpp"
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/type_prop.hpp" #include "common_test_utils/type_prop.hpp"
#include "gtest/gtest.h" #include "openvino/core/coordinate_diff.hpp"
#include "ngraph/ngraph.hpp"
using namespace std; using namespace std;
using namespace ngraph;
using namespace testing; using namespace testing;
TEST(type_prop, bin_convolution_auto_padding_same) { TEST(type_prop, bin_convolution_auto_padding_same) {
PartialShape data_batch_shape{1, 1, 5, 5}; ov::PartialShape data_batch_shape{1, 1, 5, 5};
PartialShape filters_shape{1, 1, 3, 3}; ov::PartialShape filters_shape{1, 1, 3, 3};
set_shape_labels(data_batch_shape, 10); set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20); set_shape_labels(filters_shape, 20);
Strides strides{1, 1}; ov::Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0}; ov::CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0}; ov::CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1}; ov::Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_LOWER; const auto auto_pad = ov::op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape); auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::u1, filters_shape); auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(data_batch, auto conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
strides, strides,
pads_begin, pads_begin,
pads_end, pads_end,
dilations, dilations,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, 1, 5, 5})); EXPECT_EQ(conv->get_output_partial_shape(0), (ov::PartialShape{1, 1, 5, 5}));
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1})); EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1})); EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{1, 1}));
} }
TEST(type_prop, bin_convolution_auto_padding_same_lower_spatial_dims_static) { TEST(type_prop, bin_convolution_auto_padding_same_lower_spatial_dims_static) {
PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5}; ov::PartialShape data_batch_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5};
PartialShape filters_shape{Dimension::dynamic(), Dimension::dynamic(), 3, 3}; ov::PartialShape filters_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 3, 3};
set_shape_labels(data_batch_shape, 10); set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20); set_shape_labels(filters_shape, 20);
Strides strides{1, 1}; ov::Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0}; ov::CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0}; ov::CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1}; ov::Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_LOWER; const auto auto_pad = ov::op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape); auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::u1, filters_shape); auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(data_batch, auto conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
strides, strides,
pads_begin, pads_begin,
pads_end, pads_end,
dilations, dilations,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 5})); EXPECT_EQ(conv->get_output_partial_shape(0),
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1})); (ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1})); EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{1, 1}));
EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{1, 1}));
} }
TEST(type_prop, bin_convolution_auto_padding_same_upper_spatial_dims_static) { TEST(type_prop, bin_convolution_auto_padding_same_upper_spatial_dims_static) {
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5}; const ov::PartialShape data_batch_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 2}; const ov::PartialShape filters_shape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 2, 2};
Strides strides{1, 1}; ov::Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0}; ov::CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0}; ov::CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1}; ov::Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_UPPER; const auto auto_pad = ov::op::PadType::SAME_UPPER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape); auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::u1, filters_shape); auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(data_batch, auto conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
strides, strides,
pads_begin, pads_begin,
pads_end, pads_end,
dilations, dilations,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 5})); EXPECT_EQ(conv->get_output_partial_shape(0),
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0})); (ov::PartialShape{ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5, 5}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1})); EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{0, 0}));
EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{1, 1}));
} }
TEST(type_prop, bin_convolution_auto_padding_same_data_batch_spatial_dims_dynamic) { TEST(type_prop, bin_convolution_auto_padding_same_data_batch_spatial_dims_dynamic) {
PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5}; ov::PartialShape data_batch_shape{1, 1, ov::Dimension::dynamic(), 5};
PartialShape filters_shape{Dimension::dynamic(), 1, 3, 3}; ov::PartialShape filters_shape{ov::Dimension::dynamic(), 1, 3, 3};
set_shape_labels(data_batch_shape, 10); set_shape_labels(data_batch_shape, 10);
set_shape_labels(filters_shape, 20); set_shape_labels(filters_shape, 20);
Strides strides{1, 1}; ov::Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0}; ov::CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0}; ov::CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1}; ov::Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_LOWER; const auto auto_pad = ov::op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape); auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::u1, filters_shape); auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(data_batch, auto conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
strides, strides,
pads_begin, pads_begin,
pads_end, pads_end,
dilations, dilations,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); EXPECT_THAT(get_shape_labels(conv->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(conv->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 5})); EXPECT_EQ(conv->get_output_partial_shape(0),
EXPECT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 1})); (ov::PartialShape{1, ov::Dimension::dynamic(), ov::Dimension::dynamic(), 5}));
EXPECT_EQ(conv->get_pads_end(), (CoordinateDiff{0, 1})); EXPECT_EQ(conv->get_pads_begin(), (ov::CoordinateDiff{0, 1}));
EXPECT_EQ(conv->get_pads_end(), (ov::CoordinateDiff{0, 1}));
} }
TEST(type_prop, bin_convolution_dyn_data_batch) { TEST(type_prop, bin_convolution_dyn_data_batch) {
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT; const auto auto_pad = ov::op::PadType::EXPLICIT;
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape::dynamic()); const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch, const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{},
mode,
pad_value,
auto_pad);
EXPECT_EQ(bin_conv->get_output_partial_shape(0), (PartialShape{-1, 1, {1, -1}, {1, -1}}));
}
TEST(type_prop, bin_convolution_dyn_filters) {
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT;
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape::dynamic());
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{},
mode,
pad_value,
auto_pad);
EXPECT_EQ(bin_conv->get_output_partial_shape(0), (PartialShape{1, -1, {1, 5}, {1, 5}}));
}
TEST(type_prop, bin_convolution_dyn_data_batch_and_filters) {
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT;
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape::dynamic());
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch,
filters,
Strides{},
CoordinateDiff{},
CoordinateDiff{},
Strides{},
mode,
pad_value,
auto_pad);
EXPECT_EQ(bin_conv->get_output_partial_shape(0), PartialShape::dynamic());
}
TEST(type_prop, bin_convolution_invalid_inputs_et) {
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT;
try {
const auto data_batch = make_shared<op::Parameter>(element::boolean, PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch,
filters, filters,
Strides{}, ov::Strides{},
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
Strides{}, ov::Strides{},
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
EXPECT_EQ(bin_conv->get_output_partial_shape(0), (ov::PartialShape{-1, 1, {1, -1}, {1, -1}}));
}
TEST(type_prop, bin_convolution_dyn_filters) {
const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = ov::op::PadType::EXPLICIT;
const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5});
const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape::dynamic());
const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters,
ov::Strides{},
ov::CoordinateDiff{},
ov::CoordinateDiff{},
ov::Strides{},
mode,
pad_value,
auto_pad);
EXPECT_EQ(bin_conv->get_output_partial_shape(0), (ov::PartialShape{1, -1, {1, 5}, {1, 5}}));
}
TEST(type_prop, bin_convolution_dyn_data_batch_and_filters) {
const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = ov::op::PadType::EXPLICIT;
const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape::dynamic());
const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape::dynamic());
const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters,
ov::Strides{},
ov::CoordinateDiff{},
ov::CoordinateDiff{},
ov::Strides{},
mode,
pad_value,
auto_pad);
EXPECT_EQ(bin_conv->get_output_partial_shape(0), ov::PartialShape::dynamic());
}
TEST(type_prop, bin_convolution_invalid_inputs_et) {
const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = ov::op::PadType::EXPLICIT;
try {
const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::boolean, ov::PartialShape{1, 1, 5, 5});
const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters,
ov::Strides{},
ov::CoordinateDiff{},
ov::CoordinateDiff{},
ov::Strides{},
mode,
pad_value,
auto_pad);
// data batch element type must be float point // data batch element type must be float point
FAIL() << "Incompatible element type of data batch input not detected"; FAIL() << "Incompatible element type of data batch input not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data batch element type must be numeric"); EXPECT_HAS_SUBSTRING(error.what(), "Data batch element type must be numeric");
} catch (...) { } catch (...) {
FAIL() << "Data batch element type validation check failed for unexpected reason"; FAIL() << "Data batch element type validation check failed for unexpected reason";
@ -224,25 +229,25 @@ TEST(type_prop, bin_convolution_invalid_inputs_et) {
} }
TEST(type_prop, bin_convolution_incompatible_input_channels) { TEST(type_prop, bin_convolution_incompatible_input_channels) {
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT; const auto auto_pad = ov::op::PadType::EXPLICIT;
auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5}); auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5});
auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 2, 3, 3}); auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 2, 3, 3});
try { try {
auto conv = make_shared<op::v1::BinaryConvolution>(data_batch, auto conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
Strides{}, ov::Strides{},
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
Strides{}, ov::Strides{},
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
FAIL() << "Incompatible input channel dimension in data batch and filters not detected"; FAIL() << "Incompatible input channel dimension in data batch and filters not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch channel count")); EXPECT_HAS_SUBSTRING(error.what(), std::string("Data batch channel count"));
} catch (...) { } catch (...) {
FAIL() << "Data batch and filters input channel count validation check failed for " FAIL() << "Data batch and filters input channel count validation check failed for "
@ -251,27 +256,27 @@ TEST(type_prop, bin_convolution_incompatible_input_channels) {
} }
TEST(type_prop, bin_convolution_invalid_input_ranks) { TEST(type_prop, bin_convolution_invalid_input_ranks) {
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT; const auto auto_pad = ov::op::PadType::EXPLICIT;
// data partial shape provided is rank 4 (Conv2D) // data partial shape provided is rank 4 (Conv2D)
// filter partial shape provided is rank 5 (Conv3D) // filter partial shape provided is rank 5 (Conv3D)
try { try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5}); const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3, 3}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch, const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
Strides{}, ov::Strides{},
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
Strides{}, ov::Strides{},
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
// data batch and filters have incompatible ranks // data batch and filters have incompatible ranks
FAIL() << "Incompatible input ranks not detected"; FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match"); EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match");
} catch (...) { } catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason"; FAIL() << "Rank validation check of inputs failed for unexpected reason";
@ -280,20 +285,20 @@ TEST(type_prop, bin_convolution_invalid_input_ranks) {
// data partial shape provided is rank 5 (Conv3D) // data partial shape provided is rank 5 (Conv3D)
// filter partial shape provided is rank 4 (Conv2D) // filter partial shape provided is rank 4 (Conv2D)
try { try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5, 5}); const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch, const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
Strides{}, ov::Strides{},
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
Strides{}, ov::Strides{},
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
// data batch and filters have incompatible ranks // data batch and filters have incompatible ranks
FAIL() << "Incompatible input ranks not detected"; FAIL() << "Incompatible input ranks not detected";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match"); EXPECT_HAS_SUBSTRING(error.what(), "Data batch and filters rank do not match");
} catch (...) { } catch (...) {
FAIL() << "Rank validation check of inputs failed for unexpected reason"; FAIL() << "Rank validation check of inputs failed for unexpected reason";
@ -301,34 +306,34 @@ TEST(type_prop, bin_convolution_invalid_input_ranks) {
} }
TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) { TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) {
Strides strides_1d{1}; ov::Strides strides_1d{1};
Strides strides_3d{1, 1, 1}; ov::Strides strides_3d{1, 1, 1};
Strides dilations_2d{1, 1}; ov::Strides dilations_2d{1, 1};
Strides dilations_3d{1, 1, 1}; ov::Strides dilations_3d{1, 1, 1};
CoordinateDiff pads_end_2d{0, 0}; ov::CoordinateDiff pads_end_2d{0, 0};
CoordinateDiff pads_begin_3d{0, 0, 0}; ov::CoordinateDiff pads_begin_3d{0, 0, 0};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; const auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f; const float pad_value = 1.0f;
const auto auto_pad = op::PadType::EXPLICIT; const auto auto_pad = ov::op::PadType::EXPLICIT;
try { try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5}); const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch, const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
strides_3d, strides_3d,
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
dilations_2d, dilations_2d,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
// Strides have incompatible number of spatial dimensions // ov::Strides have incompatible number of spatial dimensions
FAIL() << "Incompatible stride number of spatial dimensions not detected."; FAIL() << "Incompatible stride number of spatial dimensions not detected.";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
std::string("Strides should be defined for all and only spatial dimensions.")); std::string("Strides should be defined for all and only spatial dimensions."));
} catch (...) { } catch (...) {
@ -336,20 +341,20 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) {
} }
try { try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5}); const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch, const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
Strides{1, 1}, ov::Strides{1, 1},
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
dilations_3d, dilations_3d,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
// Dilations have incompatible number of spatial dimensions // Dilations have incompatible number of spatial dimensions
FAIL() << "Incompatible dilations number of spatial dimensions not detected."; FAIL() << "Incompatible dilations number of spatial dimensions not detected.";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
std::string("Dilations should be defined for all and only spatial dimensions.")); std::string("Dilations should be defined for all and only spatial dimensions."));
} catch (...) { } catch (...) {
@ -357,20 +362,20 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) {
} }
try { try {
const auto data_batch = make_shared<op::Parameter>(element::f32, PartialShape{1, 1, 5, 5}); const auto data_batch = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 1, 5, 5});
const auto filters = make_shared<op::Parameter>(element::u1, PartialShape{1, 1, 3, 3}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::u1, ov::PartialShape{1, 1, 3, 3});
const auto bin_conv = make_shared<op::v1::BinaryConvolution>(data_batch, const auto bin_conv = make_shared<ov::op::v1::BinaryConvolution>(data_batch,
filters, filters,
Strides{1, 1}, ov::Strides{1, 1},
pads_begin_3d, pads_begin_3d,
pads_end_2d, pads_end_2d,
dilations_2d, dilations_2d,
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
// Pads have incompatible number of spatial dimensions // Pads have incompatible number of spatial dimensions
FAIL() << "Incompatible pads number of spatial dimensions not detected."; FAIL() << "Incompatible pads number of spatial dimensions not detected.";
} catch (const NodeValidationFailure& error) { } catch (const ov::NodeValidationFailure& error) {
EXPECT_HAS_SUBSTRING(error.what(), EXPECT_HAS_SUBSTRING(error.what(),
std::string("Pads begin and end should be defined for all and only spatial dimensions.")); std::string("Pads begin and end should be defined for all and only spatial dimensions."));
} catch (...) { } catch (...) {
@ -378,60 +383,60 @@ TEST(type_prop, bin_convolution_invalid_spatial_dims_parameters) {
} }
} }
class TypePropBinaryConvolutionV1Test : public TypePropOpTest<op::v1::BinaryConvolution> { class TypePropBinaryConvolutionV1Test : public TypePropOpTest<ov::op::v1::BinaryConvolution> {
protected: protected:
CoordinateDiff empty_pad{}; ov::CoordinateDiff empty_pad{};
}; };
TEST_F(TypePropBinaryConvolutionV1Test, default_ctor) { TEST_F(TypePropBinaryConvolutionV1Test, default_ctor) {
const auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, 3, 5, 5}); const auto data = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 5, 5});
const auto filters = make_shared<op::Parameter>(element::f32, PartialShape{2, 3, 4, 4}); const auto filters = make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3, 4, 4});
const auto op = make_op(); const auto op = make_op();
op->set_arguments(OutputVector{data, filters}); op->set_arguments(ov::OutputVector{data, filters});
op->set_strides({1, 3}); op->set_strides({1, 3});
op->set_dilations({1, 2}); op->set_dilations({1, 2});
op->set_pads_begin({2, 2}); op->set_pads_begin({2, 2});
op->set_pads_end({2, 2}); op->set_pads_end({2, 2});
op->set_auto_pad(op::PadType::EXPLICIT); op->set_auto_pad(ov::op::PadType::EXPLICIT);
op->set_mode(op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT); op->set_mode(ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT);
op->set_pad_value(1.0f); op->set_pad_value(1.0f);
op->validate_and_infer_types(); op->validate_and_infer_types();
EXPECT_EQ(op->get_input_size(), 2); EXPECT_EQ(op->get_input_size(), 2);
EXPECT_EQ(op->get_output_size(), 1); EXPECT_EQ(op->get_output_size(), 1);
EXPECT_EQ(op->get_strides(), Strides({1, 3})); EXPECT_EQ(op->get_strides(), ov::Strides({1, 3}));
EXPECT_EQ(op->get_dilations(), Strides({1, 2})); EXPECT_EQ(op->get_dilations(), ov::Strides({1, 2}));
EXPECT_EQ(op->get_pads_begin(), CoordinateDiff({2, 2})); EXPECT_EQ(op->get_pads_begin(), ov::CoordinateDiff({2, 2}));
EXPECT_EQ(op->get_pads_end(), CoordinateDiff({2, 2})); EXPECT_EQ(op->get_pads_end(), ov::CoordinateDiff({2, 2}));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({1, 2, 6, 1})); EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({1, 2, 6, 1}));
} }
TEST_F(TypePropBinaryConvolutionV1Test, interval_shapes) { TEST_F(TypePropBinaryConvolutionV1Test, interval_shapes) {
PartialShape data_batch_pshape{{1, 3}, 1, {1, 5}, {3, 10}}; ov::PartialShape data_batch_pshape{{1, 3}, 1, {1, 5}, {3, 10}};
PartialShape filters_pshape{2, {1, 3}, 3, 3}; ov::PartialShape filters_pshape{2, {1, 3}, 3, 3};
set_shape_labels(data_batch_pshape, 10); set_shape_labels(data_batch_pshape, 10);
set_shape_labels(filters_pshape, 20); set_shape_labels(filters_pshape, 20);
constexpr auto et = element::f32; constexpr auto et = ov::element::f32;
constexpr auto auto_pad = op::PadType::EXPLICIT; constexpr auto auto_pad = ov::op::PadType::EXPLICIT;
constexpr auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT; constexpr auto mode = ov::op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
constexpr auto pad_value = 1.0f; constexpr auto pad_value = 1.0f;
const auto data_batch = make_shared<op::Parameter>(et, data_batch_pshape); const auto data_batch = make_shared<ov::op::v0::Parameter>(et, data_batch_pshape);
const auto filters = make_shared<op::Parameter>(et, filters_pshape); const auto filters = make_shared<ov::op::v0::Parameter>(et, filters_pshape);
const auto op = make_op(data_batch, const auto op = make_op(data_batch,
filters, filters,
Strides{}, ov::Strides{},
CoordinateDiff{}, ov::CoordinateDiff{},
CoordinateDiff{}, ov::CoordinateDiff{},
Strides{}, ov::Strides{},
mode, mode,
pad_value, pad_value,
auto_pad); auto_pad);
EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label)); EXPECT_THAT(get_shape_labels(op->get_output_partial_shape(0)), ElementsAre(10, 20, ov::no_label, ov::no_label));
EXPECT_EQ(op->get_output_partial_shape(0), PartialShape({{1, 3}, 2, {1, 3}, {1, 8}})); EXPECT_EQ(op->get_output_partial_shape(0), ov::PartialShape({{1, 3}, 2, {1, 3}, {1, 8}}));
EXPECT_EQ(op->get_pads_begin(), (CoordinateDiff{0, 0})); EXPECT_EQ(op->get_pads_begin(), (ov::CoordinateDiff{0, 0}));
EXPECT_EQ(op->get_pads_end(), (CoordinateDiff{0, 0})); EXPECT_EQ(op->get_pads_end(), (ov::CoordinateDiff{0, 0}));
} }
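For reference, a minimal sketch (not part of this diff) of the include and namespace style the migrated type_prop tests above follow: gtest comes in via <gtest/gtest.h>, public "openvino/..." headers replace the legacy ngraph/ ones, and symbols are qualified with ov:: instead of being pulled in through `using namespace ngraph;`. The test name and shapes below are hypothetical.

#include <gtest/gtest.h>

#include <memory>

#include "openvino/core/partial_shape.hpp"
#include "openvino/op/abs.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical example in the migrated style: explicit ov:: qualification,
// no ngraph headers and no `using namespace ngraph;`.
TEST(type_prop_example, abs_preserves_type_and_shape) {
    const auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 5, 5});
    const auto abs = std::make_shared<ov::op::v0::Abs>(param);

    EXPECT_EQ(abs->get_element_type(), ov::element::f32);
    EXPECT_EQ(abs->get_output_partial_shape(0), (ov::PartialShape{1, 3, 5, 5}));
}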

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -2,10 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include <gtest/gtest.h>
#include "common_test_utils/test_assertions.hpp" #include "common_test_utils/test_assertions.hpp"
#include "common_test_utils/type_prop.hpp" #include "common_test_utils/type_prop.hpp"
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "openvino/opsets/opset11.hpp" #include "openvino/opsets/opset11.hpp"
using namespace std; using namespace std;

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/divide.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Divide>; using Type = ::testing::Types<ov::op::v1::Divide>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_divide, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_divide, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/floor_mod.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::FloorMod>; using Type = ::testing::Types<ov::op::v1::FloorMod>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_floormod, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_floormod, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/maximum.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Maximum>; using Type = ::testing::Types<ov::op::v1::Maximum>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_maximum, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_maximum, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/minimum.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Minimum>; using Type = ::testing::Types<ov::op::v1::Minimum>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_minimum, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_minimum, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/mod.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Mod>; using Type = ::testing::Types<ov::op::v1::Mod>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_mod, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_mod, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/multiply.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Multiply>; using Type = ::testing::Types<ov::op::v1::Multiply>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_multiply, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_multiply, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/power.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Power>; using Type = ::testing::Types<ov::op::v1::Power>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_power, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_power, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/squared_difference.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::SquaredDifference>; using Type = ::testing::Types<ov::op::v0::SquaredDifference>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_squared_difference, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_squared_difference, ArithmeticOperator, Type);

View File

@ -2,8 +2,10 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// //
#include "openvino/op/subtract.hpp"
#include "arithmetic_ops.hpp" #include "arithmetic_ops.hpp"
using Type = ::testing::Types<ngraph::op::v1::Subtract>; using Type = ::testing::Types<ov::op::v1::Subtract>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_subtract, ArithmeticOperator, Type); INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_subtract, ArithmeticOperator, Type);
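The arithmetic_ops.hpp files above all reuse one GoogleTest typed test suite across the element-wise ops, so each .cpp only adds the op header and instantiates the suite for that op. A minimal self-contained sketch of that mechanism follows; the fixture and test body here are hypothetical and are not the actual contents of arithmetic_ops.hpp.

#include <gtest/gtest.h>

#include <memory>

#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"

// Hypothetical typed fixture; the real shared suite lives in arithmetic_ops.hpp.
template <typename TOp>
class ArithmeticOperatorSketch : public ::testing::Test {};

TYPED_TEST_SUITE_P(ArithmeticOperatorSketch);

TYPED_TEST_P(ArithmeticOperatorSketch, broadcasts_to_common_shape) {
    const auto lhs = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 3});
    const auto rhs = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    const auto op = std::make_shared<TypeParam>(lhs, rhs);  // TypeParam is the op type under test

    EXPECT_EQ(op->get_shape(), (ov::Shape{2, 3}));  // NumPy-style broadcasting of {2,3} and {1,3}
}

REGISTER_TYPED_TEST_SUITE_P(ArithmeticOperatorSketch, broadcasts_to_common_shape);

// Each per-op .cpp then instantiates the shared suite for one op type, e.g.:
using Type = ::testing::Types<ov::op::v1::Add>;
INSTANTIATE_TYPED_TEST_SUITE_P(type_prop_add_sketch, ArithmeticOperatorSketch, Type);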