From 2d2977ff4a3855b6f71e6776ceb059032fe922ad Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Thu, 31 Aug 2023 16:39:46 +0400 Subject: [PATCH] Moved inference unit tests to new API (#19452) * Moved inference unit tests to new API * Added infer request and variable state * Try to fix LTO * Try to avoid warning from gmock * Try to fix azure build * Try to fix Windows build * Comment all variable_state_test file for future investigation --- ..._test.cpp => compilation_context_test.cpp} | 88 +++--- .../tests/unit/compiled_model_test.cpp | 187 ++++++++++++ .../tests/unit/infer_request_test.cpp | 150 ++++++++++ src/inference/tests/unit/iplugin_test.cpp | 116 ++++++++ .../tests/unit/memory_solver_test.cpp | 5 +- src/inference/tests/unit/query_model_test.cpp | 265 +++++++++--------- .../tests/unit/variable_state_test.cpp | 193 +++++++++++++ .../common_test_utils/test_assertions.hpp | 109 +++---- .../runtime/mock_iasync_infer_request.hpp | 40 +++ .../openvino/runtime/mock_icompiled_model.hpp | 39 +++ .../mocks/openvino/runtime/mock_icore.hpp | 63 +++++ .../mocks/openvino/runtime/mock_iplugin.hpp | 50 ++++ .../runtime/mock_isync_infer_request.hpp | 25 ++ .../openvino/runtime/mock_ivariable_state.hpp | 25 ++ 14 files changed, 1124 insertions(+), 231 deletions(-) rename src/inference/tests/unit/{ie_compilation_context_test.cpp => compilation_context_test.cpp} (83%) create mode 100644 src/inference/tests/unit/compiled_model_test.cpp create mode 100644 src/inference/tests/unit/infer_request_test.cpp create mode 100644 src/inference/tests/unit/iplugin_test.cpp create mode 100644 src/inference/tests/unit/variable_state_test.cpp create mode 100644 src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp create mode 100644 src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp create mode 100644 src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp create mode 100644 
src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp create mode 100644 src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp create mode 100644 src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp diff --git a/src/inference/tests/unit/ie_compilation_context_test.cpp b/src/inference/tests/unit/compilation_context_test.cpp similarity index 83% rename from src/inference/tests/unit/ie_compilation_context_test.cpp rename to src/inference/tests/unit/compilation_context_test.cpp index 3d0085d0203..1f3d5266729 100644 --- a/src/inference/tests/unit/ie_compilation_context_test.cpp +++ b/src/inference/tests/unit/compilation_context_test.cpp @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "compilation_context.hpp" + #include #include @@ -11,17 +13,15 @@ #include "common_test_utils/common_utils.hpp" #include "common_test_utils/test_constants.hpp" -#include "compilation_context.hpp" #include "cpp/ie_cnn_network.h" -#include "ngraph/function.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/opsets/opset6.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/parameter.hpp" #include "transformations/rt_info/fused_names_attribute.hpp" #include "transformations/rt_info/primitives_priority_attribute.hpp" -using namespace InferenceEngine; using namespace ov; -using namespace ngraph; using namespace ::testing; using namespace std::chrono; @@ -107,45 +107,45 @@ TEST_F(NetworkContext_CalcFileInfoTests, SizeModified) { //////////////////////////////////////////////////// -static std::shared_ptr create_simple_function() { - // This example is taken from docs, shows how to create ngraph::Function +static std::shared_ptr create_simple_model() { + // This example is taken from docs, shows how to create ov::Model // // Parameter--->Multiply--->Add--->Result // Constant---' / // Constant---' // Create 
opset6::Parameter operation with static shape - auto data = std::make_shared(ngraph::element::i8, ngraph::Shape{3, 1, 2}); + auto data = std::make_shared(ov::element::i8, ov::Shape{3, 1, 2}); data->set_friendly_name("Parameter"); data->get_output_tensor(0).set_names({"parameter"}); - auto mul_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {3}); + auto mul_constant = ov::op::v0::Constant::create(ov::element::i8, ov::Shape{1}, {3}); mul_constant->set_friendly_name("mul_constant"); mul_constant->get_output_tensor(0).set_names({"mul_constant"}); - auto mul = std::make_shared(data, mul_constant); + auto mul = std::make_shared(data, mul_constant); mul->set_friendly_name("mul"); mul->get_output_tensor(0).set_names({"mul"}); - auto add_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {2}); + auto add_constant = ov::op::v0::Constant::create(ov::element::i8, ov::Shape{1}, {2}); add_constant->set_friendly_name("add_constant"); add_constant->get_output_tensor(0).set_names({"add_constant"}); - auto add = std::make_shared(mul, add_constant); + auto add = std::make_shared(mul, add_constant); add->set_friendly_name("add"); add->get_output_tensor(0).set_names({"add"}); // Create opset3::Result operation - auto res = std::make_shared(add); + auto res = std::make_shared(add); res->set_friendly_name("res"); - // Create nGraph function - auto func = std::make_shared(ngraph::ResultVector{res}, ngraph::ParameterVector{data}); - return func; + // Create ov function + auto model = std::make_shared(ov::ResultVector{res}, ov::ParameterVector{data}); + return model; } static void checkCustomRt(const std::function& emptyCb, const std::function& nameCb) { - auto model1 = create_simple_function(); - auto model2 = create_simple_function(); + auto model1 = create_simple_model(); + auto model2 = create_simple_model(); auto& op1 = model1->get_ops().front()->get_rt_info(); auto& op2 = model2->get_ops().front()->get_rt_info(); @@ 
-166,22 +166,22 @@ static void checkCustomRt(const std::function& emptyCb, } TEST(NetworkContext, HashOfSame) { - auto model1 = create_simple_function(); - auto model2 = create_simple_function(); + auto model1 = create_simple_model(); + auto model2 = create_simple_model(); ASSERT_EQ(ModelCache::compute_hash(model1, {}), ModelCache::compute_hash(model2, {})); } TEST(NetworkContext, HashWithConfig) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); ASSERT_NE(ModelCache::compute_hash(net1, {{"key", "value"}}), ModelCache::compute_hash(net2, {})); ASSERT_EQ(ModelCache::compute_hash(net1, {{"key", "value"}}), ModelCache::compute_hash(net2, {{"key", "value"}})); } TEST(NetworkContext, HashWithPrimitivesPriority) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); - auto net3 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); + auto net3 = create_simple_model(); auto& op2 = net2->get_ops().front()->get_rt_info(); op2[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("testPriority"); @@ -214,9 +214,9 @@ TEST(NetworkContext, HashWithPrimitivesPriorityType) { } TEST(NetworkContext, HashWithAffinity) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); - auto net3 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); + auto net3 = create_simple_model(); auto& op2 = net2->get_ops().front()->get_rt_info(); op2["affinity"] = "testAffinity"; @@ -229,9 +229,9 @@ TEST(NetworkContext, HashWithAffinity) { } TEST(NetworkContext, HashWithFutureRt_string) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); - auto net3 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); + auto net3 = create_simple_model(); auto& op1 = 
net1->get_ops().front()->get_rt_info(); op1["someFutureKey"] = "hello"; @@ -248,9 +248,9 @@ TEST(NetworkContext, HashWithFutureRt_string) { } TEST(NetworkContext, HashWithFutureRt_int64) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); - auto net3 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); + auto net3 = create_simple_model(); auto& op1 = net1->get_ops().front()->get_rt_info(); op1["someFutureKey"] = int64_t(42); @@ -267,9 +267,9 @@ TEST(NetworkContext, HashWithFutureRt_int64) { } TEST(NetworkContext, HashWithTensorNames) { - auto fun1 = create_simple_function(); - auto fun2 = create_simple_function(); - auto fun3 = create_simple_function(); + auto fun1 = create_simple_model(); + auto fun2 = create_simple_model(); + auto fun3 = create_simple_model(); std::unordered_set names1, names2; std::vector testNames; testNames.reserve(100); @@ -292,19 +292,19 @@ TEST(NetworkContext, HashWithTensorNames) { } TEST(NetworkContext, HashWithDifferentResults) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); net2->remove_result(net2->get_results().front()); - auto net3 = create_simple_function(); + auto net3 = create_simple_model(); net3->remove_result(net3->get_results().front()); ASSERT_NE(ModelCache::compute_hash(net1, {}), ModelCache::compute_hash(net2, {})); ASSERT_EQ(ModelCache::compute_hash(net2, {}), ModelCache::compute_hash(net3, {})); } -// Verify all internal hash calculations are thread-safe (like ngraph::function serialization) +// Verify all internal hash calculations are thread-safe (like ov::Model serialization) TEST(NetworkContext, HashOfSameMultiThreading) { - auto net1 = create_simple_function(); - auto net2 = create_simple_function(); + auto net1 = create_simple_model(); + auto net2 = create_simple_model(); std::atomic_bool fail{false}; const auto TEST_DURATION_MS = 
1000; auto start = high_resolution_clock::now(); diff --git a/src/inference/tests/unit/compiled_model_test.cpp b/src/inference/tests/unit/compiled_model_test.cpp new file mode 100644 index 00000000000..a8742de5727 --- /dev/null +++ b/src/inference/tests/unit/compiled_model_test.cpp @@ -0,0 +1,187 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/compiled_model.hpp" + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "common_test_utils/test_assertions.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/runtime/iinfer_request.hpp" +#include "openvino/runtime/ivariable_state.hpp" +#include "openvino/runtime/variable_state.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" + +using namespace ::testing; + +namespace { + +struct CompiledModel_Impl { + typedef std::shared_ptr ov::CompiledModel::*type; + friend type get(CompiledModel_Impl); +}; + +template +struct Rob { + friend typename Tag::type get(Tag) { + return M; + } +}; + +template struct Rob; + +} // namespace + +class CompiledModelTests : public ::testing::Test { +private: + std::shared_ptr create_model() { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1, 3, 2, 2}); + param->set_friendly_name("Param"); + param->output(0).set_names({"param"}); + + auto relu = std::make_shared(param); + relu->set_friendly_name("ReLU"); + relu->output(0).set_names({"relu"}); + + return std::make_shared(ov::OutputVector{relu->output(0)}, ov::ParameterVector{param}); + } + +protected: + std::shared_ptr mock_compiled_model; + ov::CompiledModel compiled_model; + std::shared_ptr plugin; + std::shared_ptr model; + + void TearDown() override { + mock_compiled_model.reset(); + compiled_model = {}; + plugin = {}; + } + + void SetUp() override { + model = 
create_model(); + auto mock_plugin = std::make_shared(); + plugin = mock_plugin; + mock_compiled_model = std::make_shared(model, plugin); + compiled_model.*get(CompiledModel_Impl()) = mock_compiled_model; + } +}; + +TEST_F(CompiledModelTests, GetOutputsThrowsIfReturnErr) { + EXPECT_CALL(*mock_compiled_model.get(), outputs()).Times(1).WillOnce(Throw(std::runtime_error{""})); + + ASSERT_THROW(compiled_model.outputs(), std::runtime_error); +} + +TEST_F(CompiledModelTests, GetOutputs) { + std::vector> data; + EXPECT_CALL(*mock_compiled_model.get(), outputs()).Times(1).WillOnce(ReturnRefOfCopy(model->outputs())); + ASSERT_NO_THROW(data = compiled_model.outputs()); + ASSERT_EQ(data, model->outputs()); +} + +TEST_F(CompiledModelTests, GetInputsThrowsIfReturnErr) { + EXPECT_CALL(*mock_compiled_model.get(), inputs()).Times(1).WillOnce(Throw(std::runtime_error{""})); + + ASSERT_THROW(compiled_model.inputs(), std::runtime_error); +} + +TEST_F(CompiledModelTests, GetInputs) { + EXPECT_CALL(*mock_compiled_model.get(), inputs()).Times(1).WillOnce(ReturnRefOfCopy(model->inputs())); + + std::vector> info; + ASSERT_NO_THROW(info = compiled_model.inputs()); + ASSERT_EQ(info, model->inputs()); +} + +class CompiledModelWithIInferReqTests : public CompiledModelTests { +protected: + std::shared_ptr mock_infer_request; + + void SetUp() override { + CompiledModelTests::SetUp(); + mock_infer_request = std::make_shared(); + } +}; + +TEST_F(CompiledModelWithIInferReqTests, CanCreateInferRequest) { + EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request)); + ov::InferRequest actualInferReq; + ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); +} + +TEST_F(CompiledModelWithIInferReqTests, CreateInferRequestThrowsIfReturnNotOK) { + EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Throw(std::runtime_error{""})); + ASSERT_THROW(compiled_model.create_infer_request(), std::runtime_error); +} + 
+TEST_F(CompiledModelWithIInferReqTests, QueryStateThrowsIfReturnErr) { + EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request)); + ov::InferRequest actualInferReq; + ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); + EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillOnce(Throw(std::runtime_error{""})); + EXPECT_THROW(actualInferReq.query_state(), std::runtime_error); +} + +TEST_F(CompiledModelWithIInferReqTests, QueryState) { + EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request)); + ov::InferRequest actualInferReq; + ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request()); + ov::SoPtr state = std::make_shared(); + EXPECT_CALL(*mock_infer_request.get(), query_state()) + .Times(1) + .WillOnce(Return(std::vector>(1, state))); + std::vector MemState_v; + MemState_v = actualInferReq.query_state(); + EXPECT_EQ(MemState_v.size(), 1); +} + +class CompiledModelBaseTests : public ::testing::Test { +protected: + std::shared_ptr mock_compiled_model; + ov::CompiledModel compiled_model; + std::shared_ptr plugin; + + void SetUp() override { + auto mock_plugin = std::make_shared(); + plugin = mock_plugin; + mock_compiled_model = std::make_shared(nullptr, plugin); + compiled_model.*get(CompiledModel_Impl()) = mock_compiled_model; + } +}; + +// CreateInferRequest +TEST_F(CompiledModelBaseTests, canForwardCreateInferRequest) { + auto inferReqInternal = std::make_shared(); + EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).Times(1).WillRepeatedly(Return(inferReqInternal)); + ASSERT_NO_THROW(compiled_model.create_infer_request()); +} + +TEST_F(CompiledModelBaseTests, canReportErrorInCreateInferRequest) { + EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(compiled_model.create_infer_request(), std::runtime_error, "compare"); +} + 
+// Export +TEST_F(CompiledModelBaseTests, canForwardExport) { + std::stringstream out_model; + EXPECT_CALL(*mock_compiled_model.get(), export_model(_)).Times(1); + EXPECT_NO_THROW(compiled_model.export_model(out_model)); +} + +TEST_F(CompiledModelBaseTests, canReportErrorInExport) { + std::stringstream out_model; + EXPECT_CALL(*mock_compiled_model.get(), export_model(_)).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(compiled_model.export_model(out_model), std::runtime_error, "compare"); +} diff --git a/src/inference/tests/unit/infer_request_test.cpp b/src/inference/tests/unit/infer_request_test.cpp new file mode 100644 index 00000000000..dcc59d2952a --- /dev/null +++ b/src/inference/tests/unit/infer_request_test.cpp @@ -0,0 +1,150 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/infer_request.hpp" + +#include +#include + +#include + +#include "common_test_utils/test_assertions.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/runtime/make_tensor.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp" + +using namespace ::testing; +using namespace std; +namespace { + +struct InferRequest_Impl { + typedef std::shared_ptr ov::InferRequest::*type; + friend type get(InferRequest_Impl); +}; + +template +struct Rob { + friend typename Tag::type get(Tag) { + return M; + } +}; + +template struct Rob; + +} // namespace + +class OVInferRequestBaseTests : public ::testing::Test { +protected: + std::shared_ptr mock_impl; + ov::InferRequest request; + + void SetUp() override { + mock_impl.reset(new ov::MockIAsyncInferRequest()); + request.*get(InferRequest_Impl()) = mock_impl; + } +}; + +// start_async +TEST_F(OVInferRequestBaseTests, canForwardStartAsync) { + EXPECT_CALL(*mock_impl.get(), start_async()).Times(1); + ASSERT_NO_THROW(request.start_async()); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInStartAsync) { + 
EXPECT_CALL(*mock_impl.get(), start_async()).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.start_async(), std::runtime_error, "compare"); +} + +// wait +TEST_F(OVInferRequestBaseTests, canForwardWait) { + EXPECT_CALL(*mock_impl.get(), wait()).WillOnce(Return()); + ASSERT_NO_THROW(request.wait()); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInWait) { + EXPECT_CALL(*mock_impl.get(), wait()).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.wait(), std::runtime_error, "compare"); +} + +// Infer +TEST_F(OVInferRequestBaseTests, canForwardInfer) { + EXPECT_CALL(*mock_impl.get(), infer()).Times(1); + ASSERT_NO_THROW(request.infer()); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInInfer) { + EXPECT_CALL(*mock_impl.get(), infer()).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.infer(), std::runtime_error, "compare"); +} + +// get_profiling_info +TEST_F(OVInferRequestBaseTests, canForwardGetPerformanceCounts) { + std::vector info; + EXPECT_CALL(*mock_impl.get(), get_profiling_info()).WillOnce(Return(std::vector{})); + ASSERT_NO_THROW(request.get_profiling_info()); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInGetPerformanceCounts) { + std::vector info; + EXPECT_CALL(*mock_impl.get(), get_profiling_info()).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.get_profiling_info(), std::runtime_error, "compare"); +} + +// get_tensor +TEST_F(OVInferRequestBaseTests, canForwardGetTensor) { + ov::Tensor data; + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1}); + param->output(0).set_names({"test_name"}); + std::vector> inputs{param->output(0)}; + EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), get_tensors(_)).WillOnce(Return(std::vector>{})); + 
EXPECT_CALL(*mock_impl.get(), get_tensor(_)).WillOnce(Return(ov::make_tensor(ov::element::f32, {1, 2, 3, 3}))); + ASSERT_NO_THROW(request.get_tensor("test_name")); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInGetTensor) { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1}); + param->output(0).set_names({"test_name"}); + std::vector> inputs{param->output(0)}; + EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), get_tensors(_)).WillOnce(Return(std::vector>{})); + EXPECT_CALL(*mock_impl.get(), get_tensor(_)).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.get_tensor("test_name"), std::runtime_error, "compare"); +} + +// set_tensor +TEST_F(OVInferRequestBaseTests, canForwardSetTensor) { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1}); + param->output(0).set_names({"test_name"}); + std::vector> inputs{param->output(0)}; + ov::Tensor data; + EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), set_tensor(_, _)).Times(1); + ASSERT_NO_THROW(request.set_tensor("test_name", data)); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInSetTensor) { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1}); + param->output(0).set_names({"test_name"}); + std::vector> inputs{param->output(0)}; + ov::Tensor data; + EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs)); + EXPECT_CALL(*mock_impl.get(), set_tensor(_, _)).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.set_tensor("test_name", data), std::runtime_error, "compare"); +} + +// set_callback +TEST_F(OVInferRequestBaseTests, 
canForwardSetCompletionCallback) { + EXPECT_CALL(*mock_impl.get(), set_callback(_)).Times(1); + ASSERT_NO_THROW(request.set_callback(nullptr)); +} + +TEST_F(OVInferRequestBaseTests, canReportErrorInSetCompletionCallback) { + EXPECT_CALL(*mock_impl.get(), set_callback(_)).WillOnce(Throw(std::runtime_error("compare"))); + OV_EXPECT_THROW_HAS_SUBSTRING(request.set_callback(nullptr), std::runtime_error, "compare"); +} diff --git a/src/inference/tests/unit/iplugin_test.cpp b/src/inference/tests/unit/iplugin_test.cpp new file mode 100644 index 00000000000..f8debebdcc2 --- /dev/null +++ b/src/inference/tests/unit/iplugin_test.cpp @@ -0,0 +1,116 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/runtime/iplugin.hpp" + +#include +#include + +#include "openvino/core/node_output.hpp" +#include "openvino/core/node_vector.hpp" +#include "openvino/core/partial_shape.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/result.hpp" +#include "openvino/runtime/compiled_model.hpp" +#include "openvino/runtime/iinfer_request.hpp" +#include "openvino/runtime/make_tensor.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp" +#include "unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp" + +using namespace ::testing; +using namespace std; + +class IPluginTest : public ::testing::Test { +private: + std::shared_ptr create_model() { + auto param = std::make_shared(ov::element::f32, ov::PartialShape{1, 3, 2, 2}); + param->set_friendly_name("Param"); + param->output(0).set_names({"param"}); + + auto relu = std::make_shared(param); + relu->set_friendly_name("ReLU"); + relu->output(0).set_names({"relu"}); + + return std::make_shared(ov::OutputVector{relu->output(0)}, ov::ParameterVector{param}); + } + +protected: + shared_ptr plugin; + shared_ptr mock_plugin_impl; + shared_ptr 
mock_compiled_model; + shared_ptr mock_infer_request; + std::shared_ptr model = create_model(); + std::string pluginId; + + void TearDown() override { + EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_plugin_impl.get())); + EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_compiled_model.get())); + EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_infer_request.get())); + } + + void SetUp() override { + pluginId = "TEST"; + mock_plugin_impl.reset(new ov::MockIPlugin()); + mock_plugin_impl->set_device_name(pluginId); + plugin = std::static_pointer_cast(mock_plugin_impl); + mock_compiled_model = make_shared(model, plugin); + ON_CALL(*mock_compiled_model.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs())); + ON_CALL(*mock_compiled_model.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs())); + mock_infer_request = make_shared(mock_compiled_model); + } + + void getInferRequestWithMockImplInside(std::shared_ptr& request) { + std::shared_ptr compiled_model; + EXPECT_CALL(*mock_plugin_impl.get(), compile_model(A&>(), _)) + .WillOnce(Return(mock_compiled_model)); + EXPECT_CALL(*mock_compiled_model.get(), create_sync_infer_request()).WillOnce(Return(mock_infer_request)); + ON_CALL(*mock_compiled_model.get(), create_infer_request()).WillByDefault([&]() { + return mock_compiled_model->create_infer_request_default(); + }); + compiled_model = plugin->compile_model(model, {}); + ASSERT_NE(nullptr, compiled_model); + request = compiled_model->create_infer_request(); + ASSERT_NE(nullptr, request); + } +}; + +MATCHER_P(blob_in_map_pointer_is_same, ref_blob, "") { + return reinterpret_cast(arg.begin()->second->buffer()) == reinterpret_cast(ref_blob->buffer()); +} + +TEST_F(IPluginTest, failToSetTensorWithIncorrectPort) { + auto incorrect_param = std::make_shared(ov::element::f32, ov::PartialShape{1, 2}); + ov::SoPtr tensor = ov::make_tensor(ov::element::f32, {1, 1, 1, 1}); + std::string refError = "Cannot find tensor for port"; + std::shared_ptr 
infer_request; + getInferRequestWithMockImplInside(infer_request); + try { + infer_request->set_tensor(incorrect_param->output(0), tensor); + } catch (ov::Exception& ex) { + ASSERT_TRUE(std::string{ex.what()}.find(refError) != std::string::npos) + << "\tExpected: " << refError << "\n\tActual: " << ex.what(); + } +} + +TEST_F(IPluginTest, failToSetEmptyITensor) { + ov::SoPtr tensor; + std::string refError = "Failed to set tensor. "; + std::shared_ptr infer_request; + getInferRequestWithMockImplInside(infer_request); + try { + infer_request->set_tensor(model->input(0), tensor); + } catch (ov::Exception& ex) { + ASSERT_TRUE(std::string{ex.what()}.find(refError) != std::string::npos) + << "\tExpected: " << refError << "\n\tActual: " << ex.what(); + } +} + +TEST_F(IPluginTest, SetTensorWithCorrectPort) { + ov::SoPtr tensor = ov::make_tensor(ov::element::f32, {1, 3, 2, 2}); + std::shared_ptr infer_request; + getInferRequestWithMockImplInside(infer_request); + EXPECT_NO_THROW(infer_request->set_tensor(model->input(0), tensor)); +} diff --git a/src/inference/tests/unit/memory_solver_test.cpp b/src/inference/tests/unit/memory_solver_test.cpp index 665d4f6fd70..b2c95e8ee46 100644 --- a/src/inference/tests/unit/memory_solver_test.cpp +++ b/src/inference/tests/unit/memory_solver_test.cpp @@ -5,14 +5,11 @@ #include "memory_solver.hpp" #include -#include #include using Box = MemorySolver::Box; -IE_SUPPRESS_DEPRECATED_START - TEST(MemSolverTest, CanConstruct) { { // Empty vector MemorySolver ms(std::vector{}); @@ -80,7 +77,7 @@ TEST(MemSolverTest, GetOffsetThrowException) { MemorySolver ms(boxes); ms.solve(); - EXPECT_THROW(ms.getOffset(100), InferenceEngine::Exception); + EXPECT_THROW(ms.getOffset(100), std::runtime_error); } // | diff --git a/src/inference/tests/unit/query_model_test.cpp b/src/inference/tests/unit/query_model_test.cpp index 065d23a3b8c..a2508185e9a 100644 --- a/src/inference/tests/unit/query_model_test.cpp +++ b/src/inference/tests/unit/query_model_test.cpp @@ 
-7,10 +7,17 @@ #include #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" -#include "ngraph/ops.hpp" -#include "ngraph/pass/constant_folding.hpp" -#include "openvino/opsets/opset9.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert.hpp" +#include "openvino/op/log_softmax.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/reduce_l2.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/result.hpp" +#include "openvino/pass/constant_folding.hpp" #include "openvino/pass/manager.hpp" +#include "openvino/runtime/iplugin.hpp" #include "transformations/common_optimizations/common_optimizations.hpp" #include "transformations/common_optimizations/nop_elimination.hpp" #include "transformations/convert_precision.hpp" @@ -41,9 +48,9 @@ protected: public: void Run(std::function&)> transform, - std::function)> is_node_supported, + std::function)> is_node_supported, const std::unordered_set& expected) { - auto supported = InferenceEngine::GetSupportedNodes(m_function, transform, is_node_supported); + auto supported = ov::get_supported_nodes(m_function, transform, is_node_supported); auto const is_in_expected = [&expected](const std::string& x) { return expected.find(x) != expected.end(); }; @@ -59,27 +66,27 @@ public: TEST_F(GetSupportedNodesTest, UnsupportedCompressedConstantCF) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + auto constant_compressed = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1}); constant_compressed->set_friendly_name("constant_compressed"); - auto convert = std::make_shared(constant_compressed, ov::element::f32); + auto convert = std::make_shared(constant_compressed, ov::element::f32); convert->set_friendly_name("constant"); ov::mark_as_decompression(convert); 
- auto add = std::make_shared(param, convert); + auto add = std::make_shared(param, convert); add->set_friendly_name("add"); - auto result = std::make_shared(add); + auto result = std::make_shared(add); result->set_friendly_name("result"); - m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { ov::pass::Manager m; m.register_pass(); - m.register_pass(); + m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op); }, {}); @@ -87,34 +94,34 @@ TEST_F(GetSupportedNodesTest, UnsupportedCompressedConstantCF) { TEST_F(GetSupportedNodesTest, ConstantSubgraphCF) { { - auto constant_compressed1 = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + auto constant_compressed1 = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1}); constant_compressed1->set_friendly_name("constant_compressed1"); - auto convert1 = std::make_shared(constant_compressed1, ov::element::f32); + auto convert1 = std::make_shared(constant_compressed1, ov::element::f32); convert1->set_friendly_name("constant1"); ov::mark_as_decompression(convert1); - auto constant_compressed2 = ngraph::op::Constant::create(ov::element::f16, m_shape, {2}); + auto constant_compressed2 = ov::op::v0::Constant::create(ov::element::f16, m_shape, {2}); constant_compressed2->set_friendly_name("constant_compressed2"); - auto convert2 = std::make_shared(constant_compressed2, ov::element::f32); + auto convert2 = std::make_shared(constant_compressed2, ov::element::f32); convert2->set_friendly_name("constant2"); ov::mark_as_decompression(convert2); - auto add = std::make_shared(convert1, convert2); + auto add = std::make_shared(convert1, convert2); add->set_friendly_name("add"); - auto const_reshape 
= ov::opset9::Constant::create(ngraph::element::i64, ov::Shape{1}, {84}); + auto const_reshape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {84}); const_reshape->set_friendly_name("const_reshape"); - auto reshape = std::make_shared(add, const_reshape, false); + auto reshape = std::make_shared(add, const_reshape, false); reshape->set_friendly_name("reshape"); - auto result = std::make_shared(reshape); + auto result = std::make_shared(reshape); result->set_friendly_name("result"); - m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{}); } Run( [&](std::shared_ptr& model) { ov::pass::Manager m; m.register_pass(); - m.register_pass(); + m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op); }, {"constant_compressed1", @@ -129,44 +136,44 @@ TEST_F(GetSupportedNodesTest, ConstantSubgraphCF) { TEST_F(GetSupportedNodesTest, SupportedCompressedConstantNop) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + auto constant_compressed = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1}); constant_compressed->set_friendly_name("constant_compressed"); - auto convert = std::make_shared(constant_compressed, ov::element::f32); + auto convert = std::make_shared(constant_compressed, ov::element::f32); convert->set_friendly_name("constant"); - auto add = std::make_shared(param, convert); + auto add = std::make_shared(param, convert); add->set_friendly_name("add"); - auto result = std::make_shared(add); + auto result = std::make_shared(add); result->set_friendly_name("result"); - m_function 
= std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { ov::pass::Manager m; m.register_pass(); - m.register_pass(precisions_map{{ngraph::element::f16, ngraph::element::f32}}); + m.register_pass(precisions_map{{ov::element::f16, ov::element::f32}}); m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr); }, {"input", "constant_compressed", "constant", "add", "result"}); } TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto mul_const = ngraph::op::Constant::create(ov::element::f32, m_shape, {1}); + auto mul_const = ov::op::v0::Constant::create(ov::element::f32, m_shape, {1}); mul_const->set_friendly_name("constant"); - auto mul = std::make_shared(param, mul_const); + auto mul = std::make_shared(param, mul_const); mul->set_friendly_name("output_operation"); - auto result = std::make_shared(mul); + auto result = std::make_shared(mul); result->set_friendly_name("result"); - m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { @@ -174,12 +181,12 @@ TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) { m.register_pass(); m.run_passes(model); for (auto& op : model->get_ops()) { - if (std::dynamic_pointer_cast(op) != nullptr) { + if (std::dynamic_pointer_cast(op) != nullptr) { // Add one more dummy operation auto consumers 
= op->output(0).get_target_inputs(); auto shape = op->get_shape(); - auto add_const = ngraph::op::Constant::create(ov::element::f32, m_shape, {0}); - auto add = std::make_shared(op, add_const); + auto add_const = ov::op::v0::Constant::create(ov::element::f32, m_shape, {0}); + auto add = std::make_shared(op, add_const); add->set_friendly_name(op->get_friendly_name()); op->set_friendly_name(op->get_friendly_name() + "/previous"); ov::copy_runtime_info(op, add); @@ -189,89 +196,89 @@ TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) { } } }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr); }, {"input", "constant", "output_operation", "result"}); } TEST_F(GetSupportedNodesTest, PartiallySupportedCompressedConstant) { { - auto param1 = std::make_shared(ov::element::f32, m_shape); + auto param1 = std::make_shared(ov::element::f32, m_shape); param1->set_friendly_name("input1"); - auto param2 = std::make_shared(ov::element::f32, m_shape); + auto param2 = std::make_shared(ov::element::f32, m_shape); param2->set_friendly_name("input2"); - auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1}); + auto constant_compressed = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1}); constant_compressed->set_friendly_name("constant_compressed"); - auto convert = std::make_shared(constant_compressed, ov::element::f32); + auto convert = std::make_shared(constant_compressed, ov::element::f32); convert->set_friendly_name("constant"); ov::mark_as_decompression(convert); - auto add = std::make_shared(param1, convert); + auto add = std::make_shared(param1, convert); add->set_friendly_name("add"); - auto result1 = 
std::make_shared(add); + auto result1 = std::make_shared(add); result1->set_friendly_name("result1"); - auto mul = std::make_shared(param2, convert); + auto mul = std::make_shared(param2, convert); mul->set_friendly_name("mul"); - auto result2 = std::make_shared(mul); + auto result2 = std::make_shared(mul); result2->set_friendly_name("result2"); - m_function = std::make_shared(ngraph::ResultVector{result1, result2}, - ngraph::ParameterVector{param1, param2}); + m_function = + std::make_shared(ov::ResultVector{result1, result2}, ov::ParameterVector{param1, param2}); } Run( [&](std::shared_ptr& model) { ov::pass::Manager m; m.register_pass(); - m.register_pass(); + m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr); }, {"input2", "constant_compressed", "constant", "mul", "result2"}); } TEST_F(GetSupportedNodesTest, ConstantSubgraphSupported) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto weights = ov::opset9::Constant::create(ov::element::Type_t::f32, {10, 84}, {1}); + auto weights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {10, 84}, {1}); weights->set_friendly_name("weights"); - auto shapeOf = std::make_shared(weights); + auto shapeOf = std::make_shared(weights); shapeOf->set_friendly_name("shapeof"); - auto const1 = ov::opset9::Constant::create(ov::element::Type_t::i32, {1}, {1}); + auto const1 = ov::op::v0::Constant::create(ov::element::Type_t::i32, {1}, {1}); const1->set_friendly_name("const1"); - auto const2 = ov::opset9::Constant::create(ov::element::Type_t::i64, {}, {0}); + auto const2 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {}, {0}); 
const2->set_friendly_name("const2"); - auto gather = std::make_shared(shapeOf, const1, const2); + auto gather = std::make_shared(shapeOf, const1, const2); gather->set_friendly_name("gather"); - auto const3 = ov::opset9::Constant::create(ov::element::Type_t::i64, {1}, {1}); + auto const3 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {1}, {1}); const3->set_friendly_name("const3"); - auto concat = std::make_shared(ov::NodeVector{const3, gather}, 0); + auto concat = std::make_shared(ov::NodeVector{const3, gather}, 0); concat->set_friendly_name("concat"); - auto reshape = std::make_shared(param, concat, false); + auto reshape = std::make_shared(param, concat, false); reshape->set_friendly_name("reshape"); - auto matmul = std::make_shared(reshape, weights, false, true); + auto matmul = std::make_shared(reshape, weights, false, true); matmul->set_friendly_name("matmul"); - auto result = std::make_shared(matmul); + auto result = std::make_shared(matmul); result->set_friendly_name("result"); - m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { ov::pass::Manager m; m.register_pass(); - m.register_pass(); + m.register_pass(); m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr); }, {"input", "weights", @@ -288,76 +295,75 @@ TEST_F(GetSupportedNodesTest, ConstantSubgraphSupported) { TEST_F(GetSupportedNodesTest, UnmarkedSupportedInputsOutputs) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto constant = 
ngraph::op::Constant::create(ov::element::f32, ov::Shape{m_shape[1]}, {1}); + auto constant = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{m_shape[1]}, {1}); constant->set_friendly_name("constant"); - auto const_reshape = ov::opset9::Constant::create(ngraph::element::i64, ov::Shape{2}, m_shape); + auto const_reshape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, m_shape); const_reshape->set_friendly_name("const_reshape"); - auto reshape = std::make_shared(constant, const_reshape, false); + auto reshape = std::make_shared(constant, const_reshape, false); reshape->set_friendly_name("reshape"); - auto add = std::make_shared(param, reshape); + auto add = std::make_shared(param, reshape); add->set_friendly_name("add"); - auto result = std::make_shared(add); + auto result = std::make_shared(add); result->set_friendly_name("result"); - m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { ov::pass::Manager m; m.register_pass(); - m.register_pass(); + m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { // Plugin don't mark input, constant and result as supported - return (std::dynamic_pointer_cast(op) != nullptr); + return (std::dynamic_pointer_cast(op) != nullptr); }, {"add"}); } TEST_F(GetSupportedNodesTest, WrongFusedNamesInOriginalModel) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto weights = ov::opset9::Constant::create(ov::element::Type_t::f32, {10, 84}, {1}); + auto weights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {10, 84}, {1}); weights->set_friendly_name("weights"); - auto matmul = std::make_shared(param, weights, false, true); + auto matmul = std::make_shared(param, weights, false, 
true); matmul->get_rt_info()[ov::FusedNames::get_type_info_static()] = ov::FusedNames("add"); matmul->set_friendly_name("matmul"); - auto constant = ngraph::op::Constant::create(ov::element::f32, {1, 10}, {1}); + auto constant = ov::op::v0::Constant::create(ov::element::f32, {1, 10}, {1}); constant->set_friendly_name("constant"); - auto add = std::make_shared(matmul, constant); + auto add = std::make_shared(matmul, constant); add->get_rt_info()[ov::FusedNames::get_type_info_static()] = ov::FusedNames("matmul"); add->set_friendly_name("add"); - auto result = std::make_shared(add); + auto result = std::make_shared(add); result->set_friendly_name("result"); - m_function = std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { return; }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr); }, {"input", "weights", "matmul"}); } TEST_F(GetSupportedNodesTest, FusedNamesSupportedUnsupportedBoth) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto dummy_param = std::make_shared(ov::element::f32, m_shape); + auto dummy_param = std::make_shared(ov::element::f32, m_shape); dummy_param->set_friendly_name("dummy_param"); - auto logsoftmax = std::make_shared(param, 1); + auto logsoftmax = std::make_shared(param, 1); logsoftmax->set_friendly_name("logsoftmax"); - auto result = std::make_shared(logsoftmax); + auto result = std::make_shared(logsoftmax); result->set_friendly_name("result"); - m_function = - std::make_shared(ngraph::ResultVector{result}, ngraph::ParameterVector{param, dummy_param}); + m_function = 
std::make_shared(ov::ResultVector{result}, ov::ParameterVector{param, dummy_param}); } Run( [&](std::shared_ptr& model) { @@ -366,47 +372,46 @@ TEST_F(GetSupportedNodesTest, FusedNamesSupportedUnsupportedBoth) { m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { // Exp is not supported and all constants are missing return ov::op::util::is_parameter(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr) || - (std::dynamic_pointer_cast(op) != nullptr) || - (std::dynamic_pointer_cast(op) != nullptr) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr) || + (std::dynamic_pointer_cast(op) != nullptr); }, {"dummy_param"}); // kepp dummy only since it has no unsupported consumers } TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) { { - auto param = std::make_shared(ov::element::f32, m_shape); + auto param = std::make_shared(ov::element::f32, m_shape); param->set_friendly_name("input"); - auto slope_compressed = ov::opset9::Constant::create(ngraph::element::f16, ngraph::Shape{}, {-2.f}); + auto slope_compressed = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{}, {-2.f}); slope_compressed->set_friendly_name("slope_compressed"); - auto convert_slope = std::make_shared(slope_compressed, ov::element::f32); + auto convert_slope = std::make_shared(slope_compressed, ov::element::f32); convert_slope->set_friendly_name("slope"); ov::mark_as_decompression(convert_slope); - auto prelu = std::make_shared(param, convert_slope); + auto prelu = std::make_shared(param, convert_slope); prelu->set_friendly_name("prelu"); - auto shapeOf = std::make_shared(prelu); + auto shapeOf = std::make_shared(prelu); shapeOf->set_friendly_name("shapeof"); - auto convert_fp32 = std::make_shared(shapeOf, ov::element::f32); + auto convert_fp32 = 
std::make_shared(shapeOf, ov::element::f32); convert_fp32->set_friendly_name("convert_fp32"); - auto scale = ov::opset9::Constant::create(ngraph::element::f32, ngraph::Shape{}, {2.0f}); + auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {2.0f}); scale->set_friendly_name("scale"); - auto mul_scale = std::make_shared(convert_fp32, scale); + auto mul_scale = std::make_shared(convert_fp32, scale); mul_scale->set_friendly_name("mul_scale"); - auto convert_i64 = std::make_shared(mul_scale, ov::element::i64); + auto convert_i64 = std::make_shared(mul_scale, ov::element::i64); convert_i64->set_friendly_name("convert_i64"); - auto interpolate = std::make_shared(prelu, + auto interpolate = std::make_shared(prelu, convert_i64, scale, - ov::opset9::Interpolate::InterpolateAttrs()); + ov::op::v4::Interpolate::InterpolateAttrs()); interpolate->set_friendly_name("interpolate"); - auto interpolate_result = std::make_shared(interpolate); + auto interpolate_result = std::make_shared(interpolate); interpolate_result->set_friendly_name("interpolate_result"); - m_function = - std::make_shared(ngraph::ResultVector{interpolate_result}, ngraph::ParameterVector{param}); + m_function = std::make_shared(ov::ResultVector{interpolate_result}, ov::ParameterVector{param}); } Run( [&](std::shared_ptr& model) { @@ -415,9 +420,9 @@ TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) { m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) || - (std::dynamic_pointer_cast(op) != nullptr); + (std::dynamic_pointer_cast(op) != nullptr); }, {"input", "slope_compressed", "slope", "prelu"}); // keep dummy only since it has no unsupported consumers } @@ -425,28 +430,28 @@ TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) { TEST_F(GetSupportedNodesTest, ShuffleChannelFusion) { { ov::Shape input_shape = {1, 112, 56, 56}; - 
auto input = std::make_shared(ov::element::f32, input_shape); + auto input = std::make_shared(ov::element::f32, input_shape); input->set_friendly_name("input"); ov::Shape reshape_before_shape = {1, 4, 28, 56, 56}; - auto shape_reshape_before = ov::opset9::Constant::create(ov::element::i64, + auto shape_reshape_before = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{reshape_before_shape.size()}, reshape_before_shape); shape_reshape_before->set_friendly_name("shape_reshape_before"); - auto reshape_before = std::make_shared(input, shape_reshape_before, true); + auto reshape_before = std::make_shared(input, shape_reshape_before, true); reshape_before->set_friendly_name("reshape_before"); ov::Shape permute_order = {0, 2, 1, 3, 4}; auto permutation = - ov::opset9::Constant::create(ov::element::i64, ov::Shape{permute_order.size()}, permute_order); + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{permute_order.size()}, permute_order); permutation->set_friendly_name("permutation"); - auto permute = std::make_shared(reshape_before, permutation); + auto permute = std::make_shared(reshape_before, permutation); permute->set_friendly_name("permute"); auto shape_reshape_after = - ov::opset9::Constant::create(ov::element::i64, ov::Shape{input_shape.size()}, input_shape); + ov::op::v0::Constant::create(ov::element::i64, ov::Shape{input_shape.size()}, input_shape); shape_reshape_after->set_friendly_name("shape_reshape_after"); - auto reshape_after = std::make_shared(permute, shape_reshape_after, true); + auto reshape_after = std::make_shared(permute, shape_reshape_after, true); reshape_after->set_friendly_name("reshape_after"); m_function = std::make_shared(ov::NodeVector{reshape_after}, ov::ParameterVector{input}); @@ -466,14 +471,14 @@ TEST_F(GetSupportedNodesTest, ShuffleChannelFusion) { TEST_F(GetSupportedNodesTest, FusedNameReduceL2Test) { { - auto data = std::make_shared(ov::element::f32, ov::Shape{1, 512}); + auto data = std::make_shared(ov::element::f32, 
ov::Shape{1, 512}); data->set_friendly_name("data"); - auto axes = ov::opset9::Constant::create(ov::element::i64, ov::Shape{1}, {1}); + auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1}); axes->set_friendly_name("axes"); - auto reduce_l2 = std::make_shared(data, axes, true); + auto reduce_l2 = std::make_shared(data, axes, true); reduce_l2->set_friendly_name("reduce_l2"); - m_function = std::make_shared(ov::NodeVector{reduce_l2}, ov::ParameterVector{data}); + m_function = std::make_shared(ov::NodeVector{reduce_l2}, ov::ParameterVector{data}); } Run( [&](std::shared_ptr& model) { @@ -483,7 +488,7 @@ TEST_F(GetSupportedNodesTest, FusedNameReduceL2Test) { m.register_pass(); m.run_passes(model); }, - [&](const std::shared_ptr& op) { + [&](const std::shared_ptr& op) { // Pooling is supported, but Sqrt is not return ov::op::util::is_parameter(op) || ov::op::util::is_output(op) || ov::op::util::is_constant(op) || (std::dynamic_pointer_cast(op) != nullptr); diff --git a/src/inference/tests/unit/variable_state_test.cpp b/src/inference/tests/unit/variable_state_test.cpp new file mode 100644 index 00000000000..676401a7e8c --- /dev/null +++ b/src/inference/tests/unit/variable_state_test.cpp @@ -0,0 +1,193 @@ +// // Copyright (C) 2018-2023 Intel Corporation +// // SPDX-License-Identifier: Apache-2.0 +// // +// +// #include "openvino/runtime/variable_state.hpp" +// +// #include +// +// #include "openvino/runtime/iasync_infer_request.hpp" +// #include "openvino/runtime/infer_request.hpp" +// #include "openvino/runtime/iplugin.hpp" +// #include "openvino/runtime/ivariable_state.hpp" +// #include "openvino/runtime/make_tensor.hpp" +// #include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp" +// #include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp" +// #include "unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp" +// +// using namespace ::testing; +// using namespace std; +// +// namespace { +// +// 
struct InferRequest_Impl { +// typedef std::shared_ptr ov::InferRequest::*type; +// friend type get(InferRequest_Impl); +// }; +// +// template +// struct Rob { +// friend typename Tag::type get(Tag) { +// return M; +// } +// }; +// +// template struct Rob; +// +// } // namespace +// +// class VariableStateTests : public ::testing::Test { +// protected: +// shared_ptr mock_infer_request; +// shared_ptr mock_variable_state; +// ov::InferRequest req; +// +// void SetUp() override { +// mock_infer_request = make_shared(); +// mock_variable_state = make_shared(); +// req.*get(InferRequest_Impl()) = mock_infer_request; +// } +// }; +// +// class VariableStateMockImpl : public ov::IVariableState { +// public: +// VariableStateMockImpl(const std::string& name) : ov::IVariableState(name) {} +// MOCK_METHOD0(reset, void()); +// }; +// +// TEST_F(VariableStateTests, VariableStateInternalCanSaveName) { +// std::shared_ptr pState(new VariableStateMockImpl("VariableStateMockImpl")); +// ASSERT_STREQ(pState->get_name().c_str(), "VariableStateMockImpl"); +// } +// +// TEST_F(VariableStateTests, VariableStateInternalCanSaveState) { +// std::shared_ptr pState(new VariableStateMockImpl("VariableStateMockImpl")); +// float data[] = {123, 124, 125}; +// auto state_tensor = ov::make_tensor(ov::element::f32, {3}, data); +// +// pState->set_state(state_tensor); +// auto saver = pState->get_state(); +// +// ASSERT_NE(saver, nullptr); +// ASSERT_FLOAT_EQ(saver->data()[0], 123); +// ASSERT_FLOAT_EQ(saver->data()[1], 124); +// ASSERT_FLOAT_EQ(saver->data()[2], 125); +// } +// +// TEST_F(VariableStateTests, VariableStateInternalCanSaveStateByReference) { +// std::shared_ptr pState(new VariableStateMockImpl("VariableStateMockImpl")); +// float data[] = {123, 124, 125}; +// auto state_tensor = ov::make_tensor(ov::element::f32, {3}, data); +// +// pState->set_state(state_tensor); +// +// data[0] = 121; +// data[1] = 122; +// data[2] = 123; +// auto saver = pState->get_state(); +// +// 
ASSERT_NE(saver, nullptr); +// ASSERT_FLOAT_EQ(saver->data()[0], 121); +// ASSERT_FLOAT_EQ(saver->data()[1], 122); +// ASSERT_FLOAT_EQ(saver->data()[2], 123); +// } +// +// // Tests for InferRequest::QueryState +// TEST_F(VariableStateTests, InferRequestCanConvertOneVariableStateFromCppToAPI) { +// std::vector> toReturn(1); +// toReturn[0] = mock_variable_state; +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn)); +// +// auto state = req.query_state(); +// ASSERT_EQ(state.size(), 1); +// } +// +// TEST_F(VariableStateTests, InferRequestCanConvertZeroVariableStateFromCppToAPI) { +// std::vector> toReturn; +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).WillOnce(Return(toReturn)); +// +// auto state = req.query_state(); +// ASSERT_EQ(state.size(), 0); +// } +// +// TEST_F(VariableStateTests, InferRequestCanConvert2VariableStatesFromCPPtoAPI) { +// std::vector> toReturn; +// toReturn.push_back(mock_variable_state); +// toReturn.push_back(mock_variable_state); +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn)); +// +// auto state = req.query_state(); +// ASSERT_EQ(state.size(), 2); +// } +// +// TEST_F(VariableStateTests, InfReqVariableStatePropagatesReset) { +// std::vector> toReturn; +// toReturn.push_back(mock_variable_state); +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn)); +// EXPECT_CALL(*mock_variable_state.get(), reset()).Times(1); +// +// auto state = req.query_state(); +// state.front().reset(); +// } +// +// TEST_F(VariableStateTests, InfReqVariableStatePropagatesExceptionsFromReset) { +// std::vector> toReturn; +// toReturn.push_back(mock_variable_state); +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn)); +// EXPECT_CALL(*mock_variable_state.get(), reset()).WillOnce(Throw(std::logic_error("some error"))); +// +// auto state = 
req.query_state(); +// EXPECT_ANY_THROW(state.front().reset()); +// } +// +// TEST_F(VariableStateTests, InfReqVariableStatePropagatesGetName) { +// std::vector> toReturn; +// std::string test_name = "someName"; +// toReturn.push_back(mock_variable_state); +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn)); +// EXPECT_CALL(*mock_variable_state.get(), get_name()).WillOnce(ReturnRef(test_name)); +// +// auto state = req.query_state(); +// EXPECT_STREQ(state.front().get_name().c_str(), "someName"); +// } +// +// TEST_F(VariableStateTests, InfReqVariableStateCanPropagateSetState) { +// std::vector> toReturn; +// ov::SoPtr saver; +// toReturn.push_back(mock_variable_state); +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).WillRepeatedly(Return(toReturn)); +// EXPECT_CALL(*mock_variable_state.get(), set_state(_)).WillOnce(SaveArg<0>(&saver)); +// +// float data[] = {123, 124, 125}; +// auto stateBlob = ov::Tensor(ov::element::f32, {3}, data); +// +// EXPECT_NO_THROW(req.query_state().front().set_state(stateBlob)); +// ASSERT_FLOAT_EQ(saver->data()[0], 123); +// ASSERT_FLOAT_EQ(saver->data()[1], 124); +// ASSERT_FLOAT_EQ(saver->data()[2], 125); +// } +// +// TEST_F(VariableStateTests, DISABLED_InfReqVariableStateCanPropagateGetLastState) { +// std::vector> toReturn; +// +// float data[] = {123, 124, 125}; +// auto stateBlob = ov::make_tensor(ov::element::f32, {3}, data); +// +// toReturn.push_back(mock_variable_state); +// +// EXPECT_CALL(*mock_infer_request.get(), query_state()).WillRepeatedly(Return(toReturn)); +// EXPECT_CALL(*mock_variable_state.get(), get_state()).WillOnce(ReturnRef(stateBlob)); +// +// auto saver = req.query_state().front().get_state(); +// ASSERT_TRUE(saver); +// ASSERT_FLOAT_EQ(saver.data()[0], 123); +// ASSERT_FLOAT_EQ(saver.data()[1], 124); +// ASSERT_FLOAT_EQ(saver.data()[2], 125); +// } diff --git 
a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp index 2dd4c64f927..f2b26523c5c 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/test_assertions.hpp @@ -4,75 +4,63 @@ #pragma once - #include -#include "openvino/util/pp.hpp" -#include "gtest/gtest.h" #include "gmock/gmock-matchers.h" -#include "openvino/core/deprecated.hpp" +#include "gtest/gtest.h" #include "ie_blob.h" #include "ie_data.h" -#include "ie_preprocess.hpp" #include "ie_input_info.hpp" +#include "ie_preprocess.hpp" +#include "openvino/core/deprecated.hpp" +#include "openvino/util/pp.hpp" -inline bool strContains(const std::string & str, const std::string & substr) { +inline bool strContains(const std::string& str, const std::string& substr) { return str.find(substr) != std::string::npos; } -inline bool strDoesnotContain(const std::string & str, const std::string & substr) { +inline bool strDoesnotContain(const std::string& str, const std::string& substr) { return !strContains(str, substr); } -#define ASSERT_STR_CONTAINS(str, substr) \ - ASSERT_PRED2(&strContains, str, substr) +#define ASSERT_STR_CONTAINS(str, substr) ASSERT_PRED2(&strContains, str, substr) -#define ASSERT_STR_DOES_NOT_CONTAIN(str, substr) \ - ASSERT_PRED2(&strDoesnotContain, str, substr) +#define ASSERT_STR_DOES_NOT_CONTAIN(str, substr) ASSERT_PRED2(&strDoesnotContain, str, substr) -#define EXPECT_STR_CONTAINS(str, substr) \ - EXPECT_PRED2(&strContains, str, substr) +#define EXPECT_STR_CONTAINS(str, substr) EXPECT_PRED2(&strContains, str, substr) -#define ASSERT_BLOB_EQ(lhs, rhs) \ - compare_blob(lhs, rhs) +#define ASSERT_BLOB_EQ(lhs, rhs) compare_blob(lhs, rhs) -#define ASSERT_DIMS_EQ(lhs, rhs) \ - compare_dims(lhs, rhs) +#define ASSERT_DIMS_EQ(lhs, rhs) compare_dims(lhs, rhs) -#define 
ASSERT_DATA_EQ(lhs, rhs) \ - compare_data(lhs, rhs) +#define ASSERT_DATA_EQ(lhs, rhs) compare_data(lhs, rhs) -#define ASSERT_PREPROCESS_CHANNEL_EQ(lhs, rhs) \ - compare_preprocess(lhs, rhs) +#define ASSERT_PREPROCESS_CHANNEL_EQ(lhs, rhs) compare_preprocess(lhs, rhs) -#define ASSERT_PREPROCESS_INFO_EQ(lhs, rhs) \ - compare_preprocess_info(lhs, rhs) +#define ASSERT_PREPROCESS_INFO_EQ(lhs, rhs) compare_preprocess_info(lhs, rhs) -#define ASSERT_OUTPUTS_INFO_EQ(lhs, rhs) \ - compare_outputs_info(lhs, rhs) +#define ASSERT_OUTPUTS_INFO_EQ(lhs, rhs) compare_outputs_info(lhs, rhs) -#define ASSERT_INPUTS_INFO_EQ(lhs, rhs) \ - compare_inputs_info(lhs, rhs) +#define ASSERT_INPUTS_INFO_EQ(lhs, rhs) compare_inputs_info(lhs, rhs) -#define ASSERT_STRINGEQ(lhs, rhs) \ - compare_cpp_strings(lhs, rhs) +#define ASSERT_STRINGEQ(lhs, rhs) compare_cpp_strings(lhs, rhs) -#define OV_ASSERT_NO_THROW(statement) \ - OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define OV_ASSERT_NO_THROW(statement) OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_) -#define OV_ASSERT_NO_THROW_(statement, fail) \ - GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ - if (::testing::internal::AlwaysTrue()) { \ - try { \ - GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ - } catch (const std::exception& e) { \ - fail("Expected: " #statement " doesn't throw an exception.\n" \ - " Actual: it throws.") << e.what(); \ - } catch (...) { \ - fail("Expected: " #statement " doesn't throw an exception.\n" \ - " Actual: it throws."); \ - } \ - } +#define OV_ASSERT_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } catch (const std::exception& e) { \ + fail("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: it throws.") \ + << e.what(); \ + } catch (...) 
{ \ + fail("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: it throws."); \ + } \ + } #define OV_EXPECT_THROW(statement, exp_exception, exception_what_matcher) \ try { \ @@ -86,32 +74,46 @@ inline bool strDoesnotContain(const std::string & str, const std::string & subst FAIL() << "Unknown exception"; \ } +#define OV_EXPECT_THROW_HAS_SUBSTRING(statement, exp_exception, exception_what_matcher) \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + FAIL() << "Expected exception " << OV_PP_TOSTRING(exp_exception); \ + } catch (const exp_exception& ex) { \ + EXPECT_THAT(ex.what(), ::testing::HasSubstr(exception_what_matcher)); \ + } catch (const std::exception& e) { \ + FAIL() << "Unexpected exception " << e.what(); \ + } catch (...) { \ + FAIL() << "Unknown exception"; \ + } + OPENVINO_SUPPRESS_DEPRECATED_START inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) { ASSERT_EQ(lhs.get(), rhs.get()); - //TODO: add blob specific comparison for general case + // TODO: add blob specific comparison for general case } -inline void compare_dims(const InferenceEngine::SizeVector & lhs, const InferenceEngine::SizeVector & rhs) { +inline void compare_dims(const InferenceEngine::SizeVector& lhs, const InferenceEngine::SizeVector& rhs) { ASSERT_EQ(lhs.size(), rhs.size()); for (size_t i = 0; i < lhs.size(); i++) { ASSERT_EQ(lhs[i], rhs[i]); } } -inline void compare_data(const InferenceEngine::Data & lhs, const InferenceEngine::Data & rhs) { +inline void compare_data(const InferenceEngine::Data& lhs, const InferenceEngine::Data& rhs) { ASSERT_DIMS_EQ(lhs.getDims(), rhs.getDims()); ASSERT_STREQ(lhs.getName().c_str(), rhs.getName().c_str()); ASSERT_EQ(lhs.getPrecision(), rhs.getPrecision()); } -inline void compare_preprocess(const InferenceEngine::PreProcessChannel & lhs, const InferenceEngine::PreProcessChannel & rhs) { +inline void compare_preprocess(const InferenceEngine::PreProcessChannel& lhs, + const 
InferenceEngine::PreProcessChannel& rhs) { ASSERT_FLOAT_EQ(lhs.meanValue, rhs.meanValue); ASSERT_FLOAT_EQ(lhs.stdScale, rhs.stdScale); ASSERT_BLOB_EQ(lhs.meanData, rhs.meanData); } -inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo & lhs, const InferenceEngine::PreProcessInfo & rhs) { +inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo& lhs, + const InferenceEngine::PreProcessInfo& rhs) { ASSERT_EQ(lhs.getMeanVariant(), rhs.getMeanVariant()); ASSERT_EQ(lhs.getNumberOfChannels(), rhs.getNumberOfChannels()); for (size_t i = 0; i < lhs.getNumberOfChannels(); i++) { @@ -119,18 +121,19 @@ inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo & lhs, } } -inline void compare_outputs_info(const InferenceEngine::OutputsDataMap & lhs, const InferenceEngine::OutputsDataMap & rhs) { +inline void compare_outputs_info(const InferenceEngine::OutputsDataMap& lhs, + const InferenceEngine::OutputsDataMap& rhs) { ASSERT_EQ(lhs.size(), rhs.size()); auto i = lhs.begin(); auto j = rhs.begin(); - for (size_t k =0; k != lhs.size(); k++, i++, j++) { + for (size_t k = 0; k != lhs.size(); k++, i++, j++) { ASSERT_STREQ(i->first.c_str(), j->first.c_str()); ASSERT_DATA_EQ(*i->second.get(), *j->second.get()); } } -inline void compare_inputs_info(const InferenceEngine::InputsDataMap & lhs, const InferenceEngine::InputsDataMap & rhs) { +inline void compare_inputs_info(const InferenceEngine::InputsDataMap& lhs, const InferenceEngine::InputsDataMap& rhs) { ASSERT_EQ(lhs.size(), rhs.size()); auto i = lhs.begin(); auto j = rhs.begin(); @@ -144,6 +147,6 @@ inline void compare_inputs_info(const InferenceEngine::InputsDataMap & lhs, cons } OPENVINO_SUPPRESS_DEPRECATED_END -inline void compare_cpp_strings(const std::string & lhs, const std::string &rhs) { +inline void compare_cpp_strings(const std::string& lhs, const std::string& rhs) { ASSERT_STREQ(lhs.c_str(), rhs.c_str()); } diff --git 
a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp new file mode 100644 index 00000000000..51d6c325fad --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include +#include +#include +#include + +#include "openvino/runtime/iasync_infer_request.hpp" +#include "openvino/runtime/ivariable_state.hpp" +#include "openvino/runtime/profiling_info.hpp" + +namespace ov { + +class MockIAsyncInferRequest : public ov::IAsyncInferRequest { +public: + MockIAsyncInferRequest() : ov::IAsyncInferRequest(nullptr, nullptr, nullptr) {} + + MOCK_METHOD(void, start_async, ()); + MOCK_METHOD(void, wait, ()); + MOCK_METHOD(bool, wait_for, (const std::chrono::milliseconds&)); + MOCK_METHOD(void, cancel, ()); + MOCK_METHOD(void, set_callback, (std::function)); + MOCK_METHOD(void, infer, ()); + MOCK_METHOD(std::vector, get_profiling_info, (), (const)); + MOCK_METHOD(std::vector>, query_state, (), (const)); + MOCK_METHOD(ov::SoPtr, get_tensor, (const ov::Output&), (const)); + MOCK_METHOD(std::vector>, get_tensors, (const ov::Output&), (const)); + MOCK_METHOD(void, set_tensor, (const ov::Output&, const ov::SoPtr&)); + MOCK_METHOD(void, set_tensors, (const ov::Output&, const std::vector>&)); + MOCK_METHOD(const std::vector>&, get_inputs, (), (const)); + MOCK_METHOD(const std::vector>&, get_outputs, (), (const)); +}; + +} // namespace ov diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp new file mode 100644 index 00000000000..ad9a12f8205 --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp 
@@ -0,0 +1,39 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include <gmock/gmock.h> + +#include <memory> +#include <string> +#include <vector> + +#include "openvino/runtime/iasync_infer_request.hpp" +#include "openvino/runtime/icompiled_model.hpp" + +namespace ov { + +class MockICompiledModel : public ov::ICompiledModel { +public: + MockICompiledModel(const std::shared_ptr<const ov::Model>& model, const std::shared_ptr<const ov::IPlugin>& plugin) + : ov::ICompiledModel(model, plugin) {} + MOCK_METHOD(const std::vector<ov::Output<const ov::Node>>&, outputs, (), (const)); + MOCK_METHOD(const std::vector<ov::Output<const ov::Node>>&, inputs, (), (const)); + MOCK_METHOD(std::shared_ptr<ov::IAsyncInferRequest>, create_infer_request, (), (const)); + MOCK_METHOD(std::shared_ptr<const ov::Model>, get_runtime_model, (), (const)); + MOCK_METHOD(void, export_model, (std::ostream&), (const)); + + MOCK_METHOD(void, set_property, (const ov::AnyMap& config)); + MOCK_METHOD(ov::Any, get_property, (const std::string& name), (const)); + MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_context, (), (const)); + + MOCK_CONST_METHOD0(create_sync_infer_request, std::shared_ptr<ov::ISyncInferRequest>(void)); + + std::shared_ptr<ov::IAsyncInferRequest> create_infer_request_default() const { + return create_async_infer_request(); + } +}; + +} // namespace ov diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp new file mode 100644 index 00000000000..f4a903be89e --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_icore.hpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include <gmock/gmock.h> + +#include "ie_icore.hpp" +#include "openvino/runtime/icompiled_model.hpp" + +namespace ov { + +class MockICore : public ov::ICore { +public: + MOCK_METHOD(ov::Any, get_property, (const std::string&, const std::string&, const ov::AnyMap&), (const)); + MOCK_METHOD(ov::Any, get_property, (const std::string&, const std::string&), (const)); + MOCK_METHOD(ov::AnyMap,
get_supported_property, (const std::string&, const ov::AnyMap&), (const)); + + MOCK_METHOD(bool, is_new_api, (), (const)); + MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, + create_context, + (const std::string& deviceName, const ov::AnyMap& params), + (const)); + MOCK_METHOD(std::vector<std::string>, get_available_devices, (), (const)); + MOCK_METHOD(ov::SupportedOpsMap, + query_model, + (const std::shared_ptr<const ov::Model>&, const std::string&, const ov::AnyMap&), + (const)); + MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>, + import_model, + (std::istream&, const std::string&, const ov::AnyMap&), + (const)); + MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>, + compile_model, + (const std::shared_ptr<const ov::Model>&, const std::string&, const ov::AnyMap&), + (const)); + MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>, + compile_model, + (const std::shared_ptr<const ov::Model>&, const ov::SoPtr<ov::IRemoteContext>&, const ov::AnyMap&), + (const)); + MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>, + compile_model, + (const std::string&, const std::string&, const ov::AnyMap&), + (const)); + MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>, + compile_model, + (const std::string&, const ov::Tensor&, const std::string&, const ov::AnyMap&), + (const)); + MOCK_METHOD(std::shared_ptr<ov::Model>, read_model, (const std::string&, const ov::Tensor&, bool), (const)); + MOCK_METHOD(std::shared_ptr<ov::Model>, read_model, (const std::string&, const std::string&), (const)); + MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_default_context, (const std::string&), (const)); + MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>, + import_model, + (std::istream&, const ov::SoPtr<ov::IRemoteContext>&, const ov::AnyMap&), + (const)); + MOCK_METHOD(bool, device_supports_model_caching, (const std::string&), (const)); + MOCK_METHOD(void, set_property, (const std::string& device_name, const ov::AnyMap& properties)); + + ~MockICore() = default; +}; + +} // namespace ov diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp new file mode 100644 index 00000000000..1de3023814e --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp @@ -0,0 +1,50 @@ +//
Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include <gmock/gmock.h> + +#include <map> +#include <string> + +#include "ie_extension.h" +#include "openvino/core/any.hpp" +#include "openvino/core/model.hpp" +#include "openvino/core/version.hpp" +#include "openvino/runtime/common.hpp" +#include "openvino/runtime/iinfer_request.hpp" +#include "openvino/runtime/iplugin.hpp" +#include "openvino/runtime/iremote_context.hpp" + +namespace ov { + +class MockIPlugin : public ov::IPlugin { +public: + MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, + compile_model, + (const std::shared_ptr<const ov::Model>&, const ov::AnyMap&), + (const)); + MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, compile_model, (const std::string&, const ov::AnyMap&), (const)); + MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, + compile_model, + (const std::shared_ptr<const ov::Model>&, const ov::AnyMap&, const ov::SoPtr<ov::IRemoteContext>&), + (const)); + MOCK_METHOD(void, set_property, (const ov::AnyMap&)); + MOCK_METHOD(ov::Any, get_property, (const std::string&, const ov::AnyMap&), (const)); + MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, create_context, (const ov::AnyMap&), (const)); + MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_default_context, (const ov::AnyMap&), (const)); + + MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, import_model, (std::istream&, const ov::AnyMap&), (const)); + MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, + import_model, + (std::istream&, const ov::SoPtr<ov::IRemoteContext>&, const ov::AnyMap&), + (const)); + MOCK_METHOD(ov::SupportedOpsMap, + query_model, + (const std::shared_ptr<const ov::Model>&, const ov::AnyMap&), + (const)); +}; + +} // namespace ov diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp new file mode 100644 index 00000000000..657fa94ba06 --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include <gmock/gmock.h> + +#include 
"openvino/runtime/isync_infer_request.hpp" +#include "openvino/runtime/ivariable_state.hpp" +#include "openvino/runtime/profiling_info.hpp" + +namespace ov { + +class MockISyncInferRequest : public ov::ISyncInferRequest { +public: + MockISyncInferRequest(const std::shared_ptr<const ov::ICompiledModel>& compiled_model) + : ov::ISyncInferRequest(compiled_model) {} + MOCK_METHOD(void, infer, ()); + MOCK_METHOD(std::vector<ov::ProfilingInfo>, get_profiling_info, (), (const)); + MOCK_METHOD(std::vector<ov::SoPtr<ov::IVariableState>>, query_state, (), (const)); +}; + +} // namespace ov + diff --git a/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp new file mode 100644 index 00000000000..8d8f9f18a20 --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include <gmock/gmock.h> + +#include <memory> +#include <string> + +#include "openvino/runtime/ivariable_state.hpp" + +namespace ov { + +class MockIVariableState : public ov::IVariableState { +public: + MockIVariableState() : ov::IVariableState{"MockIVariableState"} {} + MOCK_METHOD(const std::string&, get_name, (), (const)); + MOCK_METHOD(void, reset, ()); + MOCK_METHOD(void, set_state, (const ov::SoPtr<ov::ITensor>&)); + MOCK_METHOD(const ov::SoPtr<ov::ITensor>&, get_state, (), (const)); +}; + +} // namespace ov