Moved inference unit tests to new API (#19452)

* Moved inference unit tests to new API

* Added infer request and variable state

* Try to fix LTO

* Try to avoid warning from gmock

* Try to fix azure build

* Try to fix Windows build

* Comment all variable_state_test file for future investigation
This commit is contained in:
Ilya Churaev 2023-08-31 16:39:46 +04:00 committed by GitHub
parent 1cf3fe96af
commit 2d2977ff4a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 1124 additions and 231 deletions

View File

@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "compilation_context.hpp"
#include <gtest/gtest.h>
#include <chrono>
@ -11,17 +13,15 @@
#include "common_test_utils/common_utils.hpp"
#include "common_test_utils/test_constants.hpp"
#include "compilation_context.hpp"
#include "cpp/ie_cnn_network.h"
#include "ngraph/function.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/parameter.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
#include "transformations/rt_info/primitives_priority_attribute.hpp"
using namespace InferenceEngine;
using namespace ov;
using namespace ngraph;
using namespace ::testing;
using namespace std::chrono;
@ -107,45 +107,45 @@ TEST_F(NetworkContext_CalcFileInfoTests, SizeModified) {
////////////////////////////////////////////////////
static std::shared_ptr<ngraph::Function> create_simple_function() {
// This example is taken from docs, shows how to create ngraph::Function
static std::shared_ptr<ov::Model> create_simple_model() {
// This example is taken from docs, shows how to create ov::Model
//
// Parameter--->Multiply--->Add--->Result
// Constant---' /
// Constant---'
// Create opset6::Parameter operation with static shape
auto data = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::i8, ngraph::Shape{3, 1, 2});
auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::i8, ov::Shape{3, 1, 2});
data->set_friendly_name("Parameter");
data->get_output_tensor(0).set_names({"parameter"});
auto mul_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {3});
auto mul_constant = ov::op::v0::Constant::create(ov::element::i8, ov::Shape{1}, {3});
mul_constant->set_friendly_name("mul_constant");
mul_constant->get_output_tensor(0).set_names({"mul_constant"});
auto mul = std::make_shared<ngraph::opset6::Multiply>(data, mul_constant);
auto mul = std::make_shared<ov::op::v1::Multiply>(data, mul_constant);
mul->set_friendly_name("mul");
mul->get_output_tensor(0).set_names({"mul"});
auto add_constant = ngraph::opset6::Constant::create(ngraph::element::i8, ngraph::Shape{1}, {2});
auto add_constant = ov::op::v0::Constant::create(ov::element::i8, ov::Shape{1}, {2});
add_constant->set_friendly_name("add_constant");
add_constant->get_output_tensor(0).set_names({"add_constant"});
auto add = std::make_shared<ngraph::opset6::Add>(mul, add_constant);
auto add = std::make_shared<ov::op::v1::Add>(mul, add_constant);
add->set_friendly_name("add");
add->get_output_tensor(0).set_names({"add"});
// Create opset3::Result operation
auto res = std::make_shared<ngraph::opset6::Result>(add);
auto res = std::make_shared<ov::op::v0::Result>(add);
res->set_friendly_name("res");
// Create nGraph function
auto func = std::make_shared<ngraph::Function>(ngraph::ResultVector{res}, ngraph::ParameterVector{data});
return func;
// Create ov function
auto model = std::make_shared<ov::Model>(ov::ResultVector{res}, ov::ParameterVector{data});
return model;
}
static void checkCustomRt(const std::function<void(Node::RTMap&)>& emptyCb,
const std::function<void(Node::RTMap&, const std::string& name)>& nameCb) {
auto model1 = create_simple_function();
auto model2 = create_simple_function();
auto model1 = create_simple_model();
auto model2 = create_simple_model();
auto& op1 = model1->get_ops().front()->get_rt_info();
auto& op2 = model2->get_ops().front()->get_rt_info();
@ -166,22 +166,22 @@ static void checkCustomRt(const std::function<void(Node::RTMap&)>& emptyCb,
}
TEST(NetworkContext, HashOfSame) {
auto model1 = create_simple_function();
auto model2 = create_simple_function();
auto model1 = create_simple_model();
auto model2 = create_simple_model();
ASSERT_EQ(ModelCache::compute_hash(model1, {}), ModelCache::compute_hash(model2, {}));
}
TEST(NetworkContext, HashWithConfig) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
ASSERT_NE(ModelCache::compute_hash(net1, {{"key", "value"}}), ModelCache::compute_hash(net2, {}));
ASSERT_EQ(ModelCache::compute_hash(net1, {{"key", "value"}}), ModelCache::compute_hash(net2, {{"key", "value"}}));
}
TEST(NetworkContext, HashWithPrimitivesPriority) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net3 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
auto net3 = create_simple_model();
auto& op2 = net2->get_ops().front()->get_rt_info();
op2[ov::PrimitivesPriority::get_type_info_static()] = ov::PrimitivesPriority("testPriority");
@ -214,9 +214,9 @@ TEST(NetworkContext, HashWithPrimitivesPriorityType) {
}
TEST(NetworkContext, HashWithAffinity) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net3 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
auto net3 = create_simple_model();
auto& op2 = net2->get_ops().front()->get_rt_info();
op2["affinity"] = "testAffinity";
@ -229,9 +229,9 @@ TEST(NetworkContext, HashWithAffinity) {
}
TEST(NetworkContext, HashWithFutureRt_string) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net3 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
auto net3 = create_simple_model();
auto& op1 = net1->get_ops().front()->get_rt_info();
op1["someFutureKey"] = "hello";
@ -248,9 +248,9 @@ TEST(NetworkContext, HashWithFutureRt_string) {
}
TEST(NetworkContext, HashWithFutureRt_int64) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net3 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
auto net3 = create_simple_model();
auto& op1 = net1->get_ops().front()->get_rt_info();
op1["someFutureKey"] = int64_t(42);
@ -267,9 +267,9 @@ TEST(NetworkContext, HashWithFutureRt_int64) {
}
TEST(NetworkContext, HashWithTensorNames) {
auto fun1 = create_simple_function();
auto fun2 = create_simple_function();
auto fun3 = create_simple_function();
auto fun1 = create_simple_model();
auto fun2 = create_simple_model();
auto fun3 = create_simple_model();
std::unordered_set<std::string> names1, names2;
std::vector<std::string> testNames;
testNames.reserve(100);
@ -292,19 +292,19 @@ TEST(NetworkContext, HashWithTensorNames) {
}
TEST(NetworkContext, HashWithDifferentResults) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
net2->remove_result(net2->get_results().front());
auto net3 = create_simple_function();
auto net3 = create_simple_model();
net3->remove_result(net3->get_results().front());
ASSERT_NE(ModelCache::compute_hash(net1, {}), ModelCache::compute_hash(net2, {}));
ASSERT_EQ(ModelCache::compute_hash(net2, {}), ModelCache::compute_hash(net3, {}));
}
// Verify all internal hash calculations are thread-safe (like ngraph::function serialization)
// Verify all internal hash calculations are thread-safe (like ov::Model serialization)
TEST(NetworkContext, HashOfSameMultiThreading) {
auto net1 = create_simple_function();
auto net2 = create_simple_function();
auto net1 = create_simple_model();
auto net2 = create_simple_model();
std::atomic_bool fail{false};
const auto TEST_DURATION_MS = 1000;
auto start = high_resolution_clock::now();

View File

@ -0,0 +1,187 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/runtime/compiled_model.hpp"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <memory>
#include <ostream>
#include <stdexcept>
#include <unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp>
#include <unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp>
#include <vector>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/runtime/iinfer_request.hpp"
#include "openvino/runtime/ivariable_state.hpp"
#include "openvino/runtime/variable_state.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp"
using namespace ::testing;
namespace {
namespace {
// Test-only "attorney" for reaching ov::CompiledModel's private `_impl` member.
// The tag type names the member-pointer type and declares a friend get() that
// will hand the member pointer back to the tests below.
struct CompiledModel_Impl {
typedef std::shared_ptr<ov::ICompiledModel> ov::CompiledModel::*type;
friend type get(CompiledModel_Impl);
};
// Generic private-member "robber": explicit instantiation of a template is
// allowed to name private members, so Rob<Tag, &Class::member> may define the
// friend get(Tag) that returns the private member pointer.
template <typename Tag, typename Tag::type M>
struct Rob {
friend typename Tag::type get(Tag) {
return M;
}
};
// Instantiation that actually exposes ov::CompiledModel::_impl to get().
template struct Rob<CompiledModel_Impl, &ov::CompiledModel::_impl>;
}  // namespace
// Fixture that wires a MockICompiledModel (backed by a small Param->ReLU model
// and a MockIPlugin) into a public ov::CompiledModel via the private-member
// access trick, so the public API can be exercised against the mock.
class CompiledModelTests : public ::testing::Test {
private:
// Builds the test model: Parameter("param") -> ReLU("relu") -> output.
std::shared_ptr<ov::Model> create_model() {
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 2, 2});
param->set_friendly_name("Param");
param->output(0).set_names({"param"});
auto relu = std::make_shared<ov::op::v0::Relu>(param);
relu->set_friendly_name("ReLU");
relu->output(0).set_names({"relu"});
return std::make_shared<ov::Model>(ov::OutputVector{relu->output(0)}, ov::ParameterVector{param});
}
protected:
std::shared_ptr<ov::MockICompiledModel> mock_compiled_model;
ov::CompiledModel compiled_model;
std::shared_ptr<ov::IPlugin> plugin;
std::shared_ptr<const ov::Model> model;
void TearDown() override {
// Drop the mock first, then clear the public wrapper and the plugin.
mock_compiled_model.reset();
compiled_model = {};
plugin = {};
}
void SetUp() override {
model = create_model();
auto mock_plugin = std::make_shared<ov::MockIPlugin>();
plugin = mock_plugin;
mock_compiled_model = std::make_shared<ov::MockICompiledModel>(model, plugin);
// Inject the mock into the public CompiledModel through its private _impl.
compiled_model.*get(CompiledModel_Impl()) = mock_compiled_model;
}
};
// outputs() propagates exceptions thrown by the implementation.
TEST_F(CompiledModelTests, GetOutputsThrowsIfReturnErr) {
EXPECT_CALL(*mock_compiled_model.get(), outputs()).Times(1).WillOnce(Throw(std::runtime_error{""}));
ASSERT_THROW(compiled_model.outputs(), std::runtime_error);
}
// outputs() forwards to the implementation and returns its ports unchanged.
TEST_F(CompiledModelTests, GetOutputs) {
std::vector<ov::Output<const ov::Node>> data;
EXPECT_CALL(*mock_compiled_model.get(), outputs()).Times(1).WillOnce(ReturnRefOfCopy(model->outputs()));
ASSERT_NO_THROW(data = compiled_model.outputs());
ASSERT_EQ(data, model->outputs());
}
// inputs() propagates exceptions thrown by the implementation.
TEST_F(CompiledModelTests, GetInputsThrowsIfReturnErr) {
EXPECT_CALL(*mock_compiled_model.get(), inputs()).Times(1).WillOnce(Throw(std::runtime_error{""}));
ASSERT_THROW(compiled_model.inputs(), std::runtime_error);
}
// inputs() forwards to the implementation and returns its ports unchanged.
TEST_F(CompiledModelTests, GetInputs) {
EXPECT_CALL(*mock_compiled_model.get(), inputs()).Times(1).WillOnce(ReturnRefOfCopy(model->inputs()));
std::vector<ov::Output<const ov::Node>> info;
ASSERT_NO_THROW(info = compiled_model.inputs());
ASSERT_EQ(info, model->inputs());
}
// Extends CompiledModelTests with a mock async infer request that tests hand
// back from create_infer_request() expectations.
class CompiledModelWithIInferReqTests : public CompiledModelTests {
protected:
std::shared_ptr<ov::MockIAsyncInferRequest> mock_infer_request;
void SetUp() override {
CompiledModelTests::SetUp();
mock_infer_request = std::make_shared<ov::MockIAsyncInferRequest>();
}
};
// create_infer_request() wraps the implementation's request without throwing.
TEST_F(CompiledModelWithIInferReqTests, CanCreateInferRequest) {
EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request));
ov::InferRequest actualInferReq;
ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request());
}
// Exceptions from the implementation's create_infer_request() propagate.
TEST_F(CompiledModelWithIInferReqTests, CreateInferRequestThrowsIfReturnNotOK) {
EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Throw(std::runtime_error{""}));
ASSERT_THROW(compiled_model.create_infer_request(), std::runtime_error);
}
// query_state() on the created request propagates implementation errors.
TEST_F(CompiledModelWithIInferReqTests, QueryStateThrowsIfReturnErr) {
EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request));
ov::InferRequest actualInferReq;
ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request());
EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillOnce(Throw(std::runtime_error{""}));
EXPECT_THROW(actualInferReq.query_state(), std::runtime_error);
}
// query_state() wraps each IVariableState returned by the implementation.
TEST_F(CompiledModelWithIInferReqTests, QueryState) {
EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Return(mock_infer_request));
ov::InferRequest actualInferReq;
ASSERT_NO_THROW(actualInferReq = compiled_model.create_infer_request());
ov::SoPtr<ov::IVariableState> state = std::make_shared<ov::MockIVariableState>();
EXPECT_CALL(*mock_infer_request.get(), query_state())
.Times(1)
.WillOnce(Return(std::vector<ov::SoPtr<ov::IVariableState>>(1, state)));
std::vector<ov::VariableState> MemState_v;
MemState_v = actualInferReq.query_state();
EXPECT_EQ(MemState_v.size(), 1);
}
// Minimal fixture: a MockICompiledModel with no model (nullptr) injected into
// a public ov::CompiledModel, for forwarding/error-propagation tests that do
// not need real model ports.
class CompiledModelBaseTests : public ::testing::Test {
protected:
std::shared_ptr<ov::MockICompiledModel> mock_compiled_model;
ov::CompiledModel compiled_model;
std::shared_ptr<ov::IPlugin> plugin;
void SetUp() override {
auto mock_plugin = std::make_shared<ov::MockIPlugin>();
plugin = mock_plugin;
mock_compiled_model = std::make_shared<ov::MockICompiledModel>(nullptr, plugin);
// Inject the mock through CompiledModel's private _impl member.
compiled_model.*get(CompiledModel_Impl()) = mock_compiled_model;
}
};
// CreateInferRequest
// create_infer_request() forwards exactly once to the implementation.
TEST_F(CompiledModelBaseTests, canForwardCreateInferRequest) {
auto inferReqInternal = std::make_shared<ov::MockIAsyncInferRequest>();
EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).Times(1).WillRepeatedly(Return(inferReqInternal));
ASSERT_NO_THROW(compiled_model.create_infer_request());
}
// Implementation errors propagate with the original message preserved.
TEST_F(CompiledModelBaseTests, canReportErrorInCreateInferRequest) {
EXPECT_CALL(*mock_compiled_model.get(), create_infer_request()).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(compiled_model.create_infer_request(), std::runtime_error, "compare");
}
// Export
// export_model() forwards the output stream to the implementation.
TEST_F(CompiledModelBaseTests, canForwardExport) {
std::stringstream out_model;
EXPECT_CALL(*mock_compiled_model.get(), export_model(_)).Times(1);
EXPECT_NO_THROW(compiled_model.export_model(out_model));
}
// export_model() errors propagate with the original message preserved.
TEST_F(CompiledModelBaseTests, canReportErrorInExport) {
std::stringstream out_model;
EXPECT_CALL(*mock_compiled_model.get(), export_model(_)).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(compiled_model.export_model(out_model), std::runtime_error, "compare");
}

View File

@ -0,0 +1,150 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/runtime/infer_request.hpp"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <stdexcept>
#include "common_test_utils/test_assertions.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp"
using namespace ::testing;
using namespace std;
namespace {
struct InferRequest_Impl {
typedef std::shared_ptr<ov::IAsyncInferRequest> ov::InferRequest::*type;
friend type get(InferRequest_Impl);
};
template <typename Tag, typename Tag::type M>
struct Rob {
friend typename Tag::type get(Tag) {
return M;
}
};
template struct Rob<InferRequest_Impl, &ov::InferRequest::_impl>;
} // namespace
// Fixture that injects a MockIAsyncInferRequest into a public ov::InferRequest
// (through its private _impl) so forwarding and error propagation can be tested.
class OVInferRequestBaseTests : public ::testing::Test {
protected:
std::shared_ptr<ov::MockIAsyncInferRequest> mock_impl;
ov::InferRequest request;
void SetUp() override {
mock_impl.reset(new ov::MockIAsyncInferRequest());
// Inject the mock through InferRequest's private _impl member.
request.*get(InferRequest_Impl()) = mock_impl;
}
};
// start_async
// start_async() is forwarded once to the implementation.
TEST_F(OVInferRequestBaseTests, canForwardStartAsync) {
EXPECT_CALL(*mock_impl.get(), start_async()).Times(1);
ASSERT_NO_THROW(request.start_async());
}
// Implementation errors propagate with the original message preserved.
TEST_F(OVInferRequestBaseTests, canReportErrorInStartAsync) {
EXPECT_CALL(*mock_impl.get(), start_async()).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(request.start_async(), std::runtime_error, "compare");
}
// wait
// wait() is forwarded to the implementation.
TEST_F(OVInferRequestBaseTests, canForwardWait) {
EXPECT_CALL(*mock_impl.get(), wait()).WillOnce(Return());
ASSERT_NO_THROW(request.wait());
}
TEST_F(OVInferRequestBaseTests, canReportErrorInWait) {
EXPECT_CALL(*mock_impl.get(), wait()).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(request.wait(), std::runtime_error, "compare");
}
// Infer
// infer() is forwarded once to the implementation.
TEST_F(OVInferRequestBaseTests, canForwardInfer) {
EXPECT_CALL(*mock_impl.get(), infer()).Times(1);
ASSERT_NO_THROW(request.infer());
}
TEST_F(OVInferRequestBaseTests, canReportErrorInInfer) {
EXPECT_CALL(*mock_impl.get(), infer()).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(request.infer(), std::runtime_error, "compare");
}
// get_profiling_info
// get_profiling_info() is forwarded to the implementation.
// NOTE(review): the unused local `std::vector<ov::ProfilingInfo> info;` present
// in both tests was dead code and has been removed.
TEST_F(OVInferRequestBaseTests, canForwardGetPerformanceCounts) {
    EXPECT_CALL(*mock_impl.get(), get_profiling_info()).WillOnce(Return(std::vector<ov::ProfilingInfo>{}));
    ASSERT_NO_THROW(request.get_profiling_info());
}
// Implementation errors propagate with the original message preserved.
TEST_F(OVInferRequestBaseTests, canReportErrorInGetPerformanceCounts) {
    EXPECT_CALL(*mock_impl.get(), get_profiling_info()).WillOnce(Throw(std::runtime_error("compare")));
    OV_EXPECT_THROW_HAS_SUBSTRING(request.get_profiling_info(), std::runtime_error, "compare");
}
// get_tensor
// get_tensor("name") resolves the name against get_inputs()/get_outputs() and
// then forwards to the implementation's get_tensor().
// NOTE(review): the unused local `ov::Tensor data;` in the first test was dead
// code and has been removed.
TEST_F(OVInferRequestBaseTests, canForwardGetTensor) {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1});
    param->output(0).set_names({"test_name"});
    std::vector<ov::Output<const ov::Node>> inputs{param->output(0)};
    EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs));
    EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs));
    EXPECT_CALL(*mock_impl.get(), get_tensors(_)).WillOnce(Return(std::vector<ov::SoPtr<ov::ITensor>>{}));
    EXPECT_CALL(*mock_impl.get(), get_tensor(_)).WillOnce(Return(ov::make_tensor(ov::element::f32, {1, 2, 3, 3})));
    ASSERT_NO_THROW(request.get_tensor("test_name"));
}
// Implementation errors from get_tensor() propagate with the message preserved.
TEST_F(OVInferRequestBaseTests, canReportErrorInGetTensor) {
    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1});
    param->output(0).set_names({"test_name"});
    std::vector<ov::Output<const ov::Node>> inputs{param->output(0)};
    EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs));
    EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs));
    EXPECT_CALL(*mock_impl.get(), get_tensors(_)).WillOnce(Return(std::vector<ov::SoPtr<ov::ITensor>>{}));
    EXPECT_CALL(*mock_impl.get(), get_tensor(_)).WillOnce(Throw(std::runtime_error("compare")));
    OV_EXPECT_THROW_HAS_SUBSTRING(request.get_tensor("test_name"), std::runtime_error, "compare");
}
// set_tensor
// set_tensor("name", t) resolves the name against get_inputs()/get_outputs()
// and then forwards port + tensor to the implementation's set_tensor().
TEST_F(OVInferRequestBaseTests, canForwardSetTensor) {
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1});
param->output(0).set_names({"test_name"});
std::vector<ov::Output<const ov::Node>> inputs{param->output(0)};
ov::Tensor data;
EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs));
EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs));
EXPECT_CALL(*mock_impl.get(), set_tensor(_, _)).Times(1);
ASSERT_NO_THROW(request.set_tensor("test_name", data));
}
// Implementation errors from set_tensor() propagate with the message preserved.
TEST_F(OVInferRequestBaseTests, canReportErrorInSetTensor) {
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1});
param->output(0).set_names({"test_name"});
std::vector<ov::Output<const ov::Node>> inputs{param->output(0)};
ov::Tensor data;
EXPECT_CALL(*mock_impl.get(), get_inputs()).WillOnce(ReturnRef(inputs));
EXPECT_CALL(*mock_impl.get(), get_outputs()).WillOnce(ReturnRef(inputs));
EXPECT_CALL(*mock_impl.get(), set_tensor(_, _)).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(request.set_tensor("test_name", data), std::runtime_error, "compare");
}
// set_callback
// set_callback() is forwarded once to the implementation (nullptr is accepted).
TEST_F(OVInferRequestBaseTests, canForwardSetCompletionCallback) {
EXPECT_CALL(*mock_impl.get(), set_callback(_)).Times(1);
ASSERT_NO_THROW(request.set_callback(nullptr));
}
// Implementation errors propagate with the original message preserved.
TEST_F(OVInferRequestBaseTests, canReportErrorInSetCompletionCallback) {
EXPECT_CALL(*mock_impl.get(), set_callback(_)).WillOnce(Throw(std::runtime_error("compare")));
OV_EXPECT_THROW_HAS_SUBSTRING(request.set_callback(nullptr), std::runtime_error, "compare");
}

View File

@ -0,0 +1,116 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "openvino/runtime/iplugin.hpp"
#include <gmock/gmock-spec-builders.h>
#include <gtest/gtest.h>
#include "openvino/core/node_output.hpp"
#include "openvino/core/node_vector.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"
#include "openvino/op/result.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/iinfer_request.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_iplugin.hpp"
#include "unit_test_utils/mocks/openvino/runtime/mock_isync_infer_request.hpp"
using namespace ::testing;
using namespace std;
// Fixture exercising the ov::IPlugin compile path with fully mocked plugin,
// compiled model and sync infer request.
class IPluginTest : public ::testing::Test {
private:
// Builds the test model: Parameter("param") -> ReLU("relu") -> output.
std::shared_ptr<ov::Model> create_model() {
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 3, 2, 2});
param->set_friendly_name("Param");
param->output(0).set_names({"param"});
auto relu = std::make_shared<ov::op::v0::Relu>(param);
relu->set_friendly_name("ReLU");
relu->output(0).set_names({"relu"});
return std::make_shared<ov::Model>(ov::OutputVector{relu->output(0)}, ov::ParameterVector{param});
}
protected:
shared_ptr<ov::IPlugin> plugin;
shared_ptr<ov::MockIPlugin> mock_plugin_impl;
shared_ptr<ov::MockICompiledModel> mock_compiled_model;
shared_ptr<ov::MockISyncInferRequest> mock_infer_request;
// Initialized at member-init time, before SetUp() runs.
std::shared_ptr<const ov::Model> model = create_model();
std::string pluginId;
void TearDown() override {
// Verify every EXPECT_CALL set during the test was satisfied.
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_plugin_impl.get()));
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_compiled_model.get()));
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_infer_request.get()));
}
void SetUp() override {
pluginId = "TEST";
mock_plugin_impl.reset(new ov::MockIPlugin());
mock_plugin_impl->set_device_name(pluginId);
plugin = std::static_pointer_cast<ov::IPlugin>(mock_plugin_impl);
mock_compiled_model = make_shared<ov::MockICompiledModel>(model, plugin);
// Default-route port queries to the real model's ports.
ON_CALL(*mock_compiled_model.get(), inputs()).WillByDefault(ReturnRefOfCopy(model->inputs()));
ON_CALL(*mock_compiled_model.get(), outputs()).WillByDefault(ReturnRefOfCopy(model->outputs()));
mock_infer_request = make_shared<ov::MockISyncInferRequest>(mock_compiled_model);
}
// Compiles the model through the mocked plugin and returns (via out-param) an
// async infer request whose sync part is the MockISyncInferRequest above.
void getInferRequestWithMockImplInside(std::shared_ptr<ov::IAsyncInferRequest>& request) {
std::shared_ptr<ov::ICompiledModel> compiled_model;
EXPECT_CALL(*mock_plugin_impl.get(), compile_model(A<const std::shared_ptr<const ov::Model>&>(), _))
.WillOnce(Return(mock_compiled_model));
EXPECT_CALL(*mock_compiled_model.get(), create_sync_infer_request()).WillOnce(Return(mock_infer_request));
// Let create_infer_request() run the default implementation, which wraps
// the mocked sync request.
ON_CALL(*mock_compiled_model.get(), create_infer_request()).WillByDefault([&]() {
return mock_compiled_model->create_infer_request_default();
});
compiled_model = plugin->compile_model(model, {});
ASSERT_NE(nullptr, compiled_model);
request = compiled_model->create_infer_request();
ASSERT_NE(nullptr, request);
}
};
// Matches when the first blob in a map shares its underlying buffer with ref_blob.
// NOTE(review): appears to be unused leftover from the pre-2.0 Blob-based API
// (uses buffer()); no test in this file references it — candidate for removal.
MATCHER_P(blob_in_map_pointer_is_same, ref_blob, "") {
return reinterpret_cast<float*>(arg.begin()->second->buffer()) == reinterpret_cast<float*>(ref_blob->buffer());
}
// set_tensor() with a port that does not belong to the compiled model must
// throw ov::Exception mentioning "Cannot find tensor for port".
// Fix: the original try/catch let the test pass silently when no exception was
// thrown at all; FAIL() after the call now makes a missing throw a test failure.
TEST_F(IPluginTest, failToSetTensorWithIncorrectPort) {
    auto incorrect_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 2});
    ov::SoPtr<ov::ITensor> tensor = ov::make_tensor(ov::element::f32, {1, 1, 1, 1});
    std::string refError = "Cannot find tensor for port";
    std::shared_ptr<ov::IAsyncInferRequest> infer_request;
    getInferRequestWithMockImplInside(infer_request);
    try {
        infer_request->set_tensor(incorrect_param->output(0), tensor);
        FAIL() << "set_tensor() with a foreign port was expected to throw ov::Exception";
    } catch (ov::Exception& ex) {
        ASSERT_TRUE(std::string{ex.what()}.find(refError) != std::string::npos)
            << "\tExpected: " << refError << "\n\tActual: " << ex.what();
    }
}
// set_tensor() with an empty (null) SoPtr tensor must throw ov::Exception
// whose message starts with "Failed to set tensor. ".
// Fix: the original try/catch let the test pass silently when no exception was
// thrown at all; FAIL() after the call now makes a missing throw a test failure.
TEST_F(IPluginTest, failToSetEmptyITensor) {
    ov::SoPtr<ov::ITensor> tensor;
    std::string refError = "Failed to set tensor. ";
    std::shared_ptr<ov::IAsyncInferRequest> infer_request;
    getInferRequestWithMockImplInside(infer_request);
    try {
        infer_request->set_tensor(model->input(0), tensor);
        FAIL() << "set_tensor() with an empty tensor was expected to throw ov::Exception";
    } catch (ov::Exception& ex) {
        ASSERT_TRUE(std::string{ex.what()}.find(refError) != std::string::npos)
            << "\tExpected: " << refError << "\n\tActual: " << ex.what();
    }
}
// set_tensor() succeeds when the tensor shape matches the model's input port.
TEST_F(IPluginTest, SetTensorWithCorrectPort) {
ov::SoPtr<ov::ITensor> tensor = ov::make_tensor(ov::element::f32, {1, 3, 2, 2});
std::shared_ptr<ov::IAsyncInferRequest> infer_request;
getInferRequestWithMockImplInside(infer_request);
EXPECT_NO_THROW(infer_request->set_tensor(model->input(0), tensor));
}

View File

@ -5,14 +5,11 @@
#include "memory_solver.hpp"
#include <gtest/gtest.h>
#include <ie_common.h>
#include <vector>
using Box = MemorySolver::Box;
IE_SUPPRESS_DEPRECATED_START
TEST(MemSolverTest, CanConstruct) {
{ // Empty vector<Box>
MemorySolver ms(std::vector<Box>{});
@ -80,7 +77,7 @@ TEST(MemSolverTest, GetOffsetThrowException) {
MemorySolver ms(boxes);
ms.solve();
EXPECT_THROW(ms.getOffset(100), InferenceEngine::Exception);
EXPECT_THROW(ms.getOffset(100), std::runtime_error);
}
// |

View File

@ -7,10 +7,17 @@
#include <openvino/core/rt_info.hpp>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/pass/constant_folding.hpp"
#include "openvino/opsets/opset9.hpp"
#include "openvino/op/add.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/log_softmax.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/reduce_l2.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/result.hpp"
#include "openvino/pass/constant_folding.hpp"
#include "openvino/pass/manager.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "transformations/common_optimizations/common_optimizations.hpp"
#include "transformations/common_optimizations/nop_elimination.hpp"
#include "transformations/convert_precision.hpp"
@ -41,9 +48,9 @@ protected:
public:
void Run(std::function<void(std::shared_ptr<ov::Model>&)> transform,
std::function<bool(const std::shared_ptr<ngraph::Node>)> is_node_supported,
std::function<bool(const std::shared_ptr<ov::Node>)> is_node_supported,
const std::unordered_set<std::string>& expected) {
auto supported = InferenceEngine::GetSupportedNodes(m_function, transform, is_node_supported);
auto supported = ov::get_supported_nodes(m_function, transform, is_node_supported);
auto const is_in_expected = [&expected](const std::string& x) {
return expected.find(x) != expected.end();
};
@ -59,27 +66,27 @@ public:
TEST_F(GetSupportedNodesTest, UnsupportedCompressedConstantCF) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1});
auto constant_compressed = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1});
constant_compressed->set_friendly_name("constant_compressed");
auto convert = std::make_shared<ov::opset9::Convert>(constant_compressed, ov::element::f32);
auto convert = std::make_shared<ov::op::v0::Convert>(constant_compressed, ov::element::f32);
convert->set_friendly_name("constant");
ov::mark_as_decompression(convert);
auto add = std::make_shared<ov::opset9::Add>(param, convert);
auto add = std::make_shared<ov::op::v1::Add>(param, convert);
add->set_friendly_name("add");
auto result = std::make_shared<ngraph::op::Result>(add);
auto result = std::make_shared<ov::op::v0::Result>(add);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
ov::pass::Manager m;
m.register_pass<ov::pass::InitNodeInfo>();
m.register_pass<ngraph::pass::ConstantFolding>();
m.register_pass<ov::pass::ConstantFolding>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op);
},
{});
@ -87,34 +94,34 @@ TEST_F(GetSupportedNodesTest, UnsupportedCompressedConstantCF) {
TEST_F(GetSupportedNodesTest, ConstantSubgraphCF) {
{
auto constant_compressed1 = ngraph::op::Constant::create(ov::element::f16, m_shape, {1});
auto constant_compressed1 = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1});
constant_compressed1->set_friendly_name("constant_compressed1");
auto convert1 = std::make_shared<ov::opset9::Convert>(constant_compressed1, ov::element::f32);
auto convert1 = std::make_shared<ov::op::v0::Convert>(constant_compressed1, ov::element::f32);
convert1->set_friendly_name("constant1");
ov::mark_as_decompression(convert1);
auto constant_compressed2 = ngraph::op::Constant::create(ov::element::f16, m_shape, {2});
auto constant_compressed2 = ov::op::v0::Constant::create(ov::element::f16, m_shape, {2});
constant_compressed2->set_friendly_name("constant_compressed2");
auto convert2 = std::make_shared<ov::opset9::Convert>(constant_compressed2, ov::element::f32);
auto convert2 = std::make_shared<ov::op::v0::Convert>(constant_compressed2, ov::element::f32);
convert2->set_friendly_name("constant2");
ov::mark_as_decompression(convert2);
auto add = std::make_shared<ov::opset9::Add>(convert1, convert2);
auto add = std::make_shared<ov::op::v1::Add>(convert1, convert2);
add->set_friendly_name("add");
auto const_reshape = ov::opset9::Constant::create(ngraph::element::i64, ov::Shape{1}, {84});
auto const_reshape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {84});
const_reshape->set_friendly_name("const_reshape");
auto reshape = std::make_shared<ov::opset9::Reshape>(add, const_reshape, false);
auto reshape = std::make_shared<ov::op::v1::Reshape>(add, const_reshape, false);
reshape->set_friendly_name("reshape");
auto result = std::make_shared<ngraph::op::Result>(reshape);
auto result = std::make_shared<ov::op::v0::Result>(reshape);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
ov::pass::Manager m;
m.register_pass<ov::pass::InitNodeInfo>();
m.register_pass<ngraph::pass::ConstantFolding>();
m.register_pass<ov::pass::ConstantFolding>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op);
},
{"constant_compressed1",
@ -129,44 +136,44 @@ TEST_F(GetSupportedNodesTest, ConstantSubgraphCF) {
TEST_F(GetSupportedNodesTest, SupportedCompressedConstantNop) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1});
auto constant_compressed = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1});
constant_compressed->set_friendly_name("constant_compressed");
auto convert = std::make_shared<ov::opset9::Convert>(constant_compressed, ov::element::f32);
auto convert = std::make_shared<ov::op::v0::Convert>(constant_compressed, ov::element::f32);
convert->set_friendly_name("constant");
auto add = std::make_shared<ov::opset9::Add>(param, convert);
auto add = std::make_shared<ov::op::v1::Add>(param, convert);
add->set_friendly_name("add");
auto result = std::make_shared<ngraph::op::Result>(add);
auto result = std::make_shared<ov::op::v0::Result>(add);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
ov::pass::Manager m;
m.register_pass<ov::pass::InitNodeInfo>();
m.register_pass<ov::pass::ConvertPrecision>(precisions_map{{ngraph::element::f16, ngraph::element::f32}});
m.register_pass<ov::pass::ConvertPrecision>(precisions_map{{ov::element::f16, ov::element::f32}});
m.register_pass<ov::pass::NopElimination>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::Add>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v1::Add>(op) != nullptr);
},
{"input", "constant_compressed", "constant", "add", "result"});
}
TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto mul_const = ngraph::op::Constant::create(ov::element::f32, m_shape, {1});
auto mul_const = ov::op::v0::Constant::create(ov::element::f32, m_shape, {1});
mul_const->set_friendly_name("constant");
auto mul = std::make_shared<ov::opset9::Multiply>(param, mul_const);
auto mul = std::make_shared<ov::op::v1::Multiply>(param, mul_const);
mul->set_friendly_name("output_operation");
auto result = std::make_shared<ngraph::op::Result>(mul);
auto result = std::make_shared<ov::op::v0::Result>(mul);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
@ -174,12 +181,12 @@ TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) {
m.register_pass<ov::pass::InitNodeInfo>();
m.run_passes(model);
for (auto& op : model->get_ops()) {
if (std::dynamic_pointer_cast<ov::opset9::Multiply>(op) != nullptr) {
if (std::dynamic_pointer_cast<ov::op::v1::Multiply>(op) != nullptr) {
// Add one more dummy operation
auto consumers = op->output(0).get_target_inputs();
auto shape = op->get_shape();
auto add_const = ngraph::op::Constant::create(ov::element::f32, m_shape, {0});
auto add = std::make_shared<ov::opset9::Add>(op, add_const);
auto add_const = ov::op::v0::Constant::create(ov::element::f32, m_shape, {0});
auto add = std::make_shared<ov::op::v1::Add>(op, add_const);
add->set_friendly_name(op->get_friendly_name());
op->set_friendly_name(op->get_friendly_name() + "/previous");
ov::copy_runtime_info(op, add);
@ -189,89 +196,89 @@ TEST_F(GetSupportedNodesTest, SupportedConstantInsertAdditionalOp) {
}
}
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::Multiply>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::opset9::Add>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v1::Multiply>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::op::v1::Add>(op) != nullptr);
},
{"input", "constant", "output_operation", "result"});
}
TEST_F(GetSupportedNodesTest, PartiallySupportedCompressedConstant) {
{
auto param1 = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param1->set_friendly_name("input1");
auto param2 = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param2 = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param2->set_friendly_name("input2");
auto constant_compressed = ngraph::op::Constant::create(ov::element::f16, m_shape, {1});
auto constant_compressed = ov::op::v0::Constant::create(ov::element::f16, m_shape, {1});
constant_compressed->set_friendly_name("constant_compressed");
auto convert = std::make_shared<ov::opset9::Convert>(constant_compressed, ov::element::f32);
auto convert = std::make_shared<ov::op::v0::Convert>(constant_compressed, ov::element::f32);
convert->set_friendly_name("constant");
ov::mark_as_decompression(convert);
auto add = std::make_shared<ov::opset9::Add>(param1, convert);
auto add = std::make_shared<ov::op::v1::Add>(param1, convert);
add->set_friendly_name("add");
auto result1 = std::make_shared<ngraph::op::Result>(add);
auto result1 = std::make_shared<ov::op::v0::Result>(add);
result1->set_friendly_name("result1");
auto mul = std::make_shared<ov::opset9::Multiply>(param2, convert);
auto mul = std::make_shared<ov::op::v1::Multiply>(param2, convert);
mul->set_friendly_name("mul");
auto result2 = std::make_shared<ngraph::op::Result>(mul);
auto result2 = std::make_shared<ov::op::v0::Result>(mul);
result2->set_friendly_name("result2");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result1, result2},
ngraph::ParameterVector{param1, param2});
m_function =
std::make_shared<ov::Model>(ov::ResultVector{result1, result2}, ov::ParameterVector{param1, param2});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
ov::pass::Manager m;
m.register_pass<ov::pass::InitNodeInfo>();
m.register_pass<ngraph::pass::ConstantFolding>();
m.register_pass<ov::pass::ConstantFolding>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::Multiply>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v1::Multiply>(op) != nullptr);
},
{"input2", "constant_compressed", "constant", "mul", "result2"});
}
TEST_F(GetSupportedNodesTest, ConstantSubgraphSupported) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto weights = ov::opset9::Constant::create(ov::element::Type_t::f32, {10, 84}, {1});
auto weights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {10, 84}, {1});
weights->set_friendly_name("weights");
auto shapeOf = std::make_shared<ov::opset9::ShapeOf>(weights);
auto shapeOf = std::make_shared<ov::op::v0::ShapeOf>(weights);
shapeOf->set_friendly_name("shapeof");
auto const1 = ov::opset9::Constant::create(ov::element::Type_t::i32, {1}, {1});
auto const1 = ov::op::v0::Constant::create(ov::element::Type_t::i32, {1}, {1});
const1->set_friendly_name("const1");
auto const2 = ov::opset9::Constant::create(ov::element::Type_t::i64, {}, {0});
auto const2 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {}, {0});
const2->set_friendly_name("const2");
auto gather = std::make_shared<ov::opset9::Gather>(shapeOf, const1, const2);
auto gather = std::make_shared<ov::op::v8::Gather>(shapeOf, const1, const2);
gather->set_friendly_name("gather");
auto const3 = ov::opset9::Constant::create(ov::element::Type_t::i64, {1}, {1});
auto const3 = ov::op::v0::Constant::create(ov::element::Type_t::i64, {1}, {1});
const3->set_friendly_name("const3");
auto concat = std::make_shared<ov::opset9::Concat>(ov::NodeVector{const3, gather}, 0);
auto concat = std::make_shared<ov::op::v0::Concat>(ov::NodeVector{const3, gather}, 0);
concat->set_friendly_name("concat");
auto reshape = std::make_shared<ov::opset9::Reshape>(param, concat, false);
auto reshape = std::make_shared<ov::op::v1::Reshape>(param, concat, false);
reshape->set_friendly_name("reshape");
auto matmul = std::make_shared<ov::opset9::MatMul>(reshape, weights, false, true);
auto matmul = std::make_shared<ov::op::v0::MatMul>(reshape, weights, false, true);
matmul->set_friendly_name("matmul");
auto result = std::make_shared<ngraph::op::Result>(matmul);
auto result = std::make_shared<ov::op::v0::Result>(matmul);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
ov::pass::Manager m;
m.register_pass<ov::pass::InitNodeInfo>();
m.register_pass<ngraph::pass::ConstantFolding>();
m.register_pass<ov::pass::ConstantFolding>();
m.register_pass<ov::pass::NopElimination>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::MatMul>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v0::MatMul>(op) != nullptr);
},
{"input",
"weights",
@ -288,76 +295,75 @@ TEST_F(GetSupportedNodesTest, ConstantSubgraphSupported) {
TEST_F(GetSupportedNodesTest, UnmarkedSupportedInputsOutputs) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto constant = ngraph::op::Constant::create(ov::element::f32, ov::Shape{m_shape[1]}, {1});
auto constant = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{m_shape[1]}, {1});
constant->set_friendly_name("constant");
auto const_reshape = ov::opset9::Constant::create(ngraph::element::i64, ov::Shape{2}, m_shape);
auto const_reshape = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{2}, m_shape);
const_reshape->set_friendly_name("const_reshape");
auto reshape = std::make_shared<ov::opset9::Reshape>(constant, const_reshape, false);
auto reshape = std::make_shared<ov::op::v1::Reshape>(constant, const_reshape, false);
reshape->set_friendly_name("reshape");
auto add = std::make_shared<ov::opset9::Add>(param, reshape);
auto add = std::make_shared<ov::op::v1::Add>(param, reshape);
add->set_friendly_name("add");
auto result = std::make_shared<ngraph::op::Result>(add);
auto result = std::make_shared<ov::op::v0::Result>(add);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
ov::pass::Manager m;
m.register_pass<ov::pass::InitNodeInfo>();
m.register_pass<ngraph::pass::ConstantFolding>();
m.register_pass<ov::pass::ConstantFolding>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
// Plugin don't mark input, constant and result as supported
return (std::dynamic_pointer_cast<ov::opset9::Add>(op) != nullptr);
return (std::dynamic_pointer_cast<ov::op::v1::Add>(op) != nullptr);
},
{"add"});
}
TEST_F(GetSupportedNodesTest, WrongFusedNamesInOriginalModel) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto weights = ov::opset9::Constant::create(ov::element::Type_t::f32, {10, 84}, {1});
auto weights = ov::op::v0::Constant::create(ov::element::Type_t::f32, {10, 84}, {1});
weights->set_friendly_name("weights");
auto matmul = std::make_shared<ov::opset9::MatMul>(param, weights, false, true);
auto matmul = std::make_shared<ov::op::v0::MatMul>(param, weights, false, true);
matmul->get_rt_info()[ov::FusedNames::get_type_info_static()] = ov::FusedNames("add");
matmul->set_friendly_name("matmul");
auto constant = ngraph::op::Constant::create(ov::element::f32, {1, 10}, {1});
auto constant = ov::op::v0::Constant::create(ov::element::f32, {1, 10}, {1});
constant->set_friendly_name("constant");
auto add = std::make_shared<ov::opset9::Add>(matmul, constant);
auto add = std::make_shared<ov::op::v1::Add>(matmul, constant);
add->get_rt_info()[ov::FusedNames::get_type_info_static()] = ov::FusedNames("matmul");
add->set_friendly_name("add");
auto result = std::make_shared<ngraph::op::Result>(add);
auto result = std::make_shared<ov::op::v0::Result>(add);
result->set_friendly_name("result");
m_function = std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
return;
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::MatMul>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v0::MatMul>(op) != nullptr);
},
{"input", "weights", "matmul"});
}
TEST_F(GetSupportedNodesTest, FusedNamesSupportedUnsupportedBoth) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto dummy_param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto dummy_param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
dummy_param->set_friendly_name("dummy_param");
auto logsoftmax = std::make_shared<ov::opset9::LogSoftmax>(param, 1);
auto logsoftmax = std::make_shared<ov::op::v5::LogSoftmax>(param, 1);
logsoftmax->set_friendly_name("logsoftmax");
auto result = std::make_shared<ngraph::op::Result>(logsoftmax);
auto result = std::make_shared<ov::op::v0::Result>(logsoftmax);
result->set_friendly_name("result");
m_function =
std::make_shared<ov::Model>(ngraph::ResultVector{result}, ngraph::ParameterVector{param, dummy_param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param, dummy_param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
@ -366,47 +372,46 @@ TEST_F(GetSupportedNodesTest, FusedNamesSupportedUnsupportedBoth) {
m.register_pass<ov::pass::LogSoftmaxDecomposition>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
// Exp is not supported and all constants are missing
return ov::op::util::is_parameter(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::ReduceMax>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::opset9::Subtract>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::opset9::ReduceSum>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::opset9::Log>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v1::ReduceMax>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::op::v1::Subtract>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::op::v1::ReduceSum>(op) != nullptr) ||
(std::dynamic_pointer_cast<ov::op::v0::Log>(op) != nullptr);
},
{"dummy_param"}); // kepp dummy only since it has no unsupported consumers
}
TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) {
{
auto param = std::make_shared<ngraph::op::Parameter>(ov::element::f32, m_shape);
auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, m_shape);
param->set_friendly_name("input");
auto slope_compressed = ov::opset9::Constant::create(ngraph::element::f16, ngraph::Shape{}, {-2.f});
auto slope_compressed = ov::op::v0::Constant::create(ov::element::f16, ov::Shape{}, {-2.f});
slope_compressed->set_friendly_name("slope_compressed");
auto convert_slope = std::make_shared<ov::opset9::Convert>(slope_compressed, ov::element::f32);
auto convert_slope = std::make_shared<ov::op::v0::Convert>(slope_compressed, ov::element::f32);
convert_slope->set_friendly_name("slope");
ov::mark_as_decompression(convert_slope);
auto prelu = std::make_shared<ov::opset9::PRelu>(param, convert_slope);
auto prelu = std::make_shared<ov::op::v0::PRelu>(param, convert_slope);
prelu->set_friendly_name("prelu");
auto shapeOf = std::make_shared<ov::opset9::ShapeOf>(prelu);
auto shapeOf = std::make_shared<ov::op::v0::ShapeOf>(prelu);
shapeOf->set_friendly_name("shapeof");
auto convert_fp32 = std::make_shared<ov::opset9::Convert>(shapeOf, ov::element::f32);
auto convert_fp32 = std::make_shared<ov::op::v0::Convert>(shapeOf, ov::element::f32);
convert_fp32->set_friendly_name("convert_fp32");
auto scale = ov::opset9::Constant::create(ngraph::element::f32, ngraph::Shape{}, {2.0f});
auto scale = ov::op::v0::Constant::create(ov::element::f32, ov::Shape{}, {2.0f});
scale->set_friendly_name("scale");
auto mul_scale = std::make_shared<ov::opset9::Multiply>(convert_fp32, scale);
auto mul_scale = std::make_shared<ov::op::v1::Multiply>(convert_fp32, scale);
mul_scale->set_friendly_name("mul_scale");
auto convert_i64 = std::make_shared<ov::opset9::Convert>(mul_scale, ov::element::i64);
auto convert_i64 = std::make_shared<ov::op::v0::Convert>(mul_scale, ov::element::i64);
convert_i64->set_friendly_name("convert_i64");
auto interpolate = std::make_shared<ov::opset9::Interpolate>(prelu,
auto interpolate = std::make_shared<ov::op::v4::Interpolate>(prelu,
convert_i64,
scale,
ov::opset9::Interpolate::InterpolateAttrs());
ov::op::v4::Interpolate::InterpolateAttrs());
interpolate->set_friendly_name("interpolate");
auto interpolate_result = std::make_shared<ngraph::op::Result>(interpolate);
auto interpolate_result = std::make_shared<ov::op::v0::Result>(interpolate);
interpolate_result->set_friendly_name("interpolate_result");
m_function =
std::make_shared<ov::Model>(ngraph::ResultVector{interpolate_result}, ngraph::ParameterVector{param});
m_function = std::make_shared<ov::Model>(ov::ResultVector{interpolate_result}, ov::ParameterVector{param});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
@ -415,9 +420,9 @@ TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) {
m.register_pass<ov::pass::CommonOptimizations>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
return ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || ov::op::util::is_output(op) ||
(std::dynamic_pointer_cast<ov::opset9::PRelu>(op) != nullptr);
(std::dynamic_pointer_cast<ov::op::v0::PRelu>(op) != nullptr);
},
{"input", "slope_compressed", "slope", "prelu"}); // keep dummy only since it has no unsupported consumers
}
@ -425,28 +430,28 @@ TEST_F(GetSupportedNodesTest, ShapeOfNonConstantNode) {
TEST_F(GetSupportedNodesTest, ShuffleChannelFusion) {
{
ov::Shape input_shape = {1, 112, 56, 56};
auto input = std::make_shared<ov::opset9::Parameter>(ov::element::f32, input_shape);
auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, input_shape);
input->set_friendly_name("input");
ov::Shape reshape_before_shape = {1, 4, 28, 56, 56};
auto shape_reshape_before = ov::opset9::Constant::create(ov::element::i64,
auto shape_reshape_before = ov::op::v0::Constant::create(ov::element::i64,
ov::Shape{reshape_before_shape.size()},
reshape_before_shape);
shape_reshape_before->set_friendly_name("shape_reshape_before");
auto reshape_before = std::make_shared<ov::opset9::Reshape>(input, shape_reshape_before, true);
auto reshape_before = std::make_shared<ov::op::v1::Reshape>(input, shape_reshape_before, true);
reshape_before->set_friendly_name("reshape_before");
ov::Shape permute_order = {0, 2, 1, 3, 4};
auto permutation =
ov::opset9::Constant::create(ov::element::i64, ov::Shape{permute_order.size()}, permute_order);
ov::op::v0::Constant::create(ov::element::i64, ov::Shape{permute_order.size()}, permute_order);
permutation->set_friendly_name("permutation");
auto permute = std::make_shared<ov::opset9::Transpose>(reshape_before, permutation);
auto permute = std::make_shared<ov::op::v1::Transpose>(reshape_before, permutation);
permute->set_friendly_name("permute");
auto shape_reshape_after =
ov::opset9::Constant::create(ov::element::i64, ov::Shape{input_shape.size()}, input_shape);
ov::op::v0::Constant::create(ov::element::i64, ov::Shape{input_shape.size()}, input_shape);
shape_reshape_after->set_friendly_name("shape_reshape_after");
auto reshape_after = std::make_shared<ov::opset9::Reshape>(permute, shape_reshape_after, true);
auto reshape_after = std::make_shared<ov::op::v1::Reshape>(permute, shape_reshape_after, true);
reshape_after->set_friendly_name("reshape_after");
m_function = std::make_shared<ov::Model>(ov::NodeVector{reshape_after}, ov::ParameterVector{input});
@ -466,14 +471,14 @@ TEST_F(GetSupportedNodesTest, ShuffleChannelFusion) {
TEST_F(GetSupportedNodesTest, FusedNameReduceL2Test) {
{
auto data = std::make_shared<ov::opset9::Parameter>(ov::element::f32, ov::Shape{1, 512});
auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 512});
data->set_friendly_name("data");
auto axes = ov::opset9::Constant::create(ov::element::i64, ov::Shape{1}, {1});
auto axes = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {1});
axes->set_friendly_name("axes");
auto reduce_l2 = std::make_shared<ov::opset9::ReduceL2>(data, axes, true);
auto reduce_l2 = std::make_shared<ov::op::v4::ReduceL2>(data, axes, true);
reduce_l2->set_friendly_name("reduce_l2");
m_function = std::make_shared<ngraph::Function>(ov::NodeVector{reduce_l2}, ov::ParameterVector{data});
m_function = std::make_shared<ov::Model>(ov::NodeVector{reduce_l2}, ov::ParameterVector{data});
}
Run(
[&](std::shared_ptr<ov::Model>& model) {
@ -483,7 +488,7 @@ TEST_F(GetSupportedNodesTest, FusedNameReduceL2Test) {
m.register_pass<ov::pass::ConvertReduceToPooling>();
m.run_passes(model);
},
[&](const std::shared_ptr<ngraph::Node>& op) {
[&](const std::shared_ptr<ov::Node>& op) {
// Pooling is supported, but Sqrt is not
return ov::op::util::is_parameter(op) || ov::op::util::is_output(op) || ov::op::util::is_constant(op) ||
(std::dynamic_pointer_cast<ov::opset1::AvgPool>(op) != nullptr);

View File

@ -0,0 +1,193 @@
// // Copyright (C) 2018-2023 Intel Corporation
// // SPDX-License-Identifier: Apache-2.0
// //
//
// #include "openvino/runtime/variable_state.hpp"
//
// #include <gmock/gmock.h>
//
// #include "openvino/runtime/iasync_infer_request.hpp"
// #include "openvino/runtime/infer_request.hpp"
// #include "openvino/runtime/iplugin.hpp"
// #include "openvino/runtime/ivariable_state.hpp"
// #include "openvino/runtime/make_tensor.hpp"
// #include "unit_test_utils/mocks/openvino/runtime/mock_iasync_infer_request.hpp"
// #include "unit_test_utils/mocks/openvino/runtime/mock_icompiled_model.hpp"
// #include "unit_test_utils/mocks/openvino/runtime/mock_ivariable_state.hpp"
//
// using namespace ::testing;
// using namespace std;
//
// namespace {
//
// struct InferRequest_Impl {
// typedef std::shared_ptr<ov::IAsyncInferRequest> ov::InferRequest::*type;
// friend type get(InferRequest_Impl);
// };
//
// template <typename Tag, typename Tag::type M>
// struct Rob {
// friend typename Tag::type get(Tag) {
// return M;
// }
// };
//
// template struct Rob<InferRequest_Impl, &ov::InferRequest::_impl>;
//
// } // namespace
//
// class VariableStateTests : public ::testing::Test {
// protected:
// shared_ptr<ov::MockIAsyncInferRequest> mock_infer_request;
// shared_ptr<ov::MockIVariableState> mock_variable_state;
// ov::InferRequest req;
//
// void SetUp() override {
// mock_infer_request = make_shared<ov::MockIAsyncInferRequest>();
// mock_variable_state = make_shared<ov::MockIVariableState>();
// req.*get(InferRequest_Impl()) = mock_infer_request;
// }
// };
//
// class VariableStateMockImpl : public ov::IVariableState {
// public:
// VariableStateMockImpl(const std::string& name) : ov::IVariableState(name) {}
// MOCK_METHOD0(reset, void());
// };
//
// TEST_F(VariableStateTests, VariableStateInternalCanSaveName) {
// std::shared_ptr<ov::IVariableState> pState(new VariableStateMockImpl("VariableStateMockImpl"));
// ASSERT_STREQ(pState->get_name().c_str(), "VariableStateMockImpl");
// }
//
// TEST_F(VariableStateTests, VariableStateInternalCanSaveState) {
// std::shared_ptr<ov::IVariableState> pState(new VariableStateMockImpl("VariableStateMockImpl"));
// float data[] = {123, 124, 125};
// auto state_tensor = ov::make_tensor(ov::element::f32, {3}, data);
//
// pState->set_state(state_tensor);
// auto saver = pState->get_state();
//
// ASSERT_NE(saver, nullptr);
// ASSERT_FLOAT_EQ(saver->data<float>()[0], 123);
// ASSERT_FLOAT_EQ(saver->data<float>()[1], 124);
// ASSERT_FLOAT_EQ(saver->data<float>()[2], 125);
// }
//
// TEST_F(VariableStateTests, VariableStateInternalCanSaveStateByReference) {
// std::shared_ptr<ov::IVariableState> pState(new VariableStateMockImpl("VariableStateMockImpl"));
// float data[] = {123, 124, 125};
// auto state_tensor = ov::make_tensor(ov::element::f32, {3}, data);
//
// pState->set_state(state_tensor);
//
// data[0] = 121;
// data[1] = 122;
// data[2] = 123;
// auto saver = pState->get_state();
//
// ASSERT_NE(saver, nullptr);
// ASSERT_FLOAT_EQ(saver->data<float>()[0], 121);
// ASSERT_FLOAT_EQ(saver->data<float>()[1], 122);
// ASSERT_FLOAT_EQ(saver->data<float>()[2], 123);
// }
//
// // Tests for InferRequest::QueryState
// TEST_F(VariableStateTests, InferRequestCanConvertOneVariableStateFromCppToAPI) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn(1);
// toReturn[0] = mock_variable_state;
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn));
//
// auto state = req.query_state();
// ASSERT_EQ(state.size(), 1);
// }
//
// TEST_F(VariableStateTests, InferRequestCanConvertZeroVariableStateFromCppToAPI) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).WillOnce(Return(toReturn));
//
// auto state = req.query_state();
// ASSERT_EQ(state.size(), 0);
// }
//
// TEST_F(VariableStateTests, InferRequestCanConvert2VariableStatesFromCPPtoAPI) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
// toReturn.push_back(mock_variable_state);
// toReturn.push_back(mock_variable_state);
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn));
//
// auto state = req.query_state();
// ASSERT_EQ(state.size(), 2);
// }
//
// TEST_F(VariableStateTests, InfReqVariableStatePropagatesReset) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
// toReturn.push_back(mock_variable_state);
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn));
// EXPECT_CALL(*mock_variable_state.get(), reset()).Times(1);
//
// auto state = req.query_state();
// state.front().reset();
// }
//
// TEST_F(VariableStateTests, InfReqVariableStatePropagatesExceptionsFromReset) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
// toReturn.push_back(mock_variable_state);
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn));
// EXPECT_CALL(*mock_variable_state.get(), reset()).WillOnce(Throw(std::logic_error("some error")));
//
// auto state = req.query_state();
// EXPECT_ANY_THROW(state.front().reset());
// }
//
// TEST_F(VariableStateTests, InfReqVariableStatePropagatesGetName) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
// std::string test_name = "someName";
// toReturn.push_back(mock_variable_state);
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).Times(1).WillRepeatedly(Return(toReturn));
// EXPECT_CALL(*mock_variable_state.get(), get_name()).WillOnce(ReturnRef(test_name));
//
// auto state = req.query_state();
// EXPECT_STREQ(state.front().get_name().c_str(), "someName");
// }
//
// TEST_F(VariableStateTests, InfReqVariableStateCanPropagateSetState) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
// ov::SoPtr<ov::ITensor> saver;
// toReturn.push_back(mock_variable_state);
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).WillRepeatedly(Return(toReturn));
// EXPECT_CALL(*mock_variable_state.get(), set_state(_)).WillOnce(SaveArg<0>(&saver));
//
// float data[] = {123, 124, 125};
// auto stateBlob = ov::Tensor(ov::element::f32, {3}, data);
//
// EXPECT_NO_THROW(req.query_state().front().set_state(stateBlob));
// ASSERT_FLOAT_EQ(saver->data<float>()[0], 123);
// ASSERT_FLOAT_EQ(saver->data<float>()[1], 124);
// ASSERT_FLOAT_EQ(saver->data<float>()[2], 125);
// }
//
// TEST_F(VariableStateTests, DISABLED_InfReqVariableStateCanPropagateGetLastState) {
// std::vector<ov::SoPtr<ov::IVariableState>> toReturn;
//
// float data[] = {123, 124, 125};
// auto stateBlob = ov::make_tensor(ov::element::f32, {3}, data);
//
// toReturn.push_back(mock_variable_state);
//
// EXPECT_CALL(*mock_infer_request.get(), query_state()).WillRepeatedly(Return(toReturn));
// EXPECT_CALL(*mock_variable_state.get(), get_state()).WillOnce(ReturnRef(stateBlob));
//
// auto saver = req.query_state().front().get_state();
// ASSERT_TRUE(saver);
// ASSERT_FLOAT_EQ(saver.data<float>()[0], 123);
// ASSERT_FLOAT_EQ(saver.data<float>()[1], 124);
// ASSERT_FLOAT_EQ(saver.data<float>()[2], 125);
// }

View File

@ -4,75 +4,63 @@
#pragma once
#include <string>
#include "openvino/util/pp.hpp"
#include "gtest/gtest.h"
#include "gmock/gmock-matchers.h"
#include "openvino/core/deprecated.hpp"
#include "gtest/gtest.h"
#include "ie_blob.h"
#include "ie_data.h"
#include "ie_preprocess.hpp"
#include "ie_input_info.hpp"
#include "ie_preprocess.hpp"
#include "openvino/core/deprecated.hpp"
#include "openvino/util/pp.hpp"
inline bool strContains(const std::string & str, const std::string & substr) {
inline bool strContains(const std::string& str, const std::string& substr) {
return str.find(substr) != std::string::npos;
}
inline bool strDoesnotContain(const std::string & str, const std::string & substr) {
inline bool strDoesnotContain(const std::string& str, const std::string& substr) {
return !strContains(str, substr);
}
// Assertion helper macros for the inference test suites. Each wraps a gtest
// predicate (ASSERT_PRED2 / EXPECT_PRED2) or one of the compare_* helpers
// defined further down in this header.
// NOTE(review): every macro in this region appears twice — once in its
// pre-clang-format line-continuation form and once collapsed onto a single
// line. This is a rendered-diff artifact; redefining a macro with a different
// body is ill-formed, so only one copy of each may remain in the real header.
#define ASSERT_STR_CONTAINS(str, substr) \
ASSERT_PRED2(&strContains, str, substr)
#define ASSERT_STR_CONTAINS(str, substr) ASSERT_PRED2(&strContains, str, substr)
#define ASSERT_STR_DOES_NOT_CONTAIN(str, substr) \
ASSERT_PRED2(&strDoesnotContain, str, substr)
#define ASSERT_STR_DOES_NOT_CONTAIN(str, substr) ASSERT_PRED2(&strDoesnotContain, str, substr)
#define EXPECT_STR_CONTAINS(str, substr) \
EXPECT_PRED2(&strContains, str, substr)
#define EXPECT_STR_CONTAINS(str, substr) EXPECT_PRED2(&strContains, str, substr)
// Blob/dims/data/preprocess equality macros forward to the inline compare_*
// helpers below (which themselves use gtest ASSERT_* internally).
#define ASSERT_BLOB_EQ(lhs, rhs) \
compare_blob(lhs, rhs)
#define ASSERT_BLOB_EQ(lhs, rhs) compare_blob(lhs, rhs)
#define ASSERT_DIMS_EQ(lhs, rhs) \
compare_dims(lhs, rhs)
#define ASSERT_DIMS_EQ(lhs, rhs) compare_dims(lhs, rhs)
#define ASSERT_DATA_EQ(lhs, rhs) \
compare_data(lhs, rhs)
#define ASSERT_DATA_EQ(lhs, rhs) compare_data(lhs, rhs)
#define ASSERT_PREPROCESS_CHANNEL_EQ(lhs, rhs) \
compare_preprocess(lhs, rhs)
#define ASSERT_PREPROCESS_CHANNEL_EQ(lhs, rhs) compare_preprocess(lhs, rhs)
#define ASSERT_PREPROCESS_INFO_EQ(lhs, rhs) \
compare_preprocess_info(lhs, rhs)
#define ASSERT_PREPROCESS_INFO_EQ(lhs, rhs) compare_preprocess_info(lhs, rhs)
#define ASSERT_OUTPUTS_INFO_EQ(lhs, rhs) \
compare_outputs_info(lhs, rhs)
#define ASSERT_OUTPUTS_INFO_EQ(lhs, rhs) compare_outputs_info(lhs, rhs)
#define ASSERT_INPUTS_INFO_EQ(lhs, rhs) \
compare_inputs_info(lhs, rhs)
#define ASSERT_INPUTS_INFO_EQ(lhs, rhs) compare_inputs_info(lhs, rhs)
#define ASSERT_STRINGEQ(lhs, rhs) \
compare_cpp_strings(lhs, rhs)
#define ASSERT_STRINGEQ(lhs, rhs) compare_cpp_strings(lhs, rhs)
// OV_ASSERT_NO_THROW delegates to OV_ASSERT_NO_THROW_ with a FATAL failure
// reporter (aborts the current test on throw, like gtest's ASSERT_*).
#define OV_ASSERT_NO_THROW(statement) \
OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
#define OV_ASSERT_NO_THROW(statement) OV_ASSERT_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
// Core of OV_ASSERT_NO_THROW: executes `statement` and, if it throws, reports
// through `fail` (a gtest failure macro such as GTEST_FATAL_FAILURE_). For a
// std::exception the what() text is appended to the failure message; any
// other exception type is reported without details.
// NOTE(review): the macro is defined twice below (pre/post clang-format) —
// a rendered-diff artifact; keep exactly one definition.
#define OV_ASSERT_NO_THROW_(statement, fail) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (::testing::internal::AlwaysTrue()) { \
        try { \
            GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
        } catch (const std::exception& e) { \
            fail("Expected: " #statement " doesn't throw an exception.\n" \
" Actual: it throws.") << e.what(); \
        } catch (...) { \
            fail("Expected: " #statement " doesn't throw an exception.\n" \
" Actual: it throws."); \
        } \
    }
#define OV_ASSERT_NO_THROW_(statement, fail) \
    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
    if (::testing::internal::AlwaysTrue()) { \
        try { \
            GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
        } catch (const std::exception& e) { \
            fail("Expected: " #statement " doesn't throw an exception.\n" \
" Actual: it throws.") \
                << e.what(); \
        } catch (...) { \
            fail("Expected: " #statement " doesn't throw an exception.\n" \
" Actual: it throws."); \
        } \
    }
#define OV_EXPECT_THROW(statement, exp_exception, exception_what_matcher) \
try { \
@ -86,32 +74,46 @@ inline bool strDoesnotContain(const std::string & str, const std::string & subst
FAIL() << "Unknown exception"; \
}
// Expects `statement` to throw exactly `exp_exception` whose what() message
// contains `exception_what_matcher` as a substring (::testing::HasSubstr).
// No exception, a different std::exception, or an unknown exception type are
// all reported as test failures (non-fatal via EXPECT_THAT / FAIL()).
#define OV_EXPECT_THROW_HAS_SUBSTRING(statement, exp_exception, exception_what_matcher) \
    try { \
        GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
        FAIL() << "Expected exception " << OV_PP_TOSTRING(exp_exception); \
    } catch (const exp_exception& ex) { \
        EXPECT_THAT(ex.what(), ::testing::HasSubstr(exception_what_matcher)); \
    } catch (const std::exception& e) { \
        FAIL() << "Unexpected exception " << e.what(); \
    } catch (...) { \
        FAIL() << "Unknown exception"; \
    }
OPENVINO_SUPPRESS_DEPRECATED_START
// Compares two IE blobs by pointer identity only — asserts both Blob::Ptr
// instances refer to the same underlying Blob object. Content comparison is
// not implemented yet (see TODO).
inline void compare_blob(InferenceEngine::Blob::Ptr lhs, InferenceEngine::Blob::Ptr rhs) {
    ASSERT_EQ(lhs.get(), rhs.get());
    // TODO: add blob specific comparison for general case
}
// Asserts two dimension vectors are identical: same rank, then element-wise
// equal extents.
// NOTE(review): duplicated signature lines are a rendered-diff artifact
// (pre/post clang-format); keep only one.
inline void compare_dims(const InferenceEngine::SizeVector & lhs, const InferenceEngine::SizeVector & rhs) {
inline void compare_dims(const InferenceEngine::SizeVector& lhs, const InferenceEngine::SizeVector& rhs) {
    ASSERT_EQ(lhs.size(), rhs.size());
    for (size_t i = 0; i < lhs.size(); i++) {
        ASSERT_EQ(lhs[i], rhs[i]);
    }
}
// Asserts two IE Data descriptors match on dimensions, name, and precision.
// (Does not compare any tensor contents — Data is metadata only here.)
// NOTE(review): duplicated signature lines are a rendered-diff artifact.
inline void compare_data(const InferenceEngine::Data & lhs, const InferenceEngine::Data & rhs) {
inline void compare_data(const InferenceEngine::Data& lhs, const InferenceEngine::Data& rhs) {
    ASSERT_DIMS_EQ(lhs.getDims(), rhs.getDims());
    ASSERT_STREQ(lhs.getName().c_str(), rhs.getName().c_str());
    ASSERT_EQ(lhs.getPrecision(), rhs.getPrecision());
}
// Asserts two per-channel preprocess descriptors match: mean value and std
// scale via float comparison (ASSERT_FLOAT_EQ tolerates rounding in the last
// ULPs), and mean-data blobs by pointer identity (see compare_blob).
// NOTE(review): duplicated signature lines are a rendered-diff artifact.
inline void compare_preprocess(const InferenceEngine::PreProcessChannel & lhs, const InferenceEngine::PreProcessChannel & rhs) {
inline void compare_preprocess(const InferenceEngine::PreProcessChannel& lhs,
                               const InferenceEngine::PreProcessChannel& rhs) {
    ASSERT_FLOAT_EQ(lhs.meanValue, rhs.meanValue);
    ASSERT_FLOAT_EQ(lhs.stdScale, rhs.stdScale);
    ASSERT_BLOB_EQ(lhs.meanData, rhs.meanData);
}
inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo & lhs, const InferenceEngine::PreProcessInfo & rhs) {
inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo& lhs,
const InferenceEngine::PreProcessInfo& rhs) {
ASSERT_EQ(lhs.getMeanVariant(), rhs.getMeanVariant());
ASSERT_EQ(lhs.getNumberOfChannels(), rhs.getNumberOfChannels());
for (size_t i = 0; i < lhs.getNumberOfChannels(); i++) {
@ -119,18 +121,19 @@ inline void compare_preprocess_info(const InferenceEngine::PreProcessInfo & lhs,
}
}
// Asserts two OutputsDataMap instances are equal: same size, then pairwise
// equal keys (output names) and Data payloads, iterating both maps in their
// (ordered) key order in lockstep.
// NOTE(review): the duplicated signature and `for` lines below are a
// rendered-diff artifact (pre/post clang-format); keep one of each.
inline void compare_outputs_info(const InferenceEngine::OutputsDataMap & lhs, const InferenceEngine::OutputsDataMap & rhs) {
inline void compare_outputs_info(const InferenceEngine::OutputsDataMap& lhs,
                                 const InferenceEngine::OutputsDataMap& rhs) {
    ASSERT_EQ(lhs.size(), rhs.size());
    auto i = lhs.begin();
    auto j = rhs.begin();
    for (size_t k =0; k != lhs.size(); k++, i++, j++) {
    for (size_t k = 0; k != lhs.size(); k++, i++, j++) {
        ASSERT_STREQ(i->first.c_str(), j->first.c_str());
        ASSERT_DATA_EQ(*i->second.get(), *j->second.get());
    }
}
inline void compare_inputs_info(const InferenceEngine::InputsDataMap & lhs, const InferenceEngine::InputsDataMap & rhs) {
inline void compare_inputs_info(const InferenceEngine::InputsDataMap& lhs, const InferenceEngine::InputsDataMap& rhs) {
ASSERT_EQ(lhs.size(), rhs.size());
auto i = lhs.begin();
auto j = rhs.begin();
@ -144,6 +147,6 @@ inline void compare_inputs_info(const InferenceEngine::InputsDataMap & lhs, cons
}
OPENVINO_SUPPRESS_DEPRECATED_END
// Asserts two std::strings are equal, delegating to gtest's C-string
// comparison so mismatches are printed character-exact in the failure output.
// NOTE(review): duplicated signature lines are a rendered-diff artifact.
inline void compare_cpp_strings(const std::string & lhs, const std::string &rhs) {
inline void compare_cpp_strings(const std::string& lhs, const std::string& rhs) {
    ASSERT_STREQ(lhs.c_str(), rhs.c_str());
}

View File

@ -0,0 +1,40 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <chrono>
#include <map>
#include <string>
#include <vector>
#include "openvino/runtime/iasync_infer_request.hpp"
#include "openvino/runtime/ivariable_state.hpp"
#include "openvino/runtime/profiling_info.hpp"
namespace ov {

/// gmock mock of ov::IAsyncInferRequest covering the whole async request
/// surface: lifecycle (start_async / wait / wait_for / cancel / set_callback),
/// synchronous infer, profiling and variable-state queries, and per-port
/// tensor accessors.
/// NOTE(review): the base is constructed with null request/executor pointers,
/// so presumably only the mocked virtuals are meant to be exercised — confirm
/// before routing any call into base-class logic.
class MockIAsyncInferRequest : public ov::IAsyncInferRequest {
public:
    // Base ctor args (all null here): sync request, task executor, callback executor.
    MockIAsyncInferRequest() : ov::IAsyncInferRequest(nullptr, nullptr, nullptr) {}
    MOCK_METHOD(void, start_async, ());
    MOCK_METHOD(void, wait, ());
    MOCK_METHOD(bool, wait_for, (const std::chrono::milliseconds&));
    MOCK_METHOD(void, cancel, ());
    MOCK_METHOD(void, set_callback, (std::function<void(std::exception_ptr)>));
    MOCK_METHOD(void, infer, ());
    MOCK_METHOD(std::vector<ov::ProfilingInfo>, get_profiling_info, (), (const));
    MOCK_METHOD(std::vector<ov::SoPtr<ov::IVariableState>>, query_state, (), (const));
    // Tensor access by model port (single tensor and batched variants).
    MOCK_METHOD(ov::SoPtr<ov::ITensor>, get_tensor, (const ov::Output<const ov::Node>&), (const));
    MOCK_METHOD(std::vector<ov::SoPtr<ov::ITensor>>, get_tensors, (const ov::Output<const ov::Node>&), (const));
    MOCK_METHOD(void, set_tensor, (const ov::Output<const ov::Node>&, const ov::SoPtr<ov::ITensor>&));
    MOCK_METHOD(void, set_tensors, (const ov::Output<const ov::Node>&, const std::vector<ov::SoPtr<ov::ITensor>>&));
    MOCK_METHOD(const std::vector<ov::Output<const ov::Node>>&, get_inputs, (), (const));
    MOCK_METHOD(const std::vector<ov::Output<const ov::Node>>&, get_outputs, (), (const));
};

} // namespace ov

View File

@ -0,0 +1,39 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <map>
#include <string>
#include <vector>
#include "openvino/runtime/iasync_infer_request.hpp"
#include "openvino/runtime/icompiled_model.hpp"
namespace ov {

/// gmock mock of ov::ICompiledModel. Requires a real (possibly null) model and
/// plugin pair for the base-class constructor; everything else is mocked.
class MockICompiledModel : public ov::ICompiledModel {
public:
    // Forwards model/plugin straight to the ICompiledModel base.
    MockICompiledModel(const std::shared_ptr<const ov::Model>& model, const std::shared_ptr<const ov::IPlugin>& plugin)
        : ov::ICompiledModel(model, plugin) {}
    MOCK_METHOD(const std::vector<ov::Output<const ov::Node>>&, outputs, (), (const));
    MOCK_METHOD(const std::vector<ov::Output<const ov::Node>>&, inputs, (), (const));
    MOCK_METHOD(std::shared_ptr<ov::IAsyncInferRequest>, create_infer_request, (), (const));
    MOCK_METHOD(std::shared_ptr<const ov::Model>, get_runtime_model, (), (const));
    MOCK_METHOD(void, export_model, (std::ostream&), (const));
    MOCK_METHOD(void, set_property, (const ov::AnyMap& config));
    MOCK_METHOD(ov::Any, get_property, (const std::string& name), (const));
    MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_context, (), (const));
    // NOTE(review): legacy MOCK_CONST_METHOD0 is used here while every other
    // method uses the modern MOCK_METHOD form — presumably deliberate (the
    // commit mentions working around a gmock warning); confirm before
    // modernizing.
    MOCK_CONST_METHOD0(create_sync_infer_request, std::shared_ptr<ov::ISyncInferRequest>(void));

    // Escape hatch for tests: forwards to create_async_infer_request() (not
    // declared here, so it resolves to the inherited ICompiledModel helper),
    // i.e. runs the real default request-creation path instead of the mock.
    std::shared_ptr<ov::IAsyncInferRequest> create_infer_request_default() const {
        return create_async_infer_request();
    }
};

} // namespace ov

View File

@ -0,0 +1,63 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include "ie_icore.hpp"
#include "openvino/runtime/icompiled_model.hpp"
namespace ov {

/// gmock mock of ov::ICore — the internal Core interface plugins and tests
/// talk to. Mocks property access, device enumeration, model reading, and
/// every compile_model / import_model / query_model overload.
class MockICore : public ov::ICore {
public:
    // Property access: (name, device, [arguments]).
    MOCK_METHOD(ov::Any, get_property, (const std::string&, const std::string&, const ov::AnyMap&), (const));
    MOCK_METHOD(ov::Any, get_property, (const std::string&, const std::string&), (const));
    MOCK_METHOD(ov::AnyMap, get_supported_property, (const std::string&, const ov::AnyMap&), (const));
    MOCK_METHOD(bool, is_new_api, (), (const));
    // Remote-context creation for a named device.
    MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>,
                create_context,
                (const std::string& deviceName, const ov::AnyMap& params),
                (const));
    MOCK_METHOD(std::vector<std::string>, get_available_devices, (), (const));
    MOCK_METHOD(ov::SupportedOpsMap,
                query_model,
                (const std::shared_ptr<const ov::Model>&, const std::string&, const ov::AnyMap&),
                (const));
    // import_model: (stream, device name, properties).
    MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>,
                import_model,
                (std::istream&, const std::string&, const ov::AnyMap&),
                (const));
    // compile_model overloads: in-memory model + device; model + remote
    // context; model path + device; model string + weights tensor + device.
    MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>,
                compile_model,
                (const std::shared_ptr<const ov::Model>&, const std::string&, const ov::AnyMap&),
                (const));
    MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>,
                compile_model,
                (const std::shared_ptr<const ov::Model>&, const ov::SoPtr<ov::IRemoteContext>&, const ov::AnyMap&),
                (const));
    MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>,
                compile_model,
                (const std::string&, const std::string&, const ov::AnyMap&),
                (const));
    MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>,
                compile_model,
                (const std::string&, const ov::Tensor&, const std::string&, const ov::AnyMap&),
                (const));
    // read_model: (model string, weights tensor, frontend flag) or
    // (model path, weights path).
    MOCK_METHOD(std::shared_ptr<ov::Model>, read_model, (const std::string&, const ov::Tensor&, bool), (const));
    MOCK_METHOD(std::shared_ptr<ov::Model>, read_model, (const std::string&, const std::string&), (const));
    MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_default_context, (const std::string&), (const));
    MOCK_METHOD(ov::SoPtr<ov::ICompiledModel>,
                import_model,
                (std::istream&, const ov::SoPtr<ov::IRemoteContext>&, const ov::AnyMap&),
                (const));
    MOCK_METHOD(bool, device_supports_model_caching, (const std::string&), (const));
    MOCK_METHOD(void, set_property, (const std::string& device_name, const ov::AnyMap& properties));

    ~MockICore() = default;
};

} // namespace ov

View File

@ -0,0 +1,50 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <map>
#include <string>
#include "ie_extension.h"
#include "openvino/core/any.hpp"
#include "openvino/core/model.hpp"
#include "openvino/core/version.hpp"
#include "openvino/runtime/common.hpp"
#include "openvino/runtime/iinfer_request.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
namespace ov {

/// gmock mock of ov::IPlugin — the device-plugin interface. Mocks all
/// compile_model / import_model overloads, property get/set, remote-context
/// creation, and query_model. Default-constructible (IPlugin's base ctor is
/// used implicitly), so tests can instantiate it directly.
class MockIPlugin : public ov::IPlugin {
public:
    // compile_model overloads: in-memory model; model path; model + remote context.
    MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>,
                compile_model,
                (const std::shared_ptr<const ov::Model>&, const ov::AnyMap&),
                (const));
    MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, compile_model, (const std::string&, const ov::AnyMap&), (const));
    MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>,
                compile_model,
                (const std::shared_ptr<const ov::Model>&, const ov::AnyMap&, const ov::SoPtr<ov::IRemoteContext>&),
                (const));
    MOCK_METHOD(void, set_property, (const ov::AnyMap&));
    MOCK_METHOD(ov::Any, get_property, (const std::string&, const ov::AnyMap&), (const));
    MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, create_context, (const ov::AnyMap&), (const));
    MOCK_METHOD(ov::SoPtr<ov::IRemoteContext>, get_default_context, (const ov::AnyMap&), (const));
    // import_model: from a stream, optionally bound to a remote context.
    MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>, import_model, (std::istream&, const ov::AnyMap&), (const));
    MOCK_METHOD(std::shared_ptr<ov::ICompiledModel>,
                import_model,
                (std::istream&, const ov::SoPtr<ov::IRemoteContext>&, const ov::AnyMap&),
                (const));
    MOCK_METHOD(ov::SupportedOpsMap,
                query_model,
                (const std::shared_ptr<const ov::Model>&, const ov::AnyMap&),
                (const));
};

} // namespace ov

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include "openvino/runtime/isync_infer_request.hpp"
#include "openvino/runtime/ivariable_state.hpp"
#include "openvino/runtime/profiling_info.hpp"
namespace ov {

/// gmock mock of ov::ISyncInferRequest. Mocks the three request-level
/// virtuals (infer, profiling info, variable-state query); tensor handling is
/// inherited from the real ISyncInferRequest base, which is constructed from
/// the compiled model passed in.
class MockISyncInferRequest : public ov::ISyncInferRequest {
public:
    // Forwards the owning compiled model to the ISyncInferRequest base.
    MockISyncInferRequest(const std::shared_ptr<const ov::ICompiledModel>& compiled_model)
        : ov::ISyncInferRequest(compiled_model) {}
    MOCK_METHOD(void, infer, ());
    MOCK_METHOD(std::vector<ov::ProfilingInfo>, get_profiling_info, (), (const));
    MOCK_METHOD(std::vector<ov::SoPtr<ov::IVariableState>>, query_state, (), (const));
};

} // namespace ov

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gmock/gmock.h>
#include <string>
#include <vector>
#include "openvino/runtime/ivariable_state.hpp"
namespace ov {

/// gmock mock of ov::IVariableState (stateful-model variable). The base is
/// constructed with the fixed name "MockIVariableState", but get_name() is
/// itself mocked, so tests control what name callers actually observe.
class MockIVariableState : public ov::IVariableState {
public:
    MockIVariableState() : ov::IVariableState{"MockIVariableState"} {}
    MOCK_METHOD(const std::string&, get_name, (), (const));
    MOCK_METHOD(void, reset, ());
    MOCK_METHOD(void, set_state, (const ov::SoPtr<ov::ITensor>&));
    MOCK_METHOD(const ov::SoPtr<ov::ITensor>&, get_state, (), (const));
};

} // namespace ov