[HETERO] Implement get_runtime_model function (#18586)

* [HETERO] Implement get_runtime_model function

* [HETERO] Adds unit tests

Update src/plugins/hetero/tests/unit/properties_tests.cpp

Co-authored-by: Vitaliy Urusovskij <vitaliy.urusovskij@intel.com>

* Temporarily disable GA

---------

Co-authored-by: Vitaliy Urusovskij <vitaliy.urusovskij@intel.com>
This commit is contained in:
Nadezhda Ageeva 2023-07-25 12:30:04 +04:00 committed by GitHub
parent 5792bba6d1
commit be02d1a3c9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 1239 additions and 11 deletions

View File

@ -366,6 +366,9 @@ jobs:
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVProxyTests.xml
displayName: 'OV Proxy Plugin Tests'
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroUnitTests.xml
displayName: 'OV Hetero Unit Tests'
- script: $(RUN_PREFIX) $(INSTALL_TEST_DIR)/ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ConditionalCompilation.xml
displayName: 'Conditional Compilation Tests'

View File

@ -186,7 +186,7 @@ jobs:
displayName: 'Clean ccache stats'
- script: cmake --build $(BUILD_DIR) --parallel --config $(BUILD_TYPE)
env:
env:
CCACHE_DIR: $(CCACHE_DIR)
CCACHE_TEMPDIR: $(TMP_DIR)/ccache
CCACHE_BASEDIR: $(Pipeline.Workspace)
@ -282,6 +282,12 @@ jobs:
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
displayName: 'OV Proxy Tests'
- script: |
$(INSTALL_TEST_DIR)/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroUnitTests.xml
env:
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)
displayName: 'OV Hetero Unit Tests'
- script: $(INSTALL_TEST_DIR)/ov_onnx_frontend_tests --gtest_print_time=1 --gtest_filter=-*IE_GPU* --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-ONNXFrontend.xml
env:
LD_LIBRARY_PATH: $(INSTALL_TEST_DIR)

View File

@ -188,6 +188,10 @@ jobs:
displayName: 'OV Proxy Plugin Tests'
enabled: 'false'
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-OVHeteroUnitTests.xml
displayName: 'OV Hetero Unit Tests'
enabled: 'false'
- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/ov_ir_frontend_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)/TEST-IRFrontend.xml
displayName: 'IR Frontend Tests'
enabled: 'false'

View File

@ -266,6 +266,9 @@ jobs:
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_proxy_plugin_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-OVProxyTests.xml
displayName: 'OV Proxy Plugin Tests'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_hetero_unit_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-OVHeteroUnitTests.xml
displayName: 'OV Hetero Unit Tests'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_conditional_compilation_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\TEST-ConditionalCompilation.xml
displayName: 'Conditional Compilation Tests'
@ -293,10 +296,10 @@ jobs:
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_transformations_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\Transformations.xml
displayName: 'Transformations Tests'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_legacy_transformations_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\LegacyTransformations.xml
displayName: 'Legacy Transformations Tests'
- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\ov_util_tests --gtest_print_time=1 --gtest_output=xml:$(INSTALL_TEST_DIR)\CommonUtilTests.xml
displayName: 'Common Utils Tests'

View File

@ -28,3 +28,7 @@ target_link_libraries(${TARGET_NAME} PRIVATE openvino::pugixml)
ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
if(ENABLE_TESTS)
add_subdirectory(tests/unit)
endif()

View File

@ -257,16 +257,17 @@ ov::hetero::CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model
input_subsets[input_subgraph_id].emplace(input);
}
}
// For each subset of inputs, create a separate Result operation if the subset belongs to another subgraph
// Avoid duplicate results on the same output port
auto result = std::make_shared<ov::op::v0::Result>(output);
ov::copy_runtime_info(output.get_node_shared_ptr(), result);
subgraphIds.emplace(result, output_subgraph_id);
results.push_back(result);
for (const auto& input_subset : input_subsets) {
auto result = std::make_shared<ov::op::v0::Result>(output);
ov::copy_runtime_info(output.get_node_shared_ptr(), result);
subgraphIds.emplace(result, output_subgraph_id);
results.push_back(result);
// Avoid duplicate parameters in the same subgraph
auto parameter =
std::make_shared<ov::op::v0::Parameter>(output.get_element_type(), output.get_partial_shape());
for (const auto& input : input_subset.second) {
output.remove_target_input(input);
auto parameter =
std::make_shared<ov::op::v0::Parameter>(output.get_element_type(), output.get_partial_shape());
ov::copy_runtime_info(input.get_node()->shared_from_this(), parameter);
input.replace_source_output(parameter->output(0));
subgraphIds.emplace(parameter, input_subset.first);
@ -529,7 +530,110 @@ void ov::hetero::CompiledModel::set_property(const ov::AnyMap& properties) {
}
std::shared_ptr<const ov::Model> ov::hetero::CompiledModel::get_runtime_model() const {
OPENVINO_NOT_IMPLEMENTED;
std::vector<std::shared_ptr<ov::Model>> rt_models;
// Collect runtime subgraphs
for (size_t i = 0; i < m_compiled_submodels.size(); i++) {
rt_models.push_back(m_compiled_submodels.at(i).compiled_model->get_runtime_model()->clone());
}
// Results which should not be present in final graph
std::set<std::string> result_names_to_be_removed;
// Remap port indexes to names, because their order will be modified during the merge
std::map<std::pair<size_t, std::string>, std::pair<size_t, std::string>> input_to_prev_output;
for (const auto& kvp : m_submodels_input_to_prev_output) {
const auto& input_node = rt_models[kvp.first.first]->inputs()[kvp.first.second].get_node();
const auto& output_node = rt_models[kvp.second.first]->outputs()[kvp.second.second].get_node();
input_to_prev_output[{kvp.first.first, input_node->get_friendly_name()}] = {kvp.second.first,
output_node->get_friendly_name()};
result_names_to_be_removed.insert(output_node->get_friendly_name());
}
int submodel_in_index = static_cast<int>(rt_models.size()) - 1;
while (submodel_in_index >= 0 && input_to_prev_output.size() > 0) {
auto& submodel_in = rt_models[submodel_in_index];
size_t port_in_index = 0;
while (port_in_index < submodel_in->get_parameters().size()) {
auto parameter_to_replace = submodel_in->get_parameters()[port_in_index];
auto item = input_to_prev_output.find({submodel_in_index, parameter_to_replace->get_friendly_name()});
if (item == input_to_prev_output.end()) {
port_in_index++;
continue;
}
auto submodel_out_index = item->second.first;
auto submodel_out_result_name = item->second.second;
auto submodel_out = rt_models.at(submodel_out_index);
// Get all results from previous subgraph except already existed in next subgraph
std::shared_ptr<ov::op::v0::Result> result_to_replace = nullptr;
ov::ResultVector add_results;
for (auto& result : submodel_out->get_results()) {
if (result->get_friendly_name() == submodel_out_result_name) {
result_to_replace = result;
}
auto it = std::find_if(submodel_in->get_results().begin(),
submodel_in->get_results().end(),
[&](const std::shared_ptr<ov::op::v0::Result>& result_to_check) {
return result_to_check == result;
});
if (it == submodel_in->get_results().end())
add_results.push_back(result);
}
OPENVINO_ASSERT(result_to_replace != nullptr);
// Get all parameters from previous subgraph except already existed in next subgraph
ov::ParameterVector add_parameters;
for (auto& parameter : submodel_out->get_parameters()) {
auto it = std::find_if(submodel_in->get_parameters().begin(),
submodel_in->get_parameters().end(),
[&](const std::shared_ptr<ov::op::v0::Parameter>& parameter_to_check) {
return parameter_to_check == parameter;
});
if (it == submodel_in->get_parameters().end())
add_parameters.push_back(parameter);
}
// Reconnect appropriate target inputs to the new source output
auto result_source = result_to_replace->get_input_source_output(0);
auto parameter_targets = parameter_to_replace->get_output_target_inputs(0);
for (auto parameter_target : parameter_targets) {
parameter_target.replace_source_output(result_source);
}
// Update parameter and results
submodel_in->remove_parameter(parameter_to_replace);
submodel_in->add_parameters(add_parameters);
submodel_in->add_results(add_results);
// Remove processed connection
input_to_prev_output.erase(item);
// Update incoming model since it is merged
for (size_t i = 0; i < rt_models.size(); i++) {
if (rt_models[i] == submodel_out) {
rt_models[i] = submodel_in;
}
}
// Start checking ports from the beginning because the number of ports has been modified
port_in_index = 0;
}
--submodel_in_index;
}
// Finally all subgraphs should be merged into single one
OPENVINO_ASSERT(input_to_prev_output.size() == 0);
OPENVINO_ASSERT(all_of(rt_models.begin(), rt_models.end(), [&](const std::shared_ptr<ov::Model>& rt_model) {
return rt_model == rt_models[0];
}));
auto runtime_graph = rt_models[0];
// Clean up intermediate results
for (size_t i = 0; i < runtime_graph->get_results().size();) {
auto& result = runtime_graph->get_results()[i];
if (result_names_to_be_removed.count(result->get_friendly_name())) {
runtime_graph->remove_result(result);
} else {
i++;
}
}
OPENVINO_ASSERT(runtime_graph->inputs().size() == inputs().size());
return runtime_graph;
}
std::shared_ptr<const ov::hetero::Plugin> ov::hetero::CompiledModel::get_hetero_plugin() const {

View File

@ -0,0 +1,29 @@
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Unit-test target for the HETERO plugin.
set(TARGET_NAME ov_hetero_unit_tests)
# Register the test binary via the common OpenVINO test helper; sources are
# discovered from this directory.
ov_add_test_target(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDENCIES
openvino::runtime
mock_engine
openvino_hetero_plugin
LINK_LIBRARIES
openvino::runtime::dev
gtest
gtest_main
unit_test_utils
ADD_CLANG_FORMAT
LABELS
HETERO
)
# Tests register mock plugins and need a version string to report.
target_compile_definitions(${TARGET_NAME} PRIVATE CI_BUILD_NUMBER=\"mock_version\")
# Mock plugins deserialize models through the IR frontend when it is built.
if(ENABLE_OV_IR_FRONTEND)
add_dependencies(${TARGET_NAME} openvino_ir_frontend)
target_compile_definitions(${TARGET_NAME} PRIVATE IR_FRONTEND_ENABLED)
endif()

View File

@ -0,0 +1,91 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_tests.hpp"
#include "openvino/runtime/exec_model_info.hpp"
#include "openvino/runtime/properties.hpp"
using namespace ov::hetero::tests;
// All registered mock devices must be visible through get_available_devices().
TEST_F(HeteroTests, get_available_devices) {
auto available_devices = core.get_available_devices();
std::vector<std::string> mock_reference_dev = {{"MOCK0.0"}, {"MOCK0.1"}, {"MOCK0.2"}, {"MOCK1.0"}, {"MOCK1.1"}};
for (const auto& dev : available_devices) {
auto it = std::find(mock_reference_dev.begin(), mock_reference_dev.end(), dev);
if (it != mock_reference_dev.end()) {
mock_reference_dev.erase(it);
}
}
// All devices should be found
EXPECT_TRUE(mock_reference_dev.empty());
}
// Compilation succeeds when the priority list names registered devices.
TEST_F(HeteroTests, compile_with_registered_devices) {
// Change device priority
core.set_property("HETERO", ov::device::priorities("MOCK0,MOCK1"));
auto model = create_model_with_reshape();
EXPECT_NO_THROW(core.compile_model(model, "HETERO"));
}
// Compilation must fail when the priority list names unknown devices.
TEST_F(HeteroTests, compile_with_unregistered_devices_throw) {
// Change device priority
core.set_property("HETERO", ov::device::priorities("MOCK2,MOCK3"));
auto model = create_model_with_reshape();
EXPECT_THROW(core.compile_model(model, "HETERO"), ov::Exception);
}
// An empty priority list is an error: HETERO cannot pick devices by itself.
TEST_F(HeteroTests, compile_without_device_priorities_throw) {
// Change device priority
core.set_property("HETERO", ov::device::priorities(""));
auto model = create_model_with_reshape();
EXPECT_THROW(core.compile_model(model, "HETERO"), ov::Exception);
}
// Per-device properties passed via ov::device::properties must reach each
// submodel and be retrievable per device; top-level queries for them throw.
TEST_F(HeteroTests, compile_with_device_properties) {
ov::AnyMap config = {ov::device::priorities("MOCK0,MOCK1"),
ov::device::properties("MOCK0", ov::num_streams(4), ov::enable_profiling(false)),
ov::device::properties("MOCK1", ov::num_streams(6), ov::enable_profiling(true))};
auto model = create_model_with_subtract_reshape();
auto compiled_model = core.compile_model(model, "HETERO", config);
EXPECT_THROW(compiled_model.get_property(ov::num_streams), ov::Exception);
EXPECT_THROW(compiled_model.get_property(ov::enable_profiling), ov::Exception);
auto device_properties = compiled_model.get_property(ov::device::properties.name()).as<ov::AnyMap>();
ASSERT_TRUE(device_properties.count("MOCK0.0"));
auto mock0_properties = device_properties.at("MOCK0.0").as<ov::AnyMap>();
ASSERT_TRUE(mock0_properties.count(ov::num_streams.name()));
ASSERT_TRUE(mock0_properties.count(ov::enable_profiling.name()));
EXPECT_EQ(4, mock0_properties.at(ov::num_streams.name()).as<ov::streams::Num>());
EXPECT_EQ(false, mock0_properties.at(ov::enable_profiling.name()).as<bool>());
ASSERT_TRUE(device_properties.count("MOCK1.0"));
auto mock1_properties = device_properties.at("MOCK1.0").as<ov::AnyMap>();
ASSERT_TRUE(mock1_properties.count(ov::num_streams.name()));
ASSERT_TRUE(mock1_properties.count(ov::enable_profiling.name()));
EXPECT_EQ(6, mock1_properties.at(ov::num_streams.name()).as<ov::streams::Num>());
EXPECT_EQ(true, mock1_properties.at(ov::enable_profiling.name()).as<bool>());
}
// The merged runtime model must expose the standard exec-graph rt_info keys
// on every node, and its ORIGINAL_NAMES must cover every op of the source model.
TEST_F(HeteroTests, get_runtime_model) {
ov::AnyMap config = {ov::device::priorities("MOCK0,MOCK1")};
auto model = create_model_with_subtract_reshape();
std::set<std::string> original_names;
for (auto& op : model->get_ordered_ops()) {
original_names.insert(op->get_friendly_name());
}
auto compiled_model = core.compile_model(model, "HETERO", config);
auto runtime_model = compiled_model.get_runtime_model();
for (auto& op : runtime_model->get_ordered_ops()) {
auto& info = op->get_rt_info();
ASSERT_TRUE(info.count(ov::exec_model_info::EXECUTION_ORDER));
ASSERT_TRUE(info.count(ov::exec_model_info::IMPL_TYPE));
ASSERT_TRUE(info.count(ov::exec_model_info::PERF_COUNTER));
ASSERT_TRUE(info.count(ov::exec_model_info::ORIGINAL_NAMES));
auto fused_names = info.at(ov::exec_model_info::ORIGINAL_NAMES).as<std::vector<std::string>>();
for (auto& fused_name : fused_names) {
if (original_names.count(fused_name))
original_names.erase(fused_name);
}
ASSERT_TRUE(info.count(ov::exec_model_info::RUNTIME_PRECISION));
ASSERT_TRUE(info.count(ov::exec_model_info::OUTPUT_PRECISIONS));
}
// Every original friendly name must appear in some node's fused names.
EXPECT_EQ(0, original_names.size());
}

View File

@ -0,0 +1,731 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_tests.hpp"
#include <memory>
#include <string>
#include "common_test_utils/file_utils.hpp"
#include "ie_plugin_config.hpp"
#include "openvino/core/any.hpp"
#include "openvino/core/except.hpp"
#include "openvino/opsets/opset11.hpp"
#include "openvino/pass/serialize.hpp"
#include "openvino/runtime/exec_model_info.hpp"
#include "openvino/runtime/internal_properties.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/iremote_context.hpp"
#include "openvino/runtime/iremote_tensor.hpp"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/runtime/properties.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/util/shared_object.hpp"
#include "transformations/rt_info/fused_names_attribute.hpp"
namespace {
// Build the platform-specific shared-library path of the "mock_engine"
// plugin that lives next to the test executable.
std::string get_mock_engine_path() {
    const std::string library_base = std::string("mock_engine") + IE_BUILD_POSTFIX;
    return ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(), library_base);
}
template <class T>
std::function<T> make_std_function(const std::shared_ptr<void> so, const std::string& functionName) {
std::function<T> ptr(reinterpret_cast<T*>(ov::util::get_symbol(so, functionName.c_str())));
return ptr;
}
// A model counts as supported only when query_model() reported every one of
// its operations (lookup is by friendly name).
bool support_model(const std::shared_ptr<const ov::Model>& model, const ov::SupportedOpsMap& supported_ops) {
    for (const auto& node : model->get_ops()) {
        if (supported_ops.count(node->get_friendly_name()) == 0)
            return false;
    }
    return true;
}
// Shorthand: wrap a property name as a read-only property.
ov::PropertyName RO_property(const std::string& propertyName) {
    return {propertyName, ov::PropertyMutability::RO};
};
// Shorthand: wrap a property name as a read-write property.
ov::PropertyName RW_property(const std::string& propertyName) {
    return {propertyName, ov::PropertyMutability::RW};
};
} // namespace
// Creates a tensor of the requested element type and shape via the templated
// create_tensor<T>() helper. Only i64 is handled here; any other element type
// throws.
ov::Tensor ov::hetero::tests::HeteroTests::create_and_fill_tensor(const ov::element::Type& type,
const ov::Shape& shape) {
switch (type) {
case ov::element::Type_t::i64:
return create_tensor<ov::element_type_traits<ov::element::Type_t::i64>::value_type>(type, shape);
default:
// Fall through to the common error below for unsupported types.
break;
}
OPENVINO_THROW("Cannot generate tensor. Unsupported element type.");
}
// Builds a small i64 test model: Parameter -> Add(const 1) -> Subtract(const 1) -> Result.
// Subtract is intentionally an op the "reshape" mock plugin does not support,
// which forces HETERO to split the graph between devices.
std::shared_ptr<ov::Model> ov::hetero::tests::HeteroTests::create_model_with_subtract() {
auto param = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
param->set_friendly_name("input");
auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1});
const_value->set_friendly_name("const_val");
auto add = std::make_shared<ov::opset11::Add>(param, const_value);
add->set_friendly_name("add");
auto subtract = std::make_shared<ov::opset11::Subtract>(add, const_value);
subtract->set_friendly_name("sub");
auto result = std::make_shared<ov::opset11::Result>(subtract);
result->set_friendly_name("res");
return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
// Builds an i64 model: Parameter -> Add(const 1) -> Subtract(const 1) ->
// Reshape(to 1-D, special_zero=true) -> Result. Mixing Subtract and Reshape
// lets tests exercise a two-device HETERO split.
std::shared_ptr<ov::Model> ov::hetero::tests::HeteroTests::create_model_with_subtract_reshape() {
auto param = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
param->set_friendly_name("input");
auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1});
const_value->set_friendly_name("const_val");
auto add = std::make_shared<ov::opset11::Add>(param, const_value);
add->set_friendly_name("add");
auto subtract = std::make_shared<ov::opset11::Subtract>(add, const_value);
subtract->set_friendly_name("sub");
// -1 target shape: flatten all elements into one dimension.
auto reshape_val = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1}, {-1});
reshape_val->set_friendly_name("reshape_val");
auto reshape = std::make_shared<ov::opset11::Reshape>(subtract, reshape_val, true);
reshape->set_friendly_name("reshape");
auto result = std::make_shared<ov::opset11::Result>(reshape);
result->set_friendly_name("res");
return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
// Builds an i64 model: Parameter -> Add(const 1) -> Subtract(const 1) ->
// Reshape(flatten) -> Relu -> Result. The extra Relu creates a third
// op-support boundary for multi-subgraph HETERO tests.
std::shared_ptr<ov::Model> ov::hetero::tests::HeteroTests::create_model_with_subtract_reshape_relu() {
auto param = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
param->set_friendly_name("input");
auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1});
const_value->set_friendly_name("const_val");
auto add = std::make_shared<ov::opset11::Add>(param, const_value);
add->set_friendly_name("add");
auto subtract = std::make_shared<ov::opset11::Subtract>(add, const_value);
subtract->set_friendly_name("sub");
// -1 target shape: flatten all elements into one dimension.
auto reshape_val = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1}, {-1});
reshape_val->set_friendly_name("reshape_val");
auto reshape = std::make_shared<ov::opset11::Reshape>(subtract, reshape_val, true);
reshape->set_friendly_name("reshape");
auto relu = std::make_shared<ov::opset11::Relu>(reshape);
relu->set_friendly_name("relu");
auto result = std::make_shared<ov::opset11::Result>(relu);
result->set_friendly_name("res");
return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
// Builds an i64 model: Parameter -> Add(const 1) -> Reshape(to {1,3,4}) -> Result.
// Contains only ops both mock plugins support, so it compiles on a single device.
std::shared_ptr<ov::Model> ov::hetero::tests::HeteroTests::create_model_with_reshape() {
auto param = std::make_shared<ov::opset11::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
param->set_friendly_name("input");
auto const_value = ov::opset11::Constant::create(ov::element::i64, ov::Shape{1, 1, 1, 1}, {1});
const_value->set_friendly_name("const_val");
auto add = std::make_shared<ov::opset11::Add>(param, const_value);
add->set_friendly_name("add");
auto reshape_val = ov::opset11::Constant::create(ov::element::i64, ov::Shape{3}, {1, 3, 4});
reshape_val->set_friendly_name("reshape_val");
auto reshape = std::make_shared<ov::opset11::Reshape>(add, reshape_val, true);
reshape->set_friendly_name("reshape");
auto result = std::make_shared<ov::opset11::Result>(reshape);
result->set_friendly_name("res");
return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
}
// Mock plugins
// Minimal ov::ICompiledModel used by the HETERO unit tests. It keeps the
// original ov::Model, evaluates it directly on infer, and synthesizes the
// standard execution-graph rt_info in get_runtime_model().
class MockCompiledModel : public ov::ICompiledModel {
public:
// Compile without a remote context.
MockCompiledModel(const std::shared_ptr<const ov::Model>& model,
const std::shared_ptr<const ov::IPlugin>& plugin,
const ov::AnyMap& config)
: ov::ICompiledModel(model, plugin),
m_config(config),
m_model(model),
m_has_context(false) {}
// Compile with a remote context; remembers it so infer requests can
// allocate remote tensors.
MockCompiledModel(const std::shared_ptr<const ov::Model>& model,
const std::shared_ptr<const ov::IPlugin>& plugin,
const ov::AnyMap& config,
const ov::SoPtr<ov::IRemoteContext>& context)
: ov::ICompiledModel(model, plugin),
m_config(config),
m_model(model),
m_has_context(true),
m_context(context) {}
// Methods from a base class ov::ICompiledModel
// Serializes the kept model with StreamSerialize (no custom data section).
void export_model(std::ostream& model) const override {
ov::pass::StreamSerialize(model, std::function<void(std::ostream&)>())
.run_on_model(std::const_pointer_cast<ov::Model>(m_model));
}
// Clones the model and decorates every op with the exec-graph rt_info keys
// (EXECUTION_ORDER, IMPL_TYPE, PERF_COUNTER, ORIGINAL_NAMES,
// RUNTIME_PRECISION, OUTPUT_PRECISIONS) that real plugins provide.
std::shared_ptr<const ov::Model> get_runtime_model() const override {
auto model = m_model->clone();
// Add execution information into the model
size_t exec_order = 0;
for (const auto& op : model->get_ordered_ops()) {
auto& info = op->get_rt_info();
info[ov::exec_model_info::EXECUTION_ORDER] = std::to_string(exec_order++);
// NOTE(review): "_ " contains a trailing space — looks like a typo for "_"; confirm intended IMPL_TYPE format.
info[ov::exec_model_info::IMPL_TYPE] = get_plugin()->get_device_name() + "_ " + op->get_type_info().name;
auto perf_count_enabled = get_property(ov::enable_profiling.name()).as<bool>();
info[ov::exec_model_info::PERF_COUNTER] = perf_count_enabled ? "0" : "not_executed";
// ORIGINAL_NAMES: fused names plus this op's friendly name, op name first.
std::string original_names = ov::getFusedNames(op);
if (original_names.empty()) {
original_names = op->get_friendly_name();
} else if (original_names.find(op->get_friendly_name()) == std::string::npos) {
original_names = op->get_friendly_name() + "," + original_names;
}
info[ov::exec_model_info::ORIGINAL_NAMES] = original_names;
// Runtime precision: first input's type, or first output's for source ops.
if (op->inputs().size() > 0)
info[ov::exec_model_info::RUNTIME_PRECISION] = op->get_input_element_type(0);
else
info[ov::exec_model_info::RUNTIME_PRECISION] = op->get_output_element_type(0);
std::stringstream precisions_ss;
for (size_t i = 0; i < op->get_output_size(); i++) {
if (i > 0)
precisions_ss << ",";
precisions_ss << op->get_output_element_type(i);
}
info[ov::exec_model_info::OUTPUT_PRECISIONS] = precisions_ss.str();
}
return model;
}
void set_property(const ov::AnyMap& properties) override {
OPENVINO_NOT_IMPLEMENTED;
}
// Supports only num_streams and enable_profiling; values come from the
// compile-time config with defaults of 1 stream / profiling off.
ov::Any get_property(const std::string& name) const override {
if (name == ov::supported_properties) {
const std::vector<ov::PropertyName> supported_properties = {ov::num_streams.name(),
ov::enable_profiling.name()};
return decltype(ov::supported_properties)::value_type(supported_properties);
} else if (name == ov::num_streams) {
return m_config.count(ov::num_streams.name()) ? m_config.at(ov::num_streams.name()) : ov::streams::Num(1);
} else if (name == ov::enable_profiling) {
return m_config.count(ov::enable_profiling.name()) ? m_config.at(ov::enable_profiling.name()) : false;
} else {
OPENVINO_THROW("get property: " + name);
}
}
std::shared_ptr<ov::ISyncInferRequest> create_sync_infer_request() const override;
// Accessors used by MockInferRequest for tensor allocation.
const std::shared_ptr<const ov::Model>& get_model() const {
return m_model;
}
ov::SoPtr<ov::IRemoteContext> get_context() const {
return m_context;
}
bool has_context() const {
return m_has_context;
}
private:
ov::AnyMap m_config;
std::shared_ptr<const ov::Model> m_model;
bool m_has_context;
ov::SoPtr<ov::IRemoteContext> m_context;
};
// Synchronous infer request for MockCompiledModel: pre-allocates I/O tensors
// (remote ones when the compiled model holds a context) and runs inference by
// calling ov::Model::evaluate directly.
class MockInferRequest : public ov::ISyncInferRequest {
public:
MockInferRequest(const std::shared_ptr<const MockCompiledModel>& compiled_model)
: ov::ISyncInferRequest(compiled_model) {
OPENVINO_ASSERT(compiled_model);
m_model = compiled_model->get_model();
// Allocate input/output tensors
for (const auto& input : get_inputs()) {
allocate_tensor(input, [this, input, compiled_model](ov::SoPtr<ov::ITensor>& tensor) {
// Can add a check to avoid double work in case of shared tensors
// Dynamic shapes get a zero-sized placeholder until real data arrives.
allocate_tensor_impl(tensor,
input.get_element_type(),
input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape(),
compiled_model->has_context(),
compiled_model->get_context());
});
}
for (const auto& output : get_outputs()) {
allocate_tensor(output, [this, output, compiled_model](ov::SoPtr<ov::ITensor>& tensor) {
// Can add a check to avoid double work in case of shared tensors
allocate_tensor_impl(tensor,
output.get_element_type(),
output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape(),
compiled_model->has_context(),
compiled_model->get_context());
});
}
}
~MockInferRequest() = default;
// "Inference" is a direct reference evaluation of the kept ov::Model.
void infer() override {
ov::TensorVector input_tensors;
for (const auto& input : get_inputs()) {
input_tensors.emplace_back(ov::make_tensor(get_tensor(input)));
}
ov::TensorVector output_tensors;
for (const auto& output : get_outputs()) {
output_tensors.emplace_back(ov::make_tensor(get_tensor(output)));
}
m_model->evaluate(output_tensors, input_tensors);
}
std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override {
OPENVINO_NOT_IMPLEMENTED;
}
std::vector<ov::ProfilingInfo> get_profiling_info() const override {
OPENVINO_NOT_IMPLEMENTED;
}
private:
// (Re)allocates `tensor`: a remote tensor when a context is available,
// otherwise a plain host tensor; reuses the buffer if only the shape changed.
void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
const ov::element::Type& element_type,
const ov::Shape& shape,
bool has_context,
ov::SoPtr<ov::IRemoteContext> context) {
if (!tensor || tensor->get_element_type() != element_type) {
if (has_context) {
tensor = context->create_tensor(element_type, shape, {});
} else {
tensor = ov::SoPtr<ov::ITensor>(ov::make_tensor(element_type, shape), nullptr);
}
} else {
tensor->set_shape(shape);
}
}
std::shared_ptr<const ov::Model> m_model;
};
// Factory for the mock request: hand this compiled model (downcast to its
// concrete type) to a new MockInferRequest.
std::shared_ptr<ov::ISyncInferRequest> MockCompiledModel::create_sync_infer_request() const {
    auto self = std::dynamic_pointer_cast<const MockCompiledModel>(shared_from_this());
    return std::make_shared<MockInferRequest>(self);
}
// Stub remote tensor: carries only the creating device's name and property
// map. All data-access methods are unimplemented — tests never read from it.
class MockRemoteTensor : public ov::IRemoteTensor {
ov::AnyMap m_properties;
std::string m_dev_name;
public:
MockRemoteTensor(const std::string& name, const ov::AnyMap& props) : m_properties(props), m_dev_name(name) {}
const ov::AnyMap& get_properties() const override {
return m_properties;
}
const std::string& get_device_name() const override {
return m_dev_name;
}
void set_shape(ov::Shape shape) override {
OPENVINO_NOT_IMPLEMENTED;
}
const ov::element::Type& get_element_type() const override {
OPENVINO_NOT_IMPLEMENTED;
}
const ov::Shape& get_shape() const override {
OPENVINO_NOT_IMPLEMENTED;
}
const ov::Strides& get_strides() const override {
OPENVINO_NOT_IMPLEMENTED;
}
};
// Default remote context for the mock plugins; advertises IS_DEFAULT=true and
// creates MockRemoteTensor instances tagged with its device name.
class MockRemoteContext : public ov::IRemoteContext {
ov::AnyMap m_property = {{"IS_DEFAULT", true}};
std::string m_dev_name;
public:
MockRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {}
const std::string& get_device_name() const override {
return m_dev_name;
}
const ov::AnyMap& get_property() const override {
return m_property;
}
// Type/shape/params are ignored — the stub tensor only records device + props.
ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
const ov::Shape& shape,
const ov::AnyMap& params = {}) override {
auto remote_tensor = std::make_shared<MockRemoteTensor>(m_dev_name, m_property);
return {remote_tensor, nullptr};
}
};
// User-supplied (non-default) remote context variant: identical to
// MockRemoteContext except it advertises IS_DEFAULT=false, letting tests
// distinguish default vs custom contexts.
class MockCustomRemoteContext : public ov::IRemoteContext {
ov::AnyMap m_property = {{"IS_DEFAULT", false}};
std::string m_dev_name;
public:
MockCustomRemoteContext(const std::string& dev_name) : m_dev_name(dev_name) {}
const std::string& get_device_name() const override {
return m_dev_name;
}
const ov::AnyMap& get_property() const override {
return m_property;
}
// Type/shape/params are ignored — the stub tensor only records device + props.
ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
const ov::Shape& shape,
const ov::AnyMap& params = {}) override {
auto remote_tensor = std::make_shared<MockRemoteTensor>(m_dev_name, m_property);
return {remote_tensor, nullptr};
}
};
class MockPluginBase : public ov::IPlugin {
public:
MockPluginBase(const std::string& name, const std::unordered_set<std::string>& supported_ops)
: m_supported_ops(supported_ops) {
set_device_name(name);
}
virtual const ov::Version& get_const_version() = 0;
std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties) const override {
OPENVINO_ASSERT(model);
if (!support_model(model, query_model(model, properties)))
OPENVINO_THROW("Unsupported model");
return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties);
}
std::shared_ptr<ov::ICompiledModel> compile_model(const std::string& model_path,
const ov::AnyMap& properties) const override {
OPENVINO_NOT_IMPLEMENTED;
}
std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
const ov::AnyMap& properties,
const ov::SoPtr<ov::IRemoteContext>& context) const override {
if (!support_model(model, query_model(model, properties)))
OPENVINO_THROW("Unsupported model");
return std::make_shared<MockCompiledModel>(model, shared_from_this(), properties, context);
}
void set_property(const ov::AnyMap& properties) override {
OPENVINO_NOT_IMPLEMENTED;
}
ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override {
OPENVINO_NOT_IMPLEMENTED;
}
ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override {
if (remote_properties.find("CUSTOM_CTX") == remote_properties.end())
return std::make_shared<MockRemoteContext>(get_device_name());
return std::make_shared<MockCustomRemoteContext>(get_device_name());
}
ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override {
return std::make_shared<MockRemoteContext>(get_device_name());
}
std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model, const ov::AnyMap& properties) const override {
std::string xmlString, xmlInOutString;
ov::Tensor weights;
ov::pass::StreamSerialize::DataHeader hdr = {};
model.read(reinterpret_cast<char*>(&hdr), sizeof hdr);
model.seekg(hdr.custom_data_offset);
xmlInOutString.resize(hdr.custom_data_size);
model.read(const_cast<char*>(xmlInOutString.c_str()), hdr.custom_data_size);
// read blob content
model.seekg(hdr.consts_offset);
if (hdr.consts_size) {
weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size});
char* data = static_cast<char*>(weights.data());
model.read(data, hdr.consts_size);
}
// read XML content
model.seekg(hdr.model_offset);
xmlString.resize(hdr.model_size);
model.read(const_cast<char*>(xmlString.c_str()), hdr.model_size);
ov::Core core;
auto ov_model = core.read_model(xmlString, weights);
return compile_model(ov_model, properties);
}
std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                 const ov::SoPtr<ov::IRemoteContext>& context,
                                                 const ov::AnyMap& properties) const override {
    // Context-aware variant of import_model(): same StreamSerialize layout,
    // but the resulting model is compiled against the supplied remote context.
    std::string xmlString, xmlInOutString;
    ov::Tensor weights;
    ov::pass::StreamSerialize::DataHeader hdr = {};
    model.read(reinterpret_cast<char*>(&hdr), sizeof hdr);
    // read custom data section
    model.seekg(hdr.custom_data_offset);
    xmlInOutString.resize(hdr.custom_data_size);
    // Fix: writing through const_cast<char*>(str.c_str()) is undefined behavior
    // before C++17; &str[0] is a writable contiguous buffer since C++11.
    model.read(&xmlInOutString[0], hdr.custom_data_size);
    // read blob content (constant/weight data)
    model.seekg(hdr.consts_offset);
    if (hdr.consts_size) {
        weights = ov::Tensor(ov::element::i8, ov::Shape{hdr.consts_size});
        char* data = static_cast<char*>(weights.data());
        model.read(data, hdr.consts_size);
    }
    // read XML content
    model.seekg(hdr.model_offset);
    xmlString.resize(hdr.model_size);
    model.read(&xmlString[0], hdr.model_size);
    ov::Core core;
    auto ov_model = core.read_model(xmlString, weights);
    return compile_model(ov_model, properties, context);
}
ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                const ov::AnyMap& properties) const override {
    // Report every op whose type name is in m_supported_ops as runnable on
    // "<device_name>.<device_id>", where the id comes from the properties or
    // falls back to m_default_device_id.
    OPENVINO_ASSERT(model);
    const auto id_it = properties.find(ov::device::id.name());
    const std::string device_id =
        (id_it != properties.end()) ? id_it->second.as<std::string>() : m_default_device_id;
    const std::string full_device_name = get_device_name() + "." + device_id;
    ov::SupportedOpsMap result;
    for (const auto& node : model->get_ordered_ops()) {
        if (m_supported_ops.count(node->get_type_info().name))
            result[node->get_friendly_name()] = full_device_name;
    }
    return result;
}
protected:
// Device id used when ov::device::id is not present in the properties map.
std::string m_default_device_id = "0";
// Op type names this mock reports as supported from query_model().
std::unordered_set<std::string> m_supported_ops;
// Backing value for ov::enable_profiling (set via set_property in subclasses).
bool m_profiling = false;
// Backing value reported for the ov::loaded_from_cache property.
bool m_loaded_from_cache{false};
};
// Mock plugin that supports Reshape (but not Subtract) ops and exposes three
// devices ("0", "1", "2"), each with a distinct deterministic UUID.
class MockPluginReshape : public MockPluginBase {
public:
MockPluginReshape(const std::string& name)
: MockPluginBase(name, {"Parameter", "Result", "Add", "Constant", "Reshape"}) {}
const ov::Version& get_const_version() override {
static const ov::Version version = {CI_BUILD_NUMBER, "openvino_mock_reshape_plugin"};
return version;
}
// Accepts ov::num_streams and ov::enable_profiling; ov::device::id is
// silently ignored; any other key throws.
void set_property(const ov::AnyMap& properties) override {
for (const auto& it : properties) {
if (it.first == ov::num_streams.name())
num_streams = it.second.as<int32_t>();
else if (it.first == ov::enable_profiling.name())
m_profiling = it.second.as<bool>();
else if (it.first == ov::device::id.name())
continue;
else
OPENVINO_THROW(get_device_name(), " set config: " + it.first);
}
}
// Property dispatch; unknown names throw at the bottom.
ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override {
const static std::vector<std::string> device_ids = {"0", "1", "2"};
const static std::vector<ov::PropertyName> roProperties{
RO_property(ov::supported_properties.name()),
RO_property(ov::available_devices.name()),
RO_property(ov::loaded_from_cache.name()),
RO_property(ov::device::uuid.name()),
RO_property(METRIC_KEY(IMPORT_EXPORT_SUPPORT)),
};
// the whole config is RW before network is loaded.
const static std::vector<ov::PropertyName> rwProperties{
RW_property(ov::num_streams.name()),
RW_property(ov::enable_profiling.name()),
};
// Optional ov::device::id argument selects which device's UUID is reported.
std::string device_id;
if (arguments.find(ov::device::id.name()) != arguments.end()) {
device_id = arguments.find(ov::device::id.name())->second.as<std::string>();
}
if (name == ov::supported_properties) {
std::vector<ov::PropertyName> supportedProperties;
supportedProperties.reserve(roProperties.size() + rwProperties.size());
supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
return decltype(ov::supported_properties)::value_type(supportedProperties);
} else if (name == ov::internal::supported_properties) {
return decltype(ov::internal::supported_properties)::value_type(
{ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}});
} else if (name == ov::device::uuid) {
// Deterministic per-device UUID: byte i is i, 2*i or 3*i depending on id.
ov::device::UUID uuid;
for (size_t i = 0; i < uuid.MAX_UUID_SIZE; i++) {
if (device_id == device_ids[0])
uuid.uuid[i] = static_cast<uint8_t>(i);
else if (device_id == device_ids[1])
uuid.uuid[i] = static_cast<uint8_t>(i * 2);
else if (device_id == device_ids[2])
uuid.uuid[i] = static_cast<uint8_t>(i * 3);
}
return decltype(ov::device::uuid)::value_type{uuid};
} else if (name == ov::available_devices) {
return decltype(ov::available_devices)::value_type(device_ids);
} else if (name == ov::device::capabilities) {
std::vector<std::string> capabilities;
capabilities.push_back(ov::device::capability::EXPORT_IMPORT);
return decltype(ov::device::capabilities)::value_type(capabilities);
} else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key
std::vector<std::string> configs;
for (const auto& property : rwProperties) {
configs.emplace_back(property);
}
return configs;
} else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) {
return true;
} else if (ov::internal::caching_properties == name) {
// UUID is the only property that participates in the model-cache key.
std::vector<ov::PropertyName> caching_properties = {ov::device::uuid};
return decltype(ov::internal::caching_properties)::value_type(caching_properties);
} else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key
std::vector<std::string> configs;
for (const auto& property : roProperties) {
configs.emplace_back(property);
}
return configs;
} else if (name == ov::loaded_from_cache.name()) {
return m_loaded_from_cache;
} else if (name == ov::enable_profiling.name()) {
return decltype(ov::enable_profiling)::value_type{m_profiling};
} else if (name == ov::streams::num.name()) {
return decltype(ov::streams::num)::value_type{num_streams};
}
OPENVINO_THROW("Unsupported property: ", name);
}
private:
// Backing value for ov::num_streams / ov::streams::num.
int32_t num_streams{0};
};
// Mock plugin that supports Subtract (but not Reshape) ops and exposes two
// devices ("0" and "1"), each with a distinct deterministic UUID.
class MockPluginSubtract : public MockPluginBase {
public:
    MockPluginSubtract(const std::string& name)
        : MockPluginBase(name, {"Parameter", "Result", "Add", "Constant", "Subtract"}) {}
    const ov::Version& get_const_version() override {
        static const ov::Version version = {CI_BUILD_NUMBER, "openvino_mock_subtract_plugin"};
        return version;
    }
    // Accepts ov::enable_profiling; ov::device::id is silently ignored; any
    // other key throws.
    void set_property(const ov::AnyMap& properties) override {
        for (const auto& it : properties) {
            if (it.first == ov::enable_profiling.name())
                m_profiling = it.second.as<bool>();
            else if (it.first == ov::device::id.name())
                continue;
            else
                OPENVINO_THROW(get_device_name(), " set config: " + it.first);
        }
    }
    // Property dispatch; unknown names throw at the bottom.
    ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override {
        const static std::vector<std::string> device_ids = {"0", "1"};
        const static std::vector<ov::PropertyName> roProperties{
            RO_property(ov::supported_properties.name()),
            RO_property(ov::available_devices.name()),
            RO_property(ov::loaded_from_cache.name()),
            RO_property(ov::device::uuid.name()),
            RO_property(METRIC_KEY(IMPORT_EXPORT_SUPPORT)),
        };
        // the whole config is RW before network is loaded.
        const static std::vector<ov::PropertyName> rwProperties{
            RW_property(ov::num_streams.name()),
            RW_property(ov::enable_profiling.name()),
        };
        // Optional ov::device::id argument selects which device's UUID is reported.
        std::string device_id;
        if (arguments.find(ov::device::id.name()) != arguments.end()) {
            device_id = arguments.find(ov::device::id.name())->second.as<std::string>();
        }
        if (name == ov::supported_properties) {
            std::vector<ov::PropertyName> supportedProperties;
            supportedProperties.reserve(roProperties.size() + rwProperties.size());
            supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
            supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
            return decltype(ov::supported_properties)::value_type(supportedProperties);
        } else if (name == ov::internal::supported_properties) {
            return decltype(ov::internal::supported_properties)::value_type(
                {ov::PropertyName{ov::internal::caching_properties.name(), ov::PropertyMutability::RO}});
        } else if (name == ov::device::uuid) {
            // Deterministic per-device UUID: byte i is 2*i or 4*i depending on id.
            // Fix: the original also compared against device_ids[2], which is out
            // of bounds for this two-element vector (undefined behavior).
            ov::device::UUID uuid;
            for (size_t i = 0; i < uuid.MAX_UUID_SIZE; i++) {
                if (device_id == device_ids[0])
                    uuid.uuid[i] = static_cast<uint8_t>(i * 2);
                else if (device_id == device_ids[1])
                    uuid.uuid[i] = static_cast<uint8_t>(i * 4);
            }
            return decltype(ov::device::uuid)::value_type{uuid};
        } else if (name == ov::available_devices) {
            return decltype(ov::available_devices)::value_type(device_ids);
        } else if (name == ov::device::capabilities) {
            std::vector<std::string> capabilities;
            capabilities.push_back(ov::device::capability::EXPORT_IMPORT);
            return decltype(ov::device::capabilities)::value_type(capabilities);
        } else if (name == ov::loaded_from_cache.name()) {
            return m_loaded_from_cache;
        } else if (name == ov::enable_profiling.name()) {
            return decltype(ov::enable_profiling)::value_type{m_profiling};
        } else if (name == "SUPPORTED_CONFIG_KEYS") { // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : rwProperties) {
                configs.emplace_back(property);
            }
            return configs;
        } else if (METRIC_KEY(IMPORT_EXPORT_SUPPORT) == name) {
            return true;
        } else if (ov::internal::caching_properties == name) {
            // UUID is the only property that participates in the model-cache key.
            std::vector<ov::PropertyName> caching_properties = {ov::device::uuid};
            return decltype(ov::internal::caching_properties)::value_type(caching_properties);
        } else if (name == "SUPPORTED_METRICS") { // TODO: Remove this key
            std::vector<std::string> configs;
            for (const auto& property : roProperties) {
                configs.emplace_back(property);
            }
            return configs;
        }
        OPENVINO_THROW("Unsupported property: ", name);
    }
};
void ov::hetero::tests::HeteroTests::reg_plugin(std::shared_ptr<ov::IPlugin>& plugin) {
    // Load the shared mock-engine library once, inject the plugin instance
    // into it, and register it with the Core under the plugin's device name.
    const std::string library_path = get_mock_engine_path();
    if (!m_so)
        m_so = ov::util::load_shared_object(library_path.c_str());
    // Mock plugins report their own version string; propagate it before injection.
    if (auto mock_plugin = std::dynamic_pointer_cast<MockPluginBase>(plugin))
        mock_plugin->set_version(mock_plugin->get_const_version());
    auto inject_plugin = make_std_function<void(ov::IPlugin*)>(m_so, "InjectPlugin");
    inject_plugin(plugin.get());
    core.register_plugin(ov::util::make_plugin_library_name(CommonTestUtils::getExecutableDirectory(),
                                                            std::string("mock_engine") + IE_BUILD_POSTFIX),
                         plugin->get_device_name());
    // Keep the plugin alive for the lifetime of the fixture.
    m_mock_plugins.emplace_back(plugin);
}
template <typename T>
void ov::hetero::tests::HeteroTests::reg_plugin_type(const std::string& device_name) {
auto plugin = std::dynamic_pointer_cast<ov::IPlugin>(std::make_shared<T>(device_name));
reg_plugin(plugin);
}
void ov::hetero::tests::HeteroTests::SetUp() {
    // Register the two mock plugins only once per fixture instance.
    if (!m_mock_plugins.empty())
        return;
    reg_plugin_type<MockPluginReshape>("MOCK0");
    reg_plugin_type<MockPluginSubtract>("MOCK1");
}

View File

@ -0,0 +1,50 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <gtest/gtest.h>
#include <memory>
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/iplugin.hpp"
namespace ov {
namespace hetero {
namespace tests {
// Fixture for HETERO plugin unit tests: registers mock plugins into a shared
// ov::Core and provides helpers to build small test models and input tensors.
class HeteroTests : public ::testing::Test {
public:
// Core instance shared by all tests in a fixture; mock plugins are
// registered into it in SetUp().
ov::Core core;
void SetUp() override;
// Model builders; each returns a small graph named after the ops it contains.
std::shared_ptr<ov::Model> create_model_with_subtract();
std::shared_ptr<ov::Model> create_model_with_subtract_reshape();
std::shared_ptr<ov::Model> create_model_with_subtract_reshape_relu();
std::shared_ptr<ov::Model> create_model_with_reshape();
// Creates a tensor of the given type/shape filled with sequential values.
ov::Tensor create_and_fill_tensor(const ov::element::Type& type, const ov::Shape& shape);
private:
// Typed helper for create_and_fill_tensor: element i gets value i cast to T.
template <class T>
ov::Tensor create_tensor(const ov::element::Type& type, const ov::Shape& shape) {
ov::Tensor tensor(type, shape);
T* data = tensor.data<T>();
for (size_t i = 0; i < tensor.get_size(); i++) {
data[i] = static_cast<T>(i);
}
return tensor;
}
// Keeps registered mock plugin instances alive for the fixture's lifetime.
std::vector<std::shared_ptr<ov::IPlugin>> m_mock_plugins;
// Handle to the loaded mock-engine shared library.
std::shared_ptr<void> m_so;
template <typename T>
void reg_plugin_type(const std::string& device_name);
void reg_plugin(std::shared_ptr<ov::IPlugin>& plugin);
};
} // namespace tests
} // namespace hetero
} // namespace ov

View File

@ -0,0 +1,51 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_tests.hpp"
using namespace ov::hetero::tests;
// IR frontend is needed for import
#ifdef IR_FRONTEND_ENABLED
// Export a HETERO-compiled single-plugin model, re-import it, and run inference.
TEST_F(HeteroTests, import_single_plugins) {
    std::stringstream model_stream;
    auto model = create_model_with_reshape();
    {
        auto compiled_model = core.compile_model(model, "HETERO", ov::device::priorities("MOCK0"));
        compiled_model.export_model(model_stream);
    }
    auto compiled_model = core.import_model(model_stream, "HETERO", {});
    EXPECT_EQ(1, compiled_model.inputs().size());
    EXPECT_EQ(1, compiled_model.outputs().size());
    auto infer_request = compiled_model.create_infer_request();
    auto input_tensor =
        create_and_fill_tensor(compiled_model.input().get_element_type(), compiled_model.input().get_shape());
    infer_request.set_input_tensor(input_tensor);
    infer_request.infer();
    auto output_tensor = infer_request.get_output_tensor();
    EXPECT_EQ(input_tensor.get_shape(), model->input().get_shape());
    EXPECT_EQ(input_tensor.get_element_type(), model->input().get_element_type());
    // Fix: the inference result was fetched but never checked; validate it
    // against the imported model's output port (shapes differ from the input
    // because the model contains a Reshape).
    EXPECT_EQ(output_tensor.get_shape(), compiled_model.output().get_shape());
    EXPECT_EQ(output_tensor.get_element_type(), compiled_model.output().get_element_type());
}
// Export a HETERO model split across both mock plugins, re-import it, and
// verify inference produces bitwise-identical data (sub graph: x - 0-like).
TEST_F(HeteroTests, import_several_plugins) {
    std::stringstream model_stream;
    auto model = create_model_with_subtract();
    {
        // Compile across both plugins, then serialize the compiled model.
        auto exported_model = core.compile_model(model, "HETERO", ov::device::priorities("MOCK0,MOCK1"));
        exported_model.export_model(model_stream);
    }
    auto compiled_model = core.import_model(model_stream, "HETERO", {});
    EXPECT_EQ(1, compiled_model.inputs().size());
    EXPECT_EQ(1, compiled_model.outputs().size());
    auto request = compiled_model.create_infer_request();
    const auto& input_port = compiled_model.input();
    auto input_tensor = create_and_fill_tensor(input_port.get_element_type(), input_port.get_shape());
    request.set_input_tensor(input_tensor);
    request.infer();
    auto output_tensor = request.get_output_tensor();
    EXPECT_EQ(input_tensor.get_shape(), output_tensor.get_shape());
    EXPECT_EQ(input_tensor.get_element_type(), output_tensor.get_element_type());
    EXPECT_EQ(memcmp(input_tensor.data(), output_tensor.data(), input_tensor.get_byte_size()), 0);
}
#endif

View File

@ -0,0 +1,85 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_tests.hpp"
#include "ie/ie_plugin_config.hpp"
#include "openvino/runtime/internal_properties.hpp"
using namespace ov::hetero::tests;
// HETERO must advertise exactly this set of supported properties.
TEST_F(HeteroTests, get_property_supported_properties) {
    const std::vector<ov::PropertyName> expected = {ov::supported_properties,
                                                    ov::device::full_name,
                                                    ov::device::capabilities,
                                                    ov::device::priorities};
    const auto actual = core.get_property("HETERO", ov::supported_properties);
    EXPECT_EQ(expected.size(), actual.size());
    for (const auto& property : expected) {
        ASSERT_TRUE(std::find(actual.begin(), actual.end(), property) != actual.end());
    }
}
// Legacy SUPPORTED_METRICS must contain exactly these entries.
TEST_F(HeteroTests, get_property_supported_metrics) {
    const std::vector<std::string> expected = {ov::supported_properties.name(),
                                               ov::device::full_name.name(),
                                               ov::device::capabilities.name(),
                                               METRIC_KEY(SUPPORTED_METRICS),
                                               METRIC_KEY(SUPPORTED_CONFIG_KEYS),
                                               METRIC_KEY(IMPORT_EXPORT_SUPPORT)};
    const auto actual = core.get_property("HETERO", METRIC_KEY(SUPPORTED_METRICS)).as<std::vector<std::string>>();
    EXPECT_EQ(expected.size(), actual.size());
    for (const auto& metric : expected) {
        ASSERT_TRUE(std::find(actual.begin(), actual.end(), metric) != actual.end());
    }
}
// Legacy SUPPORTED_CONFIG_KEYS must contain exactly these entries.
TEST_F(HeteroTests, get_property_supported_configs) {
    const std::vector<std::string> expected = {"HETERO_DUMP_GRAPH_DOT",
                                               "TARGET_FALLBACK",
                                               ov::device::priorities.name(),
                                               "EXCLUSIVE_ASYNC_REQUESTS"};
    const auto actual =
        core.get_property("HETERO", METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
    EXPECT_EQ(expected.size(), actual.size());
    for (const auto& config : expected) {
        ASSERT_TRUE(std::find(actual.begin(), actual.end(), config) != actual.end());
    }
}
// The only internal property HETERO exposes is caching_properties.
TEST_F(HeteroTests, get_property_internal_supported_properties) {
    const std::vector<ov::PropertyName> expected = {ov::internal::caching_properties};
    const auto actual = core.get_property("HETERO", ov::internal::supported_properties);
    EXPECT_EQ(expected.size(), actual.size());
    for (const auto& property : expected) {
        ASSERT_TRUE(std::find(actual.begin(), actual.end(), property) != actual.end());
    }
}
// Read-only properties: HETERO reports its own name and EXPORT_IMPORT capability.
TEST_F(HeteroTests, get_property_ro_properties) {
EXPECT_EQ("HETERO", core.get_property("HETERO", ov::device::full_name));
EXPECT_EQ(std::vector<std::string>{ov::device::capability::EXPORT_IMPORT},
core.get_property("HETERO", ov::device::capabilities));
}
// ov::device::priorities and the legacy "TARGET_FALLBACK" key must stay in
// sync: setting either one updates both views of the same configuration.
TEST_F(HeteroTests, set_property_device_priorities) {
EXPECT_EQ("", core.get_property("HETERO", ov::device::priorities));
core.set_property("HETERO", ov::device::priorities("MOCK0,MOCK1"));
EXPECT_EQ("MOCK0,MOCK1", core.get_property("HETERO", ov::device::priorities));
EXPECT_EQ("MOCK0,MOCK1", core.get_property("HETERO", "TARGET_FALLBACK").as<std::string>());
core.set_property("HETERO", {{"TARGET_FALLBACK", "MOCK1,MOCK0"}});
EXPECT_EQ("MOCK1,MOCK0", core.get_property("HETERO", ov::device::priorities));
EXPECT_EQ("MOCK1,MOCK0", core.get_property("HETERO", "TARGET_FALLBACK").as<std::string>());
}
// Legacy EXCLUSIVE_ASYNC_REQUESTS defaults to true and can be switched off
// with the legacy "NO" string value.
TEST_F(HeteroTests, set_property_exclusive_async_request) {
EXPECT_EQ(true, core.get_property("HETERO", "EXCLUSIVE_ASYNC_REQUESTS").as<bool>());
core.set_property("HETERO", {{"EXCLUSIVE_ASYNC_REQUESTS", "NO"}});
EXPECT_EQ(false, core.get_property("HETERO", "EXCLUSIVE_ASYNC_REQUESTS").as<bool>());
}

View File

@ -0,0 +1,67 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "hetero_tests.hpp"
using namespace ov::hetero::tests;
// AVAILABLE_DEVICES {"MOCK0.0", "MOCK0.1", "MOCK0.2", "MOCK1.0", "MOCK1.1"};
// With only MOCK0 in the priority list, every op except Subtract is claimed.
TEST_F(HeteroTests, query_model_on_mock0) {
    const std::string device = "MOCK0.1";
    const auto model = create_model_with_subtract_reshape();
    const auto supported = core.query_model(model, "HETERO", {ov::device::priorities(device)});
    // Collect all friendly names, then remove those the plugin claims.
    std::unordered_set<std::string> remaining;
    for (const auto& node : model->get_ops())
        remaining.insert(node->get_friendly_name());
    for (const auto& entry : supported) {
        EXPECT_EQ(entry.second, device);
        remaining.erase(entry.first);
    }
    // Only the Subtract node should be left unclaimed.
    EXPECT_EQ(1, remaining.size());
    EXPECT_EQ("sub", *remaining.begin());
}
// With only MOCK1 in the priority list, every op except Reshape is claimed.
TEST_F(HeteroTests, query_model_on_mock1) {
    const std::string device = "MOCK1.1";
    const auto model = create_model_with_subtract_reshape();
    // This WA is needed because mock plugins are loaded one by one
    EXPECT_NO_THROW(core.get_available_devices());
    const auto supported = core.query_model(model, "HETERO", {ov::device::priorities(device)});
    // Collect all friendly names, then remove those the plugin claims.
    std::unordered_set<std::string> remaining;
    for (const auto& node : model->get_ops())
        remaining.insert(node->get_friendly_name());
    for (const auto& entry : supported) {
        EXPECT_EQ(entry.second, device);
        remaining.erase(entry.first);
    }
    // Only the Reshape node should be left unclaimed.
    EXPECT_EQ(1, remaining.size());
    EXPECT_EQ("reshape", *remaining.begin());
}
// With both plugins available, every op is claimed by exactly one device:
// ops MOCK0 supports go to MOCK0 (higher priority), the rest go to MOCK1.
TEST_F(HeteroTests, query_model_on_mixed) {
    const std::string mock0_device = "MOCK0.3";
    const std::string mock1_device = "MOCK1.2";
    ov::AnyMap config = {ov::device::priorities(mock0_device + "," + mock1_device)};
    const auto model = create_model_with_subtract_reshape();
    // Record which ops MOCK0 alone would claim.
    std::set<std::string> mock0_ops;
    for (auto& entry : core.query_model(model, mock0_device)) {
        if (entry.second == mock0_device)
            mock0_ops.insert(entry.first);
    }
    const auto supported = core.query_model(model, "HETERO", config);
    std::unordered_set<std::string> remaining;
    for (const auto& node : model->get_ops())
        remaining.insert(node->get_friendly_name());
    for (const auto& entry : supported) {
        // MOCK0-capable ops stay on MOCK0; everything else falls back to MOCK1.
        if (mock0_ops.count(entry.first))
            EXPECT_EQ(entry.second, mock0_device);
        else
            EXPECT_EQ(entry.second, mock1_device);
        remaining.erase(entry.first);
    }
    // Every op must have been assigned to some device.
    EXPECT_EQ(0, remaining.size());
}