From e095a90cdfd50c8c7ea4983b0dd55653d54abab9 Mon Sep 17 00:00:00 2001
From: Ilya Churaev <ilya.churaev@intel.com>
Date: Tue, 11 Jan 2022 09:36:13 +0300
Subject: [PATCH] Handle names collisions for old IR with new API (#9388)

* Handle names collisions for old IR with new API

* Fixed load model

* Try to fix tests

* Try to fix tests

* Try to fix build

* Try to fix tests

* Fixed tests

* Revert "Fixed tests"

This reverts commit 35da3072104c7b4337842b16f72ff864f390f7d8.

* Refactoring

* Fixed functional test

* Try to fix CPU tests

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
---
 .../interface/ie_iplugin_internal.hpp         |  3 +-
 .../interface/ie_iplugin_internal.cpp         | 43 ++++++---
 src/inference/src/ie_core.cpp                 |  4 +-
 src/inference/src/ie_network_reader.cpp       | 54 ++++++----
 .../rt_info_deserialization.cpp               | 90 +++++++++++++++++++
 .../executable_network/exec_network_base.hpp  | 49 +++++++++-
 .../ov_executable_network/exec_graph_info.hpp |  2 +-
 .../exec_network_base.hpp                     | 45 ++++++++++
 8 files changed, 258 insertions(+), 32 deletions(-)

diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
index 39232e0ef86..0d20e90e87e 100644
--- a/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
+++ b/src/inference/dev_api/cpp_interfaces/interface/ie_iplugin_internal.hpp
@@ -92,7 +92,8 @@ INFERENCE_ENGINE_API_CPP(OutputsDataMap) copyInfo(const OutputsDataMap& networkOutputs);
  */
 INFERENCE_ENGINE_API_CPP(void)
 SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
-                  const std::shared_ptr<const ov::Model>& function);
+                  const std::shared_ptr<const ov::Model>& function,
+                  bool new_api);
 
 /**
  * @interface IInferencePlugin
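
Note (illustration, not part of the patch): the rule enforced by the implementation below can be sketched standalone. The helper name leaf_names_collide is hypothetical; the ov::Model calls (inputs(), outputs(), get_names(), get_parameters()) are the new-API surface the patch builds on. An operation (friendly) name may be promoted to a tensor name only if it is not already a tensor name of a different leaf tensor:

    // Illustrative digest of the collision rule; not code from the patch.
    #include <memory>
    #include <string>
    #include <unordered_set>

    #include "openvino/core/model.hpp"

    bool leaf_names_collide(const std::shared_ptr<ov::Model>& model) {
        // Collect every tensor name attached to model inputs and outputs
        // ("leaf" tensors), exactly as the patch does before adding
        // operation names for IR v10.
        std::unordered_set<std::string> leaf_names;
        for (const auto& input : model->inputs())
            for (const auto& name : input.get_names())
                leaf_names.insert(name);
        for (const auto& output : model->outputs())
            for (const auto& name : output.get_names())
                leaf_names.insert(name);

        for (const auto& param : model->get_parameters()) {
            const auto& op_name = param->get_friendly_name();
            const auto& own_names = param->output(0).get_names();
            // Collision: op_name is already used as a tensor name elsewhere
            // in the model, and it is not simply one of this parameter's own
            // tensor names (reusing an own name is allowed).
            if (leaf_names.count(op_name) != 0 && own_names.count(op_name) == 0)
                return true;
            leaf_names.insert(op_name);
        }
        // Results are checked analogously in the patch, using the name
        // derived via ngraph::op::util::create_ie_output_name().
        return false;
    }
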
diff --git a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp
index 1933b50dded..6c0875337af 100644
--- a/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp
+++ b/src/inference/src/cpp_interfaces/interface/ie_iplugin_internal.cpp
@@ -286,18 +286,21 @@
 void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
                                          const std::shared_ptr<const ov::Model>& function) {
-    InferenceEngine::SetExeNetworkInfo(exeNetwork, function);
+    bool newAPI = this->GetCore() && this->GetCore()->isNewAPI();
+    InferenceEngine::SetExeNetworkInfo(exeNetwork, function, newAPI);
     exeNetwork->SetPointerToPlugin(shared_from_this());
 }
 
 void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
-                       const std::shared_ptr<const ov::Model>& function) {
+                       const std::shared_ptr<const ov::Model>& function,
+                       bool new_api) {
     OPENVINO_ASSERT(exeNetwork != nullptr);
     OPENVINO_ASSERT(function != nullptr);
 
     std::vector<std::shared_ptr<const ov::Node>> const_params;
     std::vector<std::shared_ptr<const ov::Node>> const_results;
 
+    std::unordered_set<std::string> leaf_names;
     bool add_operation_names = false;
     const auto& rt_info = function->get_rt_info();
     const auto it = rt_info.find("version");
@@ -307,6 +310,14 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
         // getInputs / getOutputs. Since these functions are designed to be used in new API only
         // always need to add operation names for IR v10
         add_operation_names = ir_version == 10;
+
+        for (const auto& vals : {function->inputs(), function->outputs()}) {
+            for (const auto& val : vals) {
+                for (const auto& name : val.get_names()) {
+                    leaf_names.insert(name);
+                }
+            }
+        }
     }
 
     const auto& inputsInfo = exeNetwork->GetInputsInfo();
@@ -315,14 +326,21 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
     OPENVINO_ASSERT(outputsInfo.size() == function->get_output_size());
 
     for (const auto& param : function->get_parameters()) {
+        const auto& param_name = param->get_friendly_name();
         auto new_param = ov::as_type_ptr<ov::op::v0::Parameter>(param->copy_with_new_inputs({}));
-        new_param->set_friendly_name(param->get_friendly_name());
-        if (add_operation_names)
-            new_param->output(0).get_tensor().add_names({new_param->get_friendly_name()});
+        new_param->set_friendly_name(param_name);
+        if (add_operation_names) {
+            OPENVINO_ASSERT(!new_api || leaf_names.find(param_name) == leaf_names.end() ||
+                                param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
+                            "Model operation names have collisions with tensor names.",
+                            " Please use MO to generate new IR version, it should allow to avoid the issue");
+            leaf_names.insert(param_name);
+            new_param->output(0).get_tensor().add_names({param_name});
+        }
         // WA: use CNNNetwork's precisions since plugins sometimes override their precisions
         // after transformation pipeline is run
         new_param->set_element_type(
-            InferenceEngine::details::convertPrecision(inputsInfo.at(new_param->get_friendly_name())->getPrecision()));
+            InferenceEngine::details::convertPrecision(inputsInfo.at(param_name)->getPrecision()));
         new_param->set_layout(param->get_layout());
         new_param->output(0).get_rt_info() = param->output(0).get_rt_info();
         new_param->validate_and_infer_types();
@@ -331,15 +349,20 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
     for (const auto& result : function->get_results()) {
         auto fake_param = std::make_shared<ov::op::v0::Parameter>(result->get_output_element_type(0),
                                                                   result->get_output_partial_shape(0));
-        const std::string param_name = ngraph::op::util::create_ie_output_name(result->input_value(0));
-        fake_param->set_friendly_name(param_name);
+        const std::string res_name = ngraph::op::util::create_ie_output_name(result->input_value(0));
+        fake_param->set_friendly_name(res_name);
         fake_param->set_element_type(
-            InferenceEngine::details::convertPrecision(outputsInfo.at(param_name)->getPrecision()));
+            InferenceEngine::details::convertPrecision(outputsInfo.at(res_name)->getPrecision()));
         fake_param->validate_and_infer_types();
         auto new_result = result->copy_with_new_inputs({fake_param});
         new_result->set_friendly_name(result->get_friendly_name());
         if (add_operation_names) {
-            new_result->output(0).get_tensor().add_names({fake_param->get_friendly_name()});
+            OPENVINO_ASSERT(!new_api || leaf_names.find(res_name) == leaf_names.end() ||
+                                result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
+                            "Model operation names have collisions with tensor names.",
+                            " Please use MO to generate new IR version, it should allow to avoid the issue");
+            leaf_names.insert(res_name);
+            new_result->output(0).get_tensor().add_names({res_name});
         }
         auto r = std::dynamic_pointer_cast<ov::op::v0::Result>(new_result);
         OPENVINO_ASSERT(r, "Internal error. SetNetworkInfo failure casting output copy to Result");
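
Note (illustration, not part of the patch): from the caller's side, the change above means that compiling a colliding IR v10 model under the new API now fails fast with ov::Exception instead of silently producing ambiguous input/output names. A minimal sketch, assuming the era's ov::runtime::Core plus a hypothetical model_v10.xml path and CPU device:

    #include <iostream>

    #include "openvino/core/except.hpp"
    #include "openvino/runtime/core.hpp"

    int main() {
        ov::runtime::Core core;  // ov::runtime namespace, as in this patch's tests
        try {
            auto model = core.read_model("model_v10.xml");  // hypothetical path
            auto compiled = core.compile_model(model, "CPU");  // stand-in device
        } catch (const ov::Exception& e) {
            // For a colliding IR v10 model the new API now reports, e.g.:
            // "Model operation names have collisions with tensor names."
            std::cerr << e.what() << '\n';
            return 1;
        }
        return 0;
    }
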
diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp
index 86f3fcb89dc..ec9bf929eef 100644
--- a/src/inference/src/ie_core.cpp
+++ b/src/inference/src/ie_core.cpp
@@ -512,7 +512,7 @@ public:
             res = compile_model_impl(network, plugin, parsed._config, context, hash);
         } else {
             // Temporary workaround until all plugins support caching of original model inputs
-            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction());
+            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction(), isNewAPI());
         }
     } else {
         res = compile_model_impl(network, plugin, parsed._config, context, {});
@@ -589,7 +589,7 @@ public:
             res = compile_model_impl(network, plugin, parsed._config, nullptr, hash, {}, forceDisableCache);
         } else {
             // Temporary workaround until all plugins support caching of original model inputs
-            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction());
+            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction(), isNewAPI());
         }
     } else {
         res = compile_model_impl(network, plugin, parsed._config, nullptr, {}, {}, forceDisableCache);
diff --git a/src/inference/src/ie_network_reader.cpp b/src/inference/src/ie_network_reader.cpp
index bd971a0dfb7..ccd720e24a3 100644
--- a/src/inference/src/ie_network_reader.cpp
+++ b/src/inference/src/ie_network_reader.cpp
@@ -302,11 +302,32 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
     const int64_t ir_version = it->second.as<int64_t>();
 
     if (ir_version == 10 && newAPI) {
+        std::unordered_set<std::string> leaf_names;
         const auto inputs = function->inputs();
         for (size_t i = 0; i < inputs.size(); ++i) {
             const auto ngraph_type = inputs[i].get_element_type();
             const auto legacy_type = details::toLegacyType(ngraph_type, true);
             prepost.input(i).tensor().set_element_type(legacy_type);
+            for (const auto& name : inputs[i].get_names()) {
+                OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(),
+                                "Model tensor names have collisions.",
+                                " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(name);
+            }
+        }
+
+        const auto outputs = function->outputs();
+        for (size_t i = 0; i < outputs.size(); ++i) {
+            const auto ngraph_type = outputs[i].get_element_type();
+            const auto legacy_type = details::toLegacyType(ngraph_type, false);
+
+            prepost.output(i).tensor().set_element_type(legacy_type);
+            for (const auto& name : outputs[i].get_names()) {
+                OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(),
+                                "Model tensor names have collisions.",
+                                " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(name);
+            }
         }
 
         // in order to support the following scenarios for IR v10 cases:
@@ -317,29 +338,28 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
         //    f.reshape({ { "input_operation_name", ov::PartialShape{} } });
         // we need to add operation names as tensor names for inputs and outputs
         {
-            std::vector<std::string> result_names;
-            std::vector<ngraph::Output<ngraph::Node>> prevPorts;
-            result_names.reserve(function->get_results().size());
-            prevPorts.reserve(function->get_results().size());
             for (const auto& result : function->get_results()) {
-                result_names.emplace_back(ngraph::op::util::create_ie_output_name(result->input_value(0)));
-                result->output(0).get_tensor().add_names({result_names.back()});
-                prevPorts.emplace_back(result->input_value(0));
+                auto res_name = ngraph::op::util::create_ie_output_name(result->input_value(0));
+                OPENVINO_ASSERT(
+                    leaf_names.find(res_name) == leaf_names.end() ||
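
Note (illustration, not part of the patch): the reader change above preserves the scenario named in the comment, i.e. addressing IR v10 inputs by operation name under the new API, because operation names are now registered as tensor names. A sketch where the model path, the name input_operation_name, and the shape are placeholders:

    #include "openvino/core/partial_shape.hpp"
    #include "openvino/runtime/core.hpp"

    int main() {
        ov::runtime::Core core;
        auto f = core.read_model("model_v10.xml");  // hypothetical IR v10 file

        // Works because, for IR v10 under the new API, the reader added the
        // Parameter's operation (friendly) name as a tensor name on its output.
        f->reshape({{"input_operation_name", ov::PartialShape{1, 3, 224, 224}}});
        return 0;
    }
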
+                        result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
+                    "Model operation names have collisions with tensor names.",
+                    " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(res_name);
+                result->output(0).get_tensor().add_names({res_name});
             }
             for (const auto& param : function->get_parameters()) {
-                param->output(0).get_tensor().add_names({param->get_friendly_name()});
+                auto param_name = param->get_friendly_name();
+                OPENVINO_ASSERT(
+                    leaf_names.find(param_name) == leaf_names.end() ||
+                        param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
+                    "Model operation names have collisions with tensor names.",
+                    " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(param_name);
+                param->output(0).get_tensor().add_names({param_name});
             }
         }
 
-        const auto outputs = function->outputs();
-        for (size_t i = 0; i < outputs.size(); ++i) {
-            const auto ngraph_type = outputs[i].get_element_type();
-            const auto legacy_type = details::toLegacyType(ngraph_type, false);
-
-            prepost.output(i).tensor().set_element_type(legacy_type);
-        }
-
         function = prepost.build();
 
         // Set version to 10
diff --git a/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp b/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp
index c6727ad2311..191684934ba 100644
--- a/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp
+++ b/src/tests/functional/inference_engine/ir_serialization/rt_info_deserialization.cpp
@@ -202,6 +202,96 @@ TEST_F(RTInfoDeserialization, NodeV10) {
     }
 }
 
+TEST_F(RTInfoDeserialization, NamesCollisionV10) {
+    std::string model = R"V0G0N(
+<net name="Network" version="10">
+    <layers>
+        <layer name="input" type="Parameter" id="0" version="opset8">
+            <data element_type="f32" shape="1,3,22,22"/>
+            <output>
+                <port id="0" precision="FP32" names="input_a">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="relu" type="ReLU" id="1" version="opset8">
+            <input>
+                <port id="0" precision="FP32">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </input>
+            <output>
+                <!-- tensor name "input" collides with the Parameter operation name -->
+                <port id="1" precision="FP32" names="input">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="output" type="Result" id="2" version="opset8">
+            <input>
+                <port id="0" precision="FP32">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </input>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
+        <edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
+    </edges>
+</net>
+)V0G0N";
+    auto f = getWithIRFrontend(model);
+    ASSERT_NE(nullptr, f);
+
+    auto check_version = [](const std::shared_ptr<ov::Model>& f, int version_ref) {
+        auto& rt_info = f->get_rt_info();
+        ASSERT_TRUE(rt_info.count("version"));
+        ASSERT_TRUE(rt_info.at("version").is<int64_t>());
+        ASSERT_EQ(rt_info.at("version").as<int64_t>(), version_ref);
+    };
+    check_version(f, 10);
+
+    // read IR v10 with old API
+    {
+        InferenceEngine::Core core;
+        auto f_10 = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+        ASSERT_NE(nullptr, f_10.getFunction());
+
+        auto res = compare_functions(f, f_10.getFunction());
+        EXPECT_TRUE(res.first) << res.second;
+    }
+
+    // read IR v10 with new API: the operation/tensor name collision must be reported
+    {
+        ov::runtime::Core core;
+        EXPECT_THROW(core.read_model(model, ov::runtime::Tensor()), ov::Exception);
+    }
+}
+
 TEST_F(RTInfoDeserialization, InputAndOutputV10) {
     std::string model = R"V0G0N(
diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp
index 29796844f2c..f131557f26c 100644
--- a/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/exec_network_base.hpp
@@ -6,6 +6,7 @@
 #include "base/behavior_test_utils.hpp"
 #include "common_test_utils/ngraph_test_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
+#include "openvino/core/model.hpp"
 
 namespace BehaviorTestsDefinitions {
 class ExecutableNetworkBaseTest : public testing::WithParamInterface<BehaviorTestsUtils::InferRequestParams>,
@@ -316,4 +317,50 @@ TEST_P(ExecNetSetPrecision, canSetOutputPrecisionForNetwork) {
     outputs_info.begin()->second->setPrecision(netPrecision);
     ASSERT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration));
 }
-}  // namespace BehaviorTestsDefinitions
\ No newline at end of file
+TEST_P(ExecutableNetworkBaseTest, loadIncorrectV10Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::op::v0::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::op::v0::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(10);
+        function->set_friendly_name("SimpleReLU");
+    }
+    InferenceEngine::CNNNetwork cnnNet(function);
+    EXPECT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration));
+}
+
+TEST_P(ExecutableNetworkBaseTest, loadIncorrectV11Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::op::v0::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::op::v0::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(11);
+        function->set_friendly_name("SimpleReLU");
+    }
+    InferenceEngine::CNNNetwork cnnNet(function);
+    EXPECT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration));
+}
+
+}  // namespace BehaviorTestsDefinitions
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp
index 2478213bd7d..b550b8293b6 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp
@@ -236,7 +236,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) {
     std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
     InferenceEngine::ExecutableNetwork execNet;
 
-// Create simple function
+    // Create simple function
     {
         auto param1 = std::make_shared<ov::op::v0::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
         param1->set_friendly_name("param1");
diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
index 85b5aeb182d..3c397bd3741 100644
--- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
+++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp
@@ -653,6 +653,51 @@ TEST_P(OVExecutableNetworkBaseTest, getCompiledModelFromInferRequest) {
         ASSERT_NO_THROW(another_req.infer());
     }
 }
+
+TEST_P(OVExecutableNetworkBaseTest, loadIncorrectV10Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::op::v0::Parameter>(element::Type_t::f32, ngraph::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::op::v0::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::op::v0::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(10);
+        function->set_friendly_name("SimpleReLU");
+    }
+    EXPECT_THROW(core->compile_model(function, targetDevice, configuration), ov::Exception);
+}
+
+TEST_P(OVExecutableNetworkBaseTest, loadIncorrectV11Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::op::v0::Parameter>(element::Type_t::f32, ngraph::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::op::v0::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::op::v0::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(11);
+        function->set_friendly_name("SimpleReLU");
+    }
+    EXPECT_NO_THROW(core->compile_model(function, targetDevice, configuration));
+}
+
 }  // namespace behavior
 }  // namespace test
 }  // namespace ov
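
Note (illustration, not part of the patch): the tests above also show the fix from the model side. Keep operation friendly names distinct from the tensor names of other nodes and the same rt_info["version"] = 10 model compiles cleanly under the new API; CPU below is a stand-in device:

    #include <memory>

    #include "openvino/core/model.hpp"
    #include "openvino/opsets/opset8.hpp"
    #include "openvino/runtime/core.hpp"

    int main() {
        auto param = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 3, 24, 24});
        param->set_friendly_name("param1");
        param->output(0).get_tensor().set_names({"data1"});

        auto relu = std::make_shared<ov::opset8::Relu>(param);
        relu->set_friendly_name("relu1");  // distinct from "data1": no collision
        relu->output(0).get_tensor().set_names({"relu"});

        auto result = std::make_shared<ov::opset8::Result>(relu);
        auto model = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
        model->get_rt_info()["version"] = int64_t(10);  // still marked as IR v10

        ov::runtime::Core core;
        auto compiled = core.compile_model(model, "CPU");  // expected to succeed
        return 0;
    }
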