Handle name collisions for old IR with new API (#9388)
* Handle name collisions for old IR with new API
* Fixed load model
* Try to fix tests
* Try to fix tests
* Try to fix build
* Try to fix tests
* Fixed tests
* Revert "Fixed tests"
  This reverts commit 35da307210.
* Refactoring
* Fixed functional test
* Try to fix CPU tests
Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
parent e0485c1ad2
commit e095a90cdf
@@ -92,7 +92,8 @@ INFERENCE_ENGINE_API_CPP(OutputsDataMap) copyInfo(const OutputsDataMap& networkO
  */
 INFERENCE_ENGINE_API_CPP(void)
 SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
-                  const std::shared_ptr<const ov::Model>& function);
+                  const std::shared_ptr<const ov::Model>& function,
+                  bool new_api);
 
 /**
  * @interface IInferencePlugin
@@ -286,18 +286,21 @@ void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetwor
 
 void IInferencePlugin::SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
                                          const std::shared_ptr<const ov::Model>& function) {
-    InferenceEngine::SetExeNetworkInfo(exeNetwork, function);
+    bool newAPI = this->GetCore() && this->GetCore()->isNewAPI();
+    InferenceEngine::SetExeNetworkInfo(exeNetwork, function, newAPI);
     exeNetwork->SetPointerToPlugin(shared_from_this());
 }
 
 void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNetwork,
-                       const std::shared_ptr<const ov::Model>& function) {
+                       const std::shared_ptr<const ov::Model>& function,
+                       bool new_api) {
     OPENVINO_ASSERT(exeNetwork != nullptr);
     OPENVINO_ASSERT(function != nullptr);
 
     std::vector<std::shared_ptr<const ov::Node>> const_params;
     std::vector<std::shared_ptr<const ov::Node>> const_results;
 
+    std::unordered_set<std::string> leaf_names;
     bool add_operation_names = false;
     const auto& rt_info = function->get_rt_info();
     const auto it = rt_info.find("version");
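Note the derivation above: the legacy two-argument member function stays in place and computes the flag from the plugin's Core, so existing plugin code keeps compiling while the free function gains the check. A self-contained sketch of that forwarding pattern (hypothetical mock types, not the OpenVINO sources):

    // Sketch: legacy entry point derives the flag and forwards to the new
    // overload. Mock types; only the pattern matches the diff above.
    #include <iostream>
    #include <memory>

    struct Core {
        bool new_api = false;
        bool isNewAPI() const { return new_api; }
    };

    void SetExeNetworkInfo(int exe_network, bool new_api) {
        std::cout << "strict name checks: " << std::boolalpha << new_api << "\n";
    }

    void SetExeNetworkInfo(int exe_network, const std::shared_ptr<Core>& core) {
        // A missing Core counts as "old API", so the checks stay off.
        const bool new_api = core && core->isNewAPI();
        SetExeNetworkInfo(exe_network, new_api);
    }

    int main() {
        SetExeNetworkInfo(1, std::shared_ptr<Core>{});             // old API path
        SetExeNetworkInfo(2, std::make_shared<Core>(Core{true}));  // new API path
    }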
@@ -307,6 +310,14 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
         // getInputs / getOutputs. Since these functions are designed to be used in new API only
         // always need to add operation names for IR v10
         add_operation_names = ir_version == 10;
+
+        for (const auto& vals : {function->inputs(), function->outputs()}) {
+            for (const auto& val : vals) {
+                for (const auto& name : val.get_names()) {
+                    leaf_names.insert(name);
+                }
+            }
+        }
     }
 
     const auto& inputsInfo = exeNetwork->GetInputsInfo();
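The triple loop above is the whole collection step: every tensor name reachable from the model's inputs and outputs lands in leaf_names before any operation name is considered. The same traversal works against the public API, e.g. (assumes OpenVINO headers; mirrors the diff rather than quoting it):

    // Sketch: gather all input/output tensor names of an ov::Model,
    // mirroring the leaf_names collection in SetExeNetworkInfo.
    #include <memory>
    #include <string>
    #include <unordered_set>
    #include "openvino/core/model.hpp"

    std::unordered_set<std::string> collect_leaf_names(const std::shared_ptr<const ov::Model>& model) {
        std::unordered_set<std::string> leaf_names;
        for (const auto& ports : {model->inputs(), model->outputs()}) {
            for (const auto& port : ports) {
                for (const auto& name : port.get_names()) {
                    leaf_names.insert(name);
                }
            }
        }
        return leaf_names;
    }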
@@ -315,14 +326,21 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
     OPENVINO_ASSERT(outputsInfo.size() == function->get_output_size());
 
     for (const auto& param : function->get_parameters()) {
+        const auto& param_name = param->get_friendly_name();
         auto new_param = ov::as_type_ptr<ov::op::v0::Parameter>(param->copy_with_new_inputs({}));
-        new_param->set_friendly_name(param->get_friendly_name());
-        if (add_operation_names)
-            new_param->output(0).get_tensor().add_names({new_param->get_friendly_name()});
+        new_param->set_friendly_name(param_name);
+        if (add_operation_names) {
+            OPENVINO_ASSERT(!new_api || leaf_names.find(param_name) == leaf_names.end() ||
+                                param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
+                            "Model operation names have collisions with tensor names.",
+                            " Please use MO to generate new IR version, it should allow to avoid the issue");
+            leaf_names.insert(param_name);
+            new_param->output(0).get_tensor().add_names({param_name});
+        }
         // WA: use CNNNetwork's precisions since plugins sometimes override their precisions
         // after transformation pipeline is run
         new_param->set_element_type(
-            InferenceEngine::details::convertPrecision(inputsInfo.at(new_param->get_friendly_name())->getPrecision()));
+            InferenceEngine::details::convertPrecision(inputsInfo.at(param_name)->getPrecision()));
         new_param->set_layout(param->get_layout());
         new_param->output(0).get_rt_info() = param->output(0).get_rt_info();
         new_param->validate_and_infer_types();
@@ -331,15 +349,20 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
     for (const auto& result : function->get_results()) {
         auto fake_param = std::make_shared<ov::op::v0::Parameter>(result->get_output_element_type(0),
                                                                   result->get_output_partial_shape(0));
-        const std::string param_name = ngraph::op::util::create_ie_output_name(result->input_value(0));
-        fake_param->set_friendly_name(param_name);
+        const std::string res_name = ngraph::op::util::create_ie_output_name(result->input_value(0));
+        fake_param->set_friendly_name(res_name);
         fake_param->set_element_type(
-            InferenceEngine::details::convertPrecision(outputsInfo.at(param_name)->getPrecision()));
+            InferenceEngine::details::convertPrecision(outputsInfo.at(res_name)->getPrecision()));
         fake_param->validate_and_infer_types();
         auto new_result = result->copy_with_new_inputs({fake_param});
         new_result->set_friendly_name(result->get_friendly_name());
         if (add_operation_names) {
-            new_result->output(0).get_tensor().add_names({fake_param->get_friendly_name()});
+            OPENVINO_ASSERT(!new_api || leaf_names.find(res_name) == leaf_names.end() ||
+                                result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
+                            "Model operation names have collisions with tensor names.",
+                            " Please use MO to generate new IR version, it should allow to avoid the issue");
+            leaf_names.insert(res_name);
+            new_result->output(0).get_tensor().add_names({res_name});
         }
         auto r = std::dynamic_pointer_cast<ov::op::v0::Result>(new_result);
         OPENVINO_ASSERT(r, "Internal error. SetNetworkInfo failure casting output copy to Result");
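The escape hatch in these asserts is easy to miss: an operation name that matches a tensor name is rejected only when that tensor name belongs to a different port; if the name is already among the operation's own output tensor names, re-adding it is harmless. Factored out as a predicate (hypothetical helper, same logic as the asserts above):

    // Sketch: the collision predicate behind the OPENVINO_ASSERTs above.
    // port_names are the tensor names already on the operation's own output
    // port; a match there is legal because no ambiguity can arise.
    #include <string>
    #include <unordered_set>

    bool name_collides(const std::string& op_name,
                       const std::unordered_set<std::string>& leaf_names,
                       const std::unordered_set<std::string>& port_names) {
        return leaf_names.count(op_name) != 0 && port_names.count(op_name) == 0;
    }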
@@ -512,7 +512,7 @@ public:
             res = compile_model_impl(network, plugin, parsed._config, context, hash);
         } else {
             // Temporary workaround until all plugins support caching of original model inputs
-            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction());
+            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction(), isNewAPI());
         }
     } else {
         res = compile_model_impl(network, plugin, parsed._config, context, {});
@@ -589,7 +589,7 @@ public:
             res = compile_model_impl(network, plugin, parsed._config, nullptr, hash, {}, forceDisableCache);
         } else {
             // Temporary workaround until all plugins support caching of original model inputs
-            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction());
+            InferenceEngine::SetExeNetworkInfo(res._ptr, network.getFunction(), isNewAPI());
         }
     } else {
         res = compile_model_impl(network, plugin, parsed._config, nullptr, {}, {}, forceDisableCache);
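Both caching-workaround call sites now thread isNewAPI() through, so a model compiled via ov::runtime::Core gets the strict checks while InferenceEngine::Core keeps the permissive legacy behavior. Presumably the shared Core implementation simply records which public wrapper constructed it; a minimal sketch of that pattern (hypothetical, not the actual ie_core.cpp):

    // Sketch: one shared implementation serving two public front-ends and
    // remembering which one constructed it. Hypothetical mock of the pattern,
    // not the actual ie_core.cpp.
    #include <memory>

    class CoreImpl {
    public:
        explicit CoreImpl(bool new_api) : _newAPI(new_api) {}
        bool isNewAPI() const { return _newAPI; }

    private:
        bool _newAPI;
    };

    struct LegacyCore {   // InferenceEngine::Core analogue
        std::shared_ptr<CoreImpl> impl = std::make_shared<CoreImpl>(false);
    };

    struct ModernCore {   // ov::runtime::Core analogue
        std::shared_ptr<CoreImpl> impl = std::make_shared<CoreImpl>(true);
    };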
@@ -302,11 +302,32 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
     const int64_t ir_version = it->second.as<int64_t>();
 
     if (ir_version == 10 && newAPI) {
+        std::unordered_set<std::string> leaf_names;
         const auto inputs = function->inputs();
         for (size_t i = 0; i < inputs.size(); ++i) {
             const auto ngraph_type = inputs[i].get_element_type();
             const auto legacy_type = details::toLegacyType(ngraph_type, true);
             prepost.input(i).tensor().set_element_type(legacy_type);
+            for (const auto& name : inputs[i].get_names()) {
+                OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(),
+                                "Model tensor names have collisions.",
+                                " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(name);
+            }
         }
+
+        const auto outputs = function->outputs();
+        for (size_t i = 0; i < outputs.size(); ++i) {
+            const auto ngraph_type = outputs[i].get_element_type();
+            const auto legacy_type = details::toLegacyType(ngraph_type, false);
+
+            prepost.output(i).tensor().set_element_type(legacy_type);
+            for (const auto& name : outputs[i].get_names()) {
+                OPENVINO_ASSERT(leaf_names.find(name) == leaf_names.end(),
+                                "Model tensor names have collisions.",
+                                " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(name);
+            }
+        }
 
         // in order to support the following scenarios for IR v10 cases:
@@ -317,29 +338,28 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
         // f.reshape({ { "input_operation_name", ov::PartialShape{} } });
         // we need to add operation names as tensor names for inputs and outputs
         {
-            std::vector<std::string> result_names;
-            std::vector<ov::Output<ov::Node>> prevPorts;
-            result_names.reserve(function->get_results().size());
-            prevPorts.reserve(function->get_results().size());
-
             for (const auto& result : function->get_results()) {
-                result_names.emplace_back(ngraph::op::util::create_ie_output_name(result->input_value(0)));
-                result->output(0).get_tensor().add_names({result_names.back()});
-                prevPorts.emplace_back(result->input_value(0));
+                auto res_name = ngraph::op::util::create_ie_output_name(result->input_value(0));
+                OPENVINO_ASSERT(
+                    leaf_names.find(res_name) == leaf_names.end() ||
+                        result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
+                    "Model operation names have collisions with tensor names.",
+                    " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(res_name);
+                result->output(0).get_tensor().add_names({res_name});
             }
             for (const auto& param : function->get_parameters()) {
-                param->output(0).get_tensor().add_names({param->get_friendly_name()});
+                auto param_name = param->get_friendly_name();
+                OPENVINO_ASSERT(
+                    leaf_names.find(param_name) == leaf_names.end() ||
+                        param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
+                    "Model operation names have collisions with tensor names.",
+                    " Please use MO to generate new IR version, it should allow to avoid the issue");
+                leaf_names.insert(param_name);
+                param->output(0).get_tensor().add_names({param_name});
             }
         }
-
-        const auto outputs = function->outputs();
-        for (size_t i = 0; i < outputs.size(); ++i) {
-            const auto ngraph_type = outputs[i].get_element_type();
-            const auto legacy_type = details::toLegacyType(ngraph_type, false);
-
-            prepost.output(i).tensor().set_element_type(legacy_type);
-        }
 
         function = prepost.build();
 
         // Set version to 10
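Both branches lean on ngraph::op::util::create_ie_output_name to derive the legacy output name from the port feeding a Result. Its behavior is roughly the following (an approximation for orientation, not the actual implementation):

    // Sketch: approximate behavior of ngraph::op::util::create_ie_output_name.
    // For a single-output producer the legacy name is the friendly name itself;
    // for multi-output producers the port index is appended after a dot.
    #include <string>

    std::string approx_ie_output_name(const std::string& producer_friendly_name,
                                      size_t producer_output_count,
                                      size_t port_index) {
        if (producer_output_count == 1)
            return producer_friendly_name;
        return producer_friendly_name + "." + std::to_string(port_index);
    }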
@@ -202,6 +202,96 @@ TEST_F(RTInfoDeserialization, NodeV10) {
     }
 }
 
+TEST_F(RTInfoDeserialization, NamesCollisionV10) {
+    std::string model = R"V0G0N(
+<net name="Network" version="10">
+    <layers>
+        <layer name="in1" type="Parameter" id="0" version="opset8">
+            <data element_type="f16" shape="1,3,22,22"/>
+            <rt_info>
+                <attribute name="fused_names" version="0" value="in1"/>
+                <attribute name="old_api_map_order" version="0" value="0,2,3,1" />
+                <attribute name="old_api_map_element_type" version="0" value="f16"/>
+            </rt_info>
+            <output>
+                <port id="0" precision="FP16" names="input_tensor">
+                    <rt_info>
+                        <attribute name="layout" version="0" layout="[N,C,H,W]"/>
+                    </rt_info>
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="input_tensor" id="1" type="Round" version="opset8">
+            <data mode="half_to_even"/>
+            <rt_info>
+                <attribute name="fused_names" version="0" value="Round1,Round2"/>
+            </rt_info>
+            <input>
+                <port id="1" precision="FP16">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </input>
+            <output>
+                <port id="2" precision="FP16" names="output_tensor">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </output>
+        </layer>
+        <layer name="output" type="Result" id="2" version="opset8">
+            <input>
+                <port id="0" precision="FP16">
+                    <dim>1</dim>
+                    <dim>3</dim>
+                    <dim>22</dim>
+                    <dim>22</dim>
+                </port>
+            </input>
+        </layer>
+    </layers>
+    <edges>
+        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
+        <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
+    </edges>
+</net>
+)V0G0N";
+    auto f = getWithIRFrontend(model);
+    ASSERT_NE(nullptr, f);
+
+    auto check_version = [](const std::shared_ptr<ov::Model>& f, int version_ref) {
+        auto& rt_info = f->get_rt_info();
+        ASSERT_TRUE(rt_info.count("version"));
+        ASSERT_TRUE(rt_info.at("version").is<int64_t>());
+        ASSERT_EQ(rt_info.at("version").as<int64_t>(), version_ref);
+    };
+    check_version(f, 10);
+
+    // read IR v10 with old API
+    {
+        InferenceEngine::Core core;
+        auto f_10 = core.ReadNetwork(model, InferenceEngine::Blob::CPtr());
+        ASSERT_NE(nullptr, f_10.getFunction());
+
+        auto res = compare_functions(f, f_10.getFunction());
+        EXPECT_TRUE(res.first) << res.second;
+    }
+
+    // read IR v10 with new API: the name collision must surface as ov::Exception
+    {
+        ov::runtime::Core core;
+        EXPECT_THROW(core.read_model(model, ov::runtime::Tensor()), ov::Exception);
+    }
+}
+
 TEST_F(RTInfoDeserialization, InputAndOutputV10) {
     std::string model = R"V0G0N(
 <net name="Network" version="10">
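The test model is the collision in miniature: the Round layer's name, input_tensor, equals the tensor name on the Parameter's output port, so promoting operation names to tensor names for IR v10 would create an ambiguous lookup. From user code the difference looks roughly like this (sketch against the 2022.1-era API; model_xml is assumed to hold the IR above):

    // Sketch: the user-visible difference the test pins down.
    #include <iostream>
    #include <string>
    #include "ie_core.hpp"
    #include "openvino/core/except.hpp"
    #include "openvino/runtime/core.hpp"

    void read_both_ways(const std::string& model_xml) {
        InferenceEngine::Core old_core;
        auto net = old_core.ReadNetwork(model_xml, InferenceEngine::Blob::CPtr());  // accepted

        ov::runtime::Core new_core;
        try {
            new_core.read_model(model_xml, ov::runtime::Tensor());
        } catch (const ov::Exception& e) {
            std::cout << "new API rejects the model: " << e.what() << "\n";
        }
    }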
@@ -6,6 +6,7 @@
 #include "base/behavior_test_utils.hpp"
 #include "common_test_utils/ngraph_test_utils.hpp"
 #include "common_test_utils/file_utils.hpp"
+#include "openvino/core/model.hpp"
 
 namespace BehaviorTestsDefinitions {
 class ExecutableNetworkBaseTest : public testing::WithParamInterface<BehaviorTestsUtils::InferRequestParams>,
@@ -316,4 +317,50 @@ TEST_P(ExecNetSetPrecision, canSetOutputPrecisionForNetwork) {
     outputs_info.begin()->second->setPrecision(netPrecision);
     ASSERT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration));
 }
+TEST_P(ExecutableNetworkBaseTest, loadIncorrectV10Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::opset8::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::opset8::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::opset8::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(10);
+        function->set_friendly_name("SimpleReLU");
+    }
+    InferenceEngine::CNNNetwork cnnNet(function);
+    EXPECT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration));
+}
+
+TEST_P(ExecutableNetworkBaseTest, loadIncorrectV11Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::opset8::Parameter>(ov::element::Type_t::f32, ov::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::opset8::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::opset8::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(11);
+        function->set_friendly_name("SimpleReLU");
+    }
+    InferenceEngine::CNNNetwork cnnNet(function);
+    EXPECT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration));
+}
+
 } // namespace BehaviorTestsDefinitions
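Despite the loadIncorrectV10Model name, the legacy path is expected to keep accepting the colliding model: only the new API turns the collision into an error, and the message points users at regenerating the IR with a newer Model Optimizer. When the model is built in code, the simplest fix is to keep operation friendly names distinct from tensor names, e.g. (hypothetical variant of the test model):

    // Sketch: a collision-free variant of the test model, keeping operation
    // friendly names distinct from tensor names (hypothetical example).
    #include <memory>
    #include "openvino/core/model.hpp"
    #include "openvino/opsets/opset8.hpp"

    std::shared_ptr<ov::Model> build_collision_free() {
        auto param = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::Shape{1, 3, 24, 24});
        param->set_friendly_name("param1");
        param->output(0).get_tensor().set_names({"data1"});
        auto relu = std::make_shared<ov::opset8::Relu>(param);
        relu->set_friendly_name("relu1");  // not "data1": no clash with a tensor name
        relu->output(0).get_tensor().set_names({"relu"});
        auto result = std::make_shared<ov::opset8::Result>(relu);
        result->set_friendly_name("result");
        return std::make_shared<ov::Model>(ov::ResultVector{result}, ov::ParameterVector{param});
    }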
@@ -236,7 +236,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) {
     std::shared_ptr<InferenceEngine::Core> ie = ::PluginCache::get().ie();
     InferenceEngine::ExecutableNetwork execNet;
 
-    // Create simple function
+    // Create simple function
     {
         auto param1 = std::make_shared<ov::opset8::Parameter>(elementType, ngraph::Shape({1, 3, 24, 24}));
         param1->set_friendly_name("param1");
@@ -653,6 +653,51 @@ TEST_P(OVExecutableNetworkBaseTest, getCompiledModelFromInferRequest) {
         ASSERT_NO_THROW(another_req.infer());
     }
 }
+
+TEST_P(OVExecutableNetworkBaseTest, loadIncorrectV10Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::opset8::Parameter>(element::Type_t::f32, ngraph::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::opset8::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::opset8::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(10);
+        function->set_friendly_name("SimpleReLU");
+    }
+    EXPECT_THROW(core->compile_model(function, targetDevice, configuration), ov::Exception);
+}
+
+TEST_P(OVExecutableNetworkBaseTest, loadIncorrectV11Model) {
+    // Skip test according to plugin specific disabledTestPatterns() (if any)
+    SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::runtime::CompiledModel execNet;
+
+    // Create simple function
+    {
+        auto param1 = std::make_shared<ov::opset8::Parameter>(element::Type_t::f32, ngraph::Shape({1, 3, 24, 24}));
+        param1->set_friendly_name("param1");
+        param1->output(0).get_tensor().set_names({"data1"});
+        auto relu = std::make_shared<ov::opset8::Relu>(param1);
+        relu->set_friendly_name("data1");
+        relu->output(0).get_tensor().set_names({"relu"});
+        auto result = std::make_shared<ov::opset8::Result>(relu);
+        result->set_friendly_name("result");
+        function = std::make_shared<ngraph::Function>(ngraph::ResultVector{result}, ngraph::ParameterVector{param1});
+        function->get_rt_info()["version"] = int64_t(11);
+        function->set_friendly_name("SimpleReLU");
+    }
+    EXPECT_NO_THROW(core->compile_model(function, targetDevice, configuration));
+}
+
 } // namespace behavior
 } // namespace test
 } // namespace ov
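The V11 twin passes because every check in this PR is keyed off the version field in rt_info: only models tagged as IR v10 get operation names promoted to tensor names, so an IR v11 model with the same clash compiles cleanly. The gate, restated as a helper (same logic as add_operation_names above):

    // Sketch: the version gate driving the checks, as in SetExeNetworkInfo.
    // model is any ov::Model; returns true only for models tagged IR v10.
    #include <memory>
    #include "openvino/core/model.hpp"

    bool needs_operation_names(const std::shared_ptr<const ov::Model>& model) {
        const auto& rt_info = model->get_rt_info();
        const auto it = rt_info.find("version");
        if (it == rt_info.end())
            return false;
        return it->second.as<int64_t>() == 10;
    }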