Fix Python segmentation fault issue of plugin API 2.0 (#18325)

* Fix Python segmentation fault issue of plugin API 2.0

* Fix onnx_duplicated_output_name due to empty tensor

Co-authored-by: Bell, Song <bell.song@intel.com>

* Remove redundant code

* Keep rt_info to fix test failure in case of legacy public api

* Do not call set_name for the new port

---------

Co-authored-by: Bell, Song <bell.song@intel.com>
This commit is contained in:
River Li 2023-07-05 15:54:13 +08:00 committed by GitHub
parent 71306e3b38
commit 1abf2a01d8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 29 additions and 16 deletions

View File

@ -48,34 +48,47 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr<const ov::Model>& model
}
}
if (add_operation_names) {
for (const auto& param : model->get_parameters()) {
const auto& param_name = param->get_friendly_name();
for (const auto& param : model->get_parameters()) {
const auto& param_name = param->get_friendly_name();
auto new_param = ov::as_type_ptr<ov::op::v0::Parameter>(param->copy_with_new_inputs({}));
new_param->set_friendly_name(param_name);
if (add_operation_names) {
OPENVINO_ASSERT(!m_plugin->is_new_api() || leaf_names.find(param_name) == leaf_names.end() ||
param->output(0).get_names().find(param_name) != param->output(0).get_names().end(),
"Model operation names have collisions with tensor names.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.insert(param_name);
param->output(0).get_tensor().add_names({param_name});
m_inputs.emplace_back(
ov::Output<const ov::Node>{param->output(0).get_node(), param->output(0).get_index()});
new_param->output(0).get_tensor().add_names({param_name});
}
for (const auto& result : model->get_results()) {
auto fake_param = std::make_shared<ov::op::v0::Parameter>(result->get_output_element_type(0),
result->get_output_partial_shape(0));
const std::string res_name = ov::op::util::create_ie_output_name(result->input_value(0));
new_param->set_element_type(param->get_element_type());
new_param->set_layout(param->get_layout());
new_param->output(0).get_rt_info() = param->output(0).get_rt_info();
new_param->validate_and_infer_types();
m_inputs.emplace_back(new_param->output(0));
}
for (const auto& result : model->get_results()) {
auto fake_param = std::make_shared<ov::op::v0::Parameter>(result->get_output_element_type(0),
result->get_output_partial_shape(0));
const std::string res_name = ov::op::util::create_ie_output_name(result->input_value(0));
fake_param->set_friendly_name(res_name);
fake_param->set_element_type(result->get_element_type());
fake_param->validate_and_infer_types();
auto new_result = result->copy_with_new_inputs({fake_param});
new_result->set_friendly_name(result->get_friendly_name());
if (add_operation_names) {
OPENVINO_ASSERT(!m_plugin->is_new_api() || leaf_names.find(res_name) == leaf_names.end() ||
result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
"Model operation names have collisions with tensor names.",
" Please use MO to generate new IR version, it should allow to avoid the issue");
leaf_names.insert(res_name);
result->output(0).get_tensor().add_names({res_name});
m_outputs.emplace_back(
ov::Output<const ov::Node>{result->output(0).get_node(), result->output(0).get_index()});
new_result->output(0).get_tensor().add_names({res_name});
}
} else {
m_inputs = model->inputs();
m_outputs = model->outputs();
auto r = std::dynamic_pointer_cast<ov::op::v0::Result>(new_result);
r->set_layout(result->get_layout());
new_result->output(0).get_rt_info() = result->output(0).get_rt_info();
m_outputs.emplace_back(new_result->output(0));
}
}
}

View File

@ -263,10 +263,10 @@ void ov::ISyncInferRequest::allocate_tensor(const ov::Output<const ov::Node>& po
void ov::ISyncInferRequest::check_tensors() const {
const auto& inputs = m_compiled_model->inputs();
for (size_t i = 0; i < inputs.size(); i++) {
check_tensor(inputs[i], m_tensors.at(inputs[i].get_tensor_ptr()));
check_tensor(inputs[i], get_ref_tensor(inputs[i]));
}
const auto& outputs = m_compiled_model->outputs();
for (size_t i = 0; i < outputs.size(); i++) {
check_tensor(outputs[i], m_tensors.at(outputs[i].get_tensor_ptr()));
check_tensor(outputs[i], get_ref_tensor(outputs[i]));
}
}