Allow duplicated outputs in ie_plugin_internal (#9882)

* Avoid duplicated outputs with the same name

* Revert onnx graph changes

* Allow output duplicates in ie_plugin_internal check

* Add test with onnx model

* Check get_tensor_ptr instead of any_name

* More outputs test

* Refactor to use std::transform

* test manifest update

* Remove redundant header

* INTERPRETER segfaults fix for duplicated output names

* Simplify duplication assert

* Update test names

* Test update
Katarzyna Mitrus 2022-01-29 00:58:09 +01:00 committed by GitHub
parent 9cb6626ffd
commit e3ec1ac9b3
6 changed files with 267 additions and 10 deletions
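
For orientation, the changes below all follow one principle: two model outputs may share a tensor name, but they are only true duplicates when they refer to the same underlying tensor object, so identity checks compare tensor pointers (get_tensor_ptr()) rather than names (any_name). A minimal stand-alone sketch of that deduplication pattern, using a hypothetical Tensor struct instead of the real ov::descriptor::Tensor:

    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Hypothetical stand-in for ov::descriptor::Tensor.
    struct Tensor {
        std::string any_name;
    };

    int main() {
        auto t0 = std::make_shared<Tensor>(Tensor{"T0"});
        auto t1 = std::make_shared<Tensor>(Tensor{"T1"});

        // Four model outputs, three of which are literally the same tensor.
        std::vector<std::shared_ptr<Tensor>> outputs{t0, t0, t1, t0};

        // Keying the set by pointer identity counts unique tensors; keying by
        // any_name would also conflate distinct tensors that merely share a name.
        std::unordered_set<std::shared_ptr<Tensor>> unique(outputs.begin(), outputs.end());
        std::cout << "outputs: " << outputs.size() << ", unique: " << unique.size()
                  << ", duplicated: " << outputs.size() - unique.size() << "\n";  // 4, 2, 2
    }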


@@ -72,20 +72,20 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
     }
 
     // map function params -> HostTensor
-    unordered_map<descriptor::Tensor*, shared_ptr<HostTensor>> tensor_map;
+    std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, shared_ptr<HostTensor>> tensor_map;
     size_t input_count = 0;
     for (const auto& param : get_parameters()) {
         for (size_t i = 0; i < param->get_output_size(); ++i) {
-            descriptor::Tensor* tensor = &param->output(i).get_tensor();
+            auto tensor = param->output(i).get_tensor_ptr();
             tensor_map.insert({tensor, func_inputs[input_count++]});
         }
     }
 
-    std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> results_map;
+    std::unordered_map<std::shared_ptr<ov::descriptor::Tensor>, size_t> results_map;
     // map function outputs -> HostTensor
     for (size_t output_count = 0; output_count < get_results().size(); ++output_count) {
-        auto output = get_results()[output_count];
-        results_map[output] = output_count;
+        auto output = get_results()[output_count]->output(0).get_tensor_ptr();
+        results_map.emplace(output, results_map.size());
     }
 
     EvaluationContext eval_context;
@@ -101,7 +101,7 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         // get op inputs from map
         vector<shared_ptr<HostTensor>> op_inputs;
         for (auto input : op->inputs()) {
-            descriptor::Tensor* tensor = &input.get_tensor();
+            auto tensor = input.get_tensor_ptr();
             op_inputs.push_back(tensor_map.at(tensor));
         }
 
@@ -115,11 +115,11 @@ bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::
         // get op outputs from map or create
         vector<shared_ptr<HostTensor>> op_outputs;
         for (size_t i = 0; i < op->get_output_size(); ++i) {
-            descriptor::Tensor* tensor = &op->output(i).get_tensor();
+            auto tensor = op->output(i).get_tensor_ptr();
             shared_ptr<HostTensor> host_tensor;
             auto it = tensor_map.find(tensor);
             if (op::is_output(op)) {
-                host_tensor = func_outputs[results_map[op]];
+                host_tensor = func_outputs[results_map[tensor]];
             } else if (it == tensor_map.end()) {
                 // Use cloned_node to create HostTensor with static dimensions
                 host_tensor = make_shared<HostTensor>(cloned_node->output(i));
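
With duplicated graph outputs, keying results_map by the Result node is fragile; the hunk above keys it by the result's output tensor pointer and inserts with emplace, so a repeated tensor keeps the index of its first occurrence, and the later lookup results_map[tensor] resolves for every duplicate. A minimal sketch of that emplace-based indexing, with shared_ptr<int> as a hypothetical stand-in for tensor pointers:

    #include <iostream>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    int main() {
        auto a = std::make_shared<int>(0);  // hypothetical tensor pointers
        auto b = std::make_shared<int>(0);

        // Result tensors listed with a duplicate, as with repeated outputs.
        std::vector<std::shared_ptr<int>> results{a, a, b};

        std::unordered_map<std::shared_ptr<int>, size_t> results_map;
        for (const auto& t : results) {
            // emplace is a no-op for a repeated key, so the first index sticks;
            // operator[] would overwrite it on every duplicate.
            results_map.emplace(t, results_map.size());
        }
        std::cout << results_map.at(a) << " " << results_map.at(b) << "\n";  // prints: 0 1
    }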


@@ -112,10 +112,12 @@ void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<con
     // Generate backend specific blob mappings. For example Inference Engine uses not ngraph::Result nodes friendly name
     // as inference request output names but the name of the layer before.
+    size_t idx = 0;
     for (auto&& result : _function->get_results()) {
         const auto& input = result->input_value(0);
         auto name = ngraph::op::util::get_ie_output_name(input);
-        _outputIndex.emplace(name, _function->get_result_index(result));
+        if (_outputIndex.emplace(name, idx).second)
+            idx++;
     }
     for (auto&& parameter : _function->get_parameters()) {
         _inputIndex.emplace(parameter->get_friendly_name(), _function->get_parameter_index(parameter));
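
The new idx counter advances only when emplace actually inserts, i.e. when the name was not seen before, so duplicated output names collapse onto one index and the numbering stays dense. A small sketch of the emplace(...).second idiom in isolation (plain std::map, not the plugin's real _outputIndex type):

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        // Output names as a plugin might collect them: one name repeated.
        std::vector<std::string> names{"T0", "T0", "T1", "T0"};

        std::map<std::string, size_t> output_index;
        size_t idx = 0;
        for (const auto& name : names) {
            // .second is true only when the name was actually inserted,
            // so duplicates reuse the first index and idx never skips a slot.
            if (output_index.emplace(name, idx).second)
                ++idx;
        }
        for (const auto& entry : output_index)
            std::cout << entry.first << " -> " << entry.second << "\n";  // T0 -> 0, T1 -> 1
    }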


@@ -0,0 +1,124 @@
ir_version: 3
producer_name: "onnx tests"
graph {
  node {
    input: "A"
    input: "B"
    output: "X"
    name: "add_node1"
    op_type: "Add"
  }
  node {
    input: "X"
    input: "C"
    output: "K"
    name: "add_node2"
    op_type: "Add"
  }
  node {
    input: "K"
    output: "T0"
    output: "T1"
    op_type: "Split"
    attribute {
      name: "axis"
      i: 0
      type: INT
    }
  }
  name: "test_graph"
  input {
    name: "A"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 2
          }
        }
      }
    }
  }
  input {
    name: "B"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  input {
    name: "C"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "T0"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "T0"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "T1"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "T0"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
}
opset_import {
  version: 4
}


@@ -0,0 +1,87 @@
ir_version: 3
producer_name: "onnx tests"
graph {
  node {
    input: "A"
    input: "B"
    output: "X"
    name: "add_node1"
    op_type: "Add"
  }
  node {
    input: "X"
    input: "C"
    output: "T0"
    name: "add_node2"
    op_type: "Add"
  }
  name: "test_graph"
  input {
    name: "A"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  input {
    name: "B"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  input {
    name: "C"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "T0"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
  output {
    name: "T0"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 1
          }
        }
      }
    }
  }
}
opset_import {
  version: 4
}


@@ -99,6 +99,32 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_node_names_check) {
     EXPECT_EQ(additions.at(1)->get_output_tensor(0).get_names(), std::unordered_set<std::string>{"Y"});
 }
 
+NGRAPH_TEST(${BACKEND_NAME}, onnx_duplicated_output_name) {
+    auto function =
+        onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/duplicated_output_name.onnx"));
+    EXPECT_EQ(function->get_output_size(), 2);
+
+    auto test_case = test::TestCase(function, s_device);
+    test_case.add_multiple_inputs(Inputs{{1}, {2}, {3}});
+    test_case.add_expected_output(Shape{1}, std::vector<float>{6});
+    test_case.add_expected_output(Shape{1}, std::vector<float>{6});
+    test_case.run();
+}
+
+NGRAPH_TEST(${BACKEND_NAME}, onnx_duplicated_more_output_names) {
+    auto function =
+        onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/duplicated_more_output_names.onnx"));
+    EXPECT_EQ(function->get_output_size(), 4);
+
+    auto test_case = test::TestCase(function, s_device);
+    test_case.add_multiple_inputs(Inputs{{1, 2}, {2}, {3}});
+    test_case.add_expected_output(Shape{1}, std::vector<float>{6});
+    test_case.add_expected_output(Shape{1}, std::vector<float>{6});
+    test_case.add_expected_output(Shape{1}, std::vector<float>{7});
+    test_case.add_expected_output(Shape{1}, std::vector<float>{6});
+    test_case.run();
+}
+
 NGRAPH_TEST(${BACKEND_NAME}, onnx_model_binary_add_abc) {
     auto function = onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/add_abc.onnx"));
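
As a sanity check on the expected values: in the two-output model, T0 = (1 + 2) + 3 = 6, so both declared outputs read 6. In the four-output model the inputs are A={1,2}, B={2}, C={3} (broadcast adds), giving K={6,7}, which Split divides into T0={6} and T1={7}; the declared outputs (T0, T0, T1, T0) are therefore (6, 6, 7, 6). The same arithmetic as a tiny self-contained check:

    #include <cassert>
    #include <vector>

    int main() {
        // Inputs as in onnx_duplicated_more_output_names: A={1,2}, B={2}, C={3}.
        std::vector<float> A{1, 2};
        float B = 2, C = 3;

        // K = (A + B) + C with broadcasting, then Split(K) -> T0, T1.
        std::vector<float> K{A[0] + B + C, A[1] + B + C};  // {6, 7}
        float T0 = K[0], T1 = K[1];

        // Declared outputs (T0, T0, T1, T0) match the test expectations above.
        std::vector<float> expected{T0, T0, T1, T0};
        assert((expected == std::vector<float>{6, 6, 7, 6}));
        return 0;
    }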


@@ -323,7 +323,25 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
     const auto& inputsInfo = exeNetwork->GetInputsInfo();
     const auto& outputsInfo = exeNetwork->GetOutputsInfo();
     OPENVINO_ASSERT(inputsInfo.size() == function->get_parameters().size());
-    OPENVINO_ASSERT(outputsInfo.size() == function->get_output_size());
+
+    if (outputsInfo.size() != function->get_output_size()) {
+        const auto& outputs = function->outputs();
+        std::unordered_set<std::shared_ptr<ov::descriptor::Tensor>> output_tensors;
+        std::transform(outputs.cbegin(),
+                       outputs.cend(),
+                       std::inserter(output_tensors, output_tensors.begin()),
+                       [](const ov::Output<const ov::Node>& out) {
+                           return out.get_tensor_ptr();
+                       });
+        OPENVINO_ASSERT(outputsInfo.size() == output_tensors.size(),
+                        "outputsInfo.size() is: ",
+                        outputsInfo.size(),
+                        ", and function->get_output_size() is: ",
+                        function->get_output_size(),
+                        ". Number of duplicated outputs: ",
+                        outputs.size() - output_tensors.size());
+    }
+
     for (const auto& param : function->get_parameters()) {
         const auto& param_name = param->get_friendly_name();
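
The relaxed check above tolerates outputsInfo.size() differing from function->get_output_size() only when the gap is fully explained by duplicated tensors: std::transform projects every output to its tensor pointer, the unordered_set drops repeats, and the assert compares against the deduplicated count. The same counting pattern in miniature, again with shared_ptr<int> as a hypothetical tensor type:

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <memory>
    #include <unordered_set>
    #include <vector>

    int main() {
        auto t0 = std::make_shared<int>(0);  // hypothetical tensor pointers
        auto t1 = std::make_shared<int>(1);
        std::vector<std::shared_ptr<int>> outputs{t0, t0, t1, t0};

        // Project each output to its tensor and let the set drop duplicates
        // (the real code maps ov::Output<const ov::Node> to get_tensor_ptr()).
        std::unordered_set<std::shared_ptr<int>> unique_tensors;
        std::transform(outputs.cbegin(),
                       outputs.cend(),
                       std::inserter(unique_tensors, unique_tensors.begin()),
                       [](const std::shared_ptr<int>& out) { return out; });

        std::cout << "duplicated outputs: "
                  << outputs.size() - unique_tensors.size() << "\n";  // prints: 2
    }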