Fixed output names for case with redundant ops before result (#2209)

Ilya Churaev 2020-09-15 14:00:27 +03:00 committed by GitHub
parent 9b7e22f49a
commit 7bba2a9542
2 changed files with 128 additions and 0 deletions

View File

@@ -17,6 +17,10 @@
#include <ie_parameter.hpp>
#include <ie_core.hpp>
#include <legacy/net_pass.h>
#include <generic_ie.hpp>
#include <legacy/convert_function_to_cnn_network.hpp>
#include <transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.hpp>
#include <ngraph/pass/manager.hpp>
#include <ngraph/opsets/opset3.hpp>
#include <ngraph/function.hpp>
@@ -955,4 +959,126 @@ TEST(CNNNGraphImplTests, AddOutputToExperimentalOp) {
    auto outputs = network.getOutputsInfo();
    ASSERT_NE(outputs.find("exp.0"), outputs.end());
}

TEST(CNNNGraphImplTests, SaveOriginalResultNameForMultiOutputOp) {
    std::string model = R"V0G0N(
<net name="Activation" version="10">
    <layers>
        <layer name="in1" type="Parameter" id="0" version="opset1">
            <data shape="1,3,22,22" element_type="f32"/>
            <output>
                <port id="0" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </output>
        </layer>
        <layer name="exp" id="1" type="ExperimentalDetectronROIFeatureExtractor" version="experimental">
            <input>
                <port id="1" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </input>
            <output>
                <port id="2" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </output>
        </layer>
        <layer id="2" name="fake_const" type="Const" version="opset1">
            <data offset="0" size="4" shape="1,1,1,1" element_type="f32"/>
            <output>
                <port id="1" precision="FP32">
                    <dim>1</dim>
                    <dim>1</dim>
                    <dim>1</dim>
                    <dim>1</dim>
                </port>
            </output>
        </layer>
        <layer id="3" name="text_features" type="Add" version="opset1">
            <input>
                <port id="0">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
                <port id="1">
                    <dim>1</dim>
                    <dim>1</dim>
                    <dim>1</dim>
                    <dim>1</dim>
                </port>
            </input>
            <output>
                <port id="2" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </output>
        </layer>
        <layer name="output" type="Result" id="4" version="opset1">
            <input>
                <port id="0" precision="FP32">
                    <dim>1</dim>
                    <dim>3</dim>
                    <dim>22</dim>
                    <dim>22</dim>
                </port>
            </input>
        </layer>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
        <edge from-layer="1" from-port="2" to-layer="3" to-port="0"/>
        <edge from-layer="2" from-port="1" to-layer="3" to-port="1"/>
        <edge from-layer="3" from-port="2" to-layer="4" to-port="0"/>
    </edges>
</net>
)V0G0N";
    InferenceEngine::Core core;
    Blob::Ptr data = make_shared_blob<float>(TensorDesc(Precision::FP32, {4}, Layout::C));
    data->allocate();
    {
        auto lockData = data->buffer();
        float *dataPtr = lockData.as<float*>();
        for (size_t i = 0; i < 4; ++i) {
            dataPtr[i] = 0;
        }
    }
    CNNNetwork network = core.ReadNetwork(model, data);
    {
        auto outputs = network.getOutputsInfo();
        ASSERT_NE(outputs.find("text_features"), outputs.end());
    }
    auto nGraphFunc = network.getFunction();
    // Disable shape inference (WA for generic operations)
    ngraph::op::GenericIE::DisableReshape noReshape(nGraphFunc);
    ngraph::pass::Manager manager;
    manager.register_pass<ngraph::pass::ConvertOpSet1ToLegacy>();
    manager.run_passes(nGraphFunc);
    auto clonedNetwork = InferenceEngine::details::convertFunctionToICNNNetwork(nGraphFunc, network);
    {
        OutputsDataMap outputs;
        clonedNetwork->getOutputsInfo(outputs);
        ASSERT_NE(outputs.find("text_features"), outputs.end());
    }
}
IE_SUPPRESS_DEPRECATED_END

View File

@@ -902,6 +902,8 @@ bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>&
    if (has_result_output && !is_type<ngraph::op::Parameter>(replacement.get_node()))
    {
        replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name());
        // Update output tensor name
        replacement.get_tensor().set_name(output.get_node()->get_friendly_name());
    }
    output.replace(replacement);
    copy_runtime_info({replacement.get_node_shared_ptr(), output.get_node_shared_ptr()},
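
What follows is a minimal, hypothetical sketch (not part of the commit) of the behaviour the added line enables, assuming the public ngraph API of the 2020.x releases. A redundant Add with a zero constant feeds the Result, mirroring the "text_features" layer in the test IR above; bypassing it with replace_output_update_name is expected to carry over both the friendly name and the output tensor name.

// Minimal sketch (not part of this commit), assuming the ngraph 2020.x API:
// a redundant Add with a zero constant sits between a Relu and the Result.
// replace_output_update_name() bypasses the Add; with the change above it
// also moves the output tensor name, not just the friendly name.
#include <iostream>
#include <memory>

#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset3.hpp>

int main() {
    using namespace ngraph;

    auto param = std::make_shared<opset3::Parameter>(element::f32, Shape{1, 3, 22, 22});
    auto relu = std::make_shared<opset3::Relu>(param);

    // Redundant op before the Result, mirroring the "text_features" Add in the test IR.
    auto zero = opset3::Constant::create(element::f32, Shape{1, 1, 1, 1}, {0.0f});
    auto add = std::make_shared<opset3::Add>(relu, zero);
    add->set_friendly_name("text_features");

    auto result = std::make_shared<opset3::Result>(add);
    auto f = std::make_shared<Function>(ResultVector{result}, ParameterVector{param});

    // Bypass the redundant Add so the Result is fed by the Relu directly.
    // The Relu inherits the friendly name and, with this fix, the tensor name.
    replace_output_update_name(add->output(0), add->input_value(0));

    std::cout << relu->get_friendly_name() << "\n";                // "text_features"
    std::cout << relu->output(0).get_tensor().get_name() << "\n";  // "text_features"
    return 0;
}

Before this change only the friendly name was transferred; keeping the tensor name in sync is what the test above relies on when it still finds "text_features" in the output map after the legacy conversion removes the redundant Add.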