Introduce the Broker API to map original framework names to OV (#3800)

* Added tests

* Fixed tests

* Added tests to check addOutput method

* Added support for port names in the IR

* Update copyrights

* Deprecate tensor name

* Fixed comments

* Enabled functional tests for GPU, GNA and Myriad

* Fixed get_tensor().get_names()

* Added unit test to check tensor names

* Fixed code style

* Skip add output test for GNA

* Added serialization support

* Added PythonAPI

* Fixed tests

* Fixed tests

* Fixed typo

* Try to disable GNA test

* Fixed tests

* Removed unused variables

* Fixed tests

* Update documentation

* Fixed comment
Ilya Churaev 2021-01-29 06:28:31 +03:00 committed by GitHub
parent b0f5a339e1
commit 2ebae7cf30
39 changed files with 788 additions and 31 deletions
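
To make the new Broker API concrete, here is a minimal usage sketch that is not part of the diff: the model path, device name, and surrounding setup are hypothetical, and the IR is assumed to carry the new names port attribute shown in the tests below. Only getOVNameForTensor and getOVNameForOperation come from this change.

#include <inference_engine.hpp>
#include <iostream>
#include <string>

int main() {
    InferenceEngine::Core ie;
    // Hypothetical model; its IR sets names="relu_t, identity_t" on an output port,
    // as in the reader tests below.
    InferenceEngine::CNNNetwork network = ie.ReadNetwork("model.xml", "model.bin");

    // Map framework names to OpenVINO names; both calls throw
    // InferenceEngine::NotFound if no mapping exists.
    std::string ovTensorName = network.getOVNameForTensor("relu_t");
    std::string ovOpName = network.getOVNameForOperation("output");

    auto executable = ie.LoadNetwork(network, "CPU");
    auto request = executable.CreateInferRequest();
    request.Infer();

    // Blobs are keyed by OpenVINO names, so look the blob up via the mapped name.
    InferenceEngine::Blob::Ptr result = request.GetBlob(ovTensorName);
    std::cout << "relu_t -> " << ovTensorName << ", output -> " << ovOpName << std::endl;
    return 0;
}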

View File

@@ -1439,6 +1439,14 @@ cdef class IENetwork:
def _get_function_capsule(self):
return self.impl.getFunction()
def get_ov_name_for_tensor(self, orig_name: str):
name = bytes(orig_name, 'utf-8')
return self.impl.getOVNameForTensor(name).decode('utf-8')
def get_ov_name_for_operation(self, orig_name: str):
name = bytes(orig_name, 'utf-8')
return self.impl.getOVNameForOperation(name).decode('utf-8')
cdef class BlobBuffer:
"""Copy-less accessor for Inference Engine Blob"""

View File

@@ -260,6 +260,14 @@ const std::map <std::string, InferenceEngine::DataPtr> InferenceEnginePython::IE
return outputs;
}
std::string InferenceEnginePython::IENetwork::getOVNameForTensor(const std::string& orig_name) {
return actual->getOVNameForTensor(orig_name);
}
std::string InferenceEnginePython::IENetwork::getOVNameForOperation(const std::string& orig_name) {
return actual->getOVNameForOperation(orig_name);
}
void
InferenceEnginePython::IENetwork::addOutput(const std::string &out_layer, size_t port_id) {
actual->addOutput(out_layer, port_id);

View File

@@ -71,6 +71,9 @@ struct IENetwork {
IENetwork() = default;
void convertToOldRepresentation();
std::string getOVNameForTensor(const std::string& orig_name);
std::string getOVNameForOperation(const std::string& orig_name);
};

View File

@@ -175,6 +175,8 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
void load_from_buffer(const char*xml, size_t xml_size, uint8_t*bin, size_t bin_size) except +
object getFunction() except +
void convertToOldRepresentation() except +
string getOVNameForTensor(const string &) except +
string getOVNameForOperation(const string &) except +
cdef cppclass InferRequestWrap:
double exec_time;

View File

@@ -247,3 +247,61 @@ def test_multi_out_data():
assert net.outputs["28/Reshape"].name == "28/Reshape" and net.outputs["28/Reshape"].shape == [1, 5184]
assert net.outputs["fc_out"].name == "fc_out" and net.outputs["fc_out"].shape == [1, 10]
pass
def test_tensor_names():
model = """
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="relu_t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
"""
ie = IECore()
weights = b''
net = ie.read_network(model=model.encode('utf-8'), weights=weights, init_from_buffer=True)
assert net.get_ov_name_for_tensor("relu_t") == "activation"
assert net.get_ov_name_for_tensor("identity_t") == "activation"
assert net.get_ov_name_for_tensor("input") == "in1"
assert net.get_ov_name_for_operation("output") == "activation"

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -189,6 +189,32 @@ public:
*/
void serialize(const std::string& xmlPath, const std::string& binPath = {}) const;
/**
* @brief Method maps a framework tensor name to an OpenVINO name
*
* @param orig_name Framework tensor name
*
* @return OpenVINO name
*/
std::string getOVNameForTensor(const std::string& orig_name) const {
std::string ov_name;
CALL_STATUS_FNC(getOVNameForTensor, ov_name, orig_name);
return ov_name;
}
/**
* @brief Method maps a framework operation name to an OpenVINO name
*
* @param orig_name Framework operation name
*
* @return OpenVINO name
*/
std::string getOVNameForOperation(const std::string& orig_name) const {
std::string ov_name;
CALL_STATUS_FNC(getOVNameForOperation, ov_name, orig_name);
return ov_name;
}
protected:
IE_SUPPRESS_DEPRECATED_START
/**

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -69,9 +69,11 @@ public:
*
* For single and multiple outputs networks.
*
* This method need to be called to find output names for using them later
* This method needs to be called to find out the OpenVINO output names for use later
* when calling InferenceEngine::InferRequest::GetBlob or InferenceEngine::InferRequest::SetBlob
*
* If you want to use framework names, you can use the InferenceEngine::ICNNNetwork::getOVNameForTensor or
* InferenceEngine::ICNNNetwork::getOVNameForOperation methods to map framework names to OpenVINO names.
*
* @param out Reference to the OutputsDataMap object
*/
@@ -82,9 +84,12 @@ public:
* object.
*
* For single and multiple inputs networks.
* This method need to be called to find out input names for using them later
* This method needs to be called to find out the OpenVINO input names for use later
* when calling InferenceEngine::InferRequest::SetBlob
*
* If you want to use framework names, you can use the InferenceEngine::ICNNNetwork::getOVNameForTensor or
* InferenceEngine::ICNNNetwork::getOVNameForOperation methods to map framework names to OpenVINO names.
*
* @param inputs Reference to InputsDataMap object.
*/
virtual void getInputsInfo(InputsDataMap& inputs) const noexcept = 0;
@@ -179,6 +184,38 @@ public:
virtual StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept = 0;
/**
* @brief Method maps a framework tensor name to an OpenVINO name
*
* @param ov_name OpenVINO name
* @param orig_name Framework tensor name
* @param resp Pointer to the response message that holds a description of an error if any occurred
*
* @return Status code of the operation
*/
virtual StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
(void) ov_name;
(void) orig_name;
(void) resp;
return NOT_IMPLEMENTED;
}
/**
* @brief Method maps a framework operation name to an OpenVINO name
*
* @param ov_name OpenVINO name
* @param orig_name Framework operation name
* @param resp Pointer to the response message that holds a description of an error if any occurred
*
* @return Status code of the operation
*/
virtual StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
(void) ov_name;
(void) orig_name;
(void) resp;
return NOT_IMPLEMENTED;
}
/**
* @brief A virtual destructor.
*/

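The virtual methods above default to NOT_IMPLEMENTED; CNNNetwork wraps them with CALL_STATUS_FNC, which turns non-OK statuses into exceptions. For code holding only an ICNNNetwork reference, the status-code pattern looks roughly like this sketch; the helper function and its error handling are illustrative, not part of the diff.

#include <ie_icnn_network.hpp>
#include <stdexcept>
#include <string>

// Illustrative helper: resolve a framework tensor name through the raw interface.
std::string resolveTensorName(const InferenceEngine::ICNNNetwork& network,
                              const std::string& origName) {
    std::string ovName;
    InferenceEngine::ResponseDesc resp;
    InferenceEngine::StatusCode sts = network.getOVNameForTensor(ovName, origName, &resp);
    if (sts != InferenceEngine::OK) {
        // resp.msg holds the description, e.g. the NOT_FOUND message built by
        // DescriptionBuffer in the nGraph implementation below.
        throw std::runtime_error(resp.msg);
    }
    return ovName;
}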
View File

@@ -18,7 +18,9 @@ void CreateResultOp(Program& p, const std::shared_ptr<ngraph::op::v0::Result>& op
p.ValidateInputs(op, {1});
auto prev = op->get_input_node_shared_ptr(0);
NGRAPH_SUPPRESS_DEPRECATED_START
auto inputID = op->get_input_source_output(0).get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (inputID.empty()) {
inputID = prev->get_friendly_name();
if (prev->get_output_size() > 1) {

View File

@@ -24,6 +24,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
for (size_t i = 0; i < op->get_output_size(); i++) {
std::string outLayerName = layerName + (is_single_out_split ? "" : "." + std::to_string(i));
const auto outLayerDims = op->get_output_shape(i);
NGRAPH_SUPPRESS_DEPRECATED_START
if (outLayerDims.size() != startOffset.size()) {
THROW_IE_EXCEPTION << "Invalid dimesions in split layer: " << op->get_friendly_name()
<< " output: " << op->get_output_tensor_name(i);
@@ -34,6 +35,7 @@ void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) {
<< " output: " << op->get_output_tensor_name(i);
}
}
NGRAPH_SUPPRESS_DEPRECATED_END
auto outTensor = CldnnTensorFromIEDims(outLayerDims, 1);
auto offsetTensor = CldnnTensorFromIEDims(startOffset, 0);

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -122,6 +122,12 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
std::string outName = layer->get_friendly_name();
IE_ASSERT(layer->get_output_size() == 1); // Parameter has only a single output port
// map original names to the OpenVINO name
_opNames[outName] = outName;
for (const auto& name : layer->get_output_tensor(0).get_names()) {
_tensorNames[name] = outName;
}
DataPtr& ptr = _data[outName];
IE_ASSERT(ptr); // Data must be allocated after the reshape method
@@ -139,7 +145,10 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(
}
CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
if (network.getFunction() == nullptr) {
IE_SUPPRESS_DEPRECATED_START
const ICNNNetwork& iNetwork = network;
const auto net = dynamic_cast<const CNNNetworkNGraphImpl*>(&iNetwork);
if (network.getFunction() == nullptr || !net) {
THROW_IE_EXCEPTION << "Cannot create CNNNetwork with nGraph from legacy network format!";
}
@@ -147,6 +156,9 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
InputsDataMap inputs = network.getInputsInfo();
OutputsDataMap outputs = network.getOutputsInfo();
_opNames = net->_opNames;
_tensorNames = net->_tensorNames;
for (const auto& outputInfo : outputs) {
const auto& name = outputInfo.second->getName();
DataPtr output = std::make_shared<Data>(name, outputInfo.second->getTensorDesc());
@@ -164,6 +176,7 @@ CNNNetworkNGraphImpl::CNNNetworkNGraphImpl(const CNNNetwork& network) {
info->setLayout(inputInfo.second->getLayout());
_inputData[name] = info;
}
IE_SUPPRESS_DEPRECATED_END
}
void CNNNetworkNGraphImpl::setInputInfo(InputInfo::Ptr data) {
@@ -204,19 +217,22 @@ StatusCode CNNNetworkNGraphImpl::addOutput(const std::string& layerName, size_t
try {
for (const auto & layer : _ngraph_function->get_ops()) {
if (layer->get_friendly_name() == layerName) {
// Result can have the same name as previous operation
if (layer->get_friendly_name() == layerName && !std::dynamic_pointer_cast<ngraph::op::Result>(layer)) {
std::string outputName = layerName;
if (layer->outputs().size() != 1) {
outputName += "." + std::to_string(outputIndex);
}
// Check that we don't have a result for the output port
for (const auto& port : layer->output(outputIndex).get_target_inputs()) {
if (dynamic_cast<ngraph::op::Result*>(port.get_node()))
return OK;
}
auto result = make_shared<::ngraph::op::Result>(layer->output(outputIndex));
result->set_friendly_name(outputName);
_ngraph_function->add_results({result});
std::string outputName = layerName;
if (layer->outputs().size() != 1) {
outputName += "." + std::to_string(outputIndex);
}
if (_outputData.count(outputName) == 0) {
reshape();
}
@@ -237,6 +253,17 @@ void CNNNetworkNGraphImpl::addOutput(const ::ngraph::Output<::ngraph::Node> & output
createDataForResult(output, dataName, data);
_data[dataName] = data;
_outputData[dataName] = data;
// Save original framework names
for (const auto& name : output.get_tensor().get_names()) {
_tensorNames[name] = dataName;
}
for (const auto consumerInput : output.get_target_inputs()) {
const auto &consumerLayer = consumerInput.get_node()->shared_from_this();
if (std::dynamic_pointer_cast<ngraph::op::Result>(consumerLayer)) {
_opNames[consumerLayer->get_friendly_name()] = dataName;
}
}
}
size_t CNNNetworkNGraphImpl::getBatchSize() const noexcept {
@@ -391,7 +418,7 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
ResponseDesc* resp) const noexcept {
try {
std::map<std::string, ngraph::OpSet> custom_opsets;
for (auto extension : _ie_extensions) {
for (const auto& extension : _ie_extensions) {
auto opset = extension->getOpSets();
custom_opsets.insert(begin(opset), end(opset));
}
@@ -410,6 +437,20 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
return OK;
}
StatusCode CNNNetworkNGraphImpl::getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
if (_tensorNames.find(orig_name) == _tensorNames.end())
return DescriptionBuffer(NOT_FOUND, resp) << "Framework tensor with name \"" << orig_name << "\" was not mapped to OpenVINO data!";
ov_name = _tensorNames.at(orig_name);
return OK;
}
StatusCode CNNNetworkNGraphImpl::getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept {
if (_opNames.find(orig_name) == _opNames.end())
return DescriptionBuffer(NOT_FOUND, resp) << "Framework operation with name \"" << orig_name << "\" was not mapped to OpenVINO data!";
ov_name = _opNames.at(orig_name);
return OK;
}
StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept {
try {
if (getBatchSize() == size) return OK;

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -11,6 +11,7 @@
#include <algorithm>
#include <functional>
#include <unordered_map>
#include <map>
#include <memory>
#include <string>
@@ -81,6 +82,10 @@ public:
StatusCode serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const
noexcept override;
StatusCode getOVNameForTensor(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override;
StatusCode getOVNameForOperation(std::string& ov_name, const std::string& orig_name, ResponseDesc* resp) const noexcept override;
// used by convertFunctionToICNNNetwork from legacy library
std::map<std::string, DataPtr> _data;
protected:
@@ -91,6 +96,8 @@ private:
InferenceEngine::InputsDataMap _inputData;
std::map<std::string, DataPtr> _outputData;
const std::vector<IExtensionPtr> _ie_extensions;
std::unordered_map<std::string, std::string> _opNames;
std::unordered_map<std::string, std::string> _tensorNames;
/**
* @brief Create DataPtr for nGraph operation

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2018-2020 Intel Corporation
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -1876,7 +1876,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
cnnLayer->outData.clear();
continue;
}
NGRAPH_SUPPRESS_DEPRECATED_START
auto outName = layer->output(i).get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (outName.empty()) {
outName = ngraph::op::util::create_ie_output_name(layer->output(i));
}
@@ -1930,7 +1932,9 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
if (std::dynamic_pointer_cast<::ngraph::op::Result>(layer)) {
IE_ASSERT(layer->get_input_size() == 1);
const auto &input = layer->input_value(0);
NGRAPH_SUPPRESS_DEPRECATED_START
auto name = input.get_tensor().get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
if (!name.empty())
cnnNetworkImpl->addOutput(name);
else

View File

@@ -601,7 +601,7 @@ void V10Parser::parsePreProcess(CNNNetwork& network, const pugi::xml_node& root,
}
V10Parser::GenericLayerParams V10Parser::XmlDeserializer::parseGenericParams(const pugi::xml_node& node) {
const auto parsePort = [](const pugi::xml_node& parentNode,
const auto parsePort = [this](const pugi::xml_node& parentNode,
const GenericLayerParams& params,
bool input) -> GenericLayerParams::LayerPortData {
GenericLayerParams::LayerPortData port;
@@ -626,6 +626,12 @@ V10Parser::GenericLayerParams V10Parser::XmlDeserializer::parseGenericParams(con
type = InferenceEngine::details::convertPrecision(preStr);
}
port.precision = type;
std::vector<std::string> names;
if (getParameters<std::string>(parentNode, "names", names)) {
for (const auto& name : names) {
port.names.emplace(name);
}
}
return port;
};
GenericLayerParams params;
@@ -823,6 +829,10 @@ std::shared_ptr<ngraph::Node> V10Parser::XmlDeserializer::createNode(
}
ngraphNode->set_friendly_name(params.name);
for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) {
if (!params.outputPorts[i].names.empty())
ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names);
}
return ngraphNode;
}

View File

@@ -69,6 +69,7 @@ private:
// Precision and dimensions are needed only for GenericIE op
ngraph::element::Type_t precision;
SizeVector dims;
std::unordered_set<std::string> names;
};
size_t layerId;
std::string version;
@@ -355,4 +356,4 @@ private:
#endif // IR_READER_V10
} // namespace InferenceEngine
} // namespace InferenceEngine

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -129,8 +129,10 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr<ngraph:
copy_runtime_info(ti, concat);
// set output name to Tensor to store it for ngraph to cnn conversion
NGRAPH_SUPPRESS_DEPRECATED_START
concat->output(0).get_tensor().set_name(
op::util::create_ie_output_name(ti->output(concat_desc->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
// connect the Concat layer to the corresponding TI outputs
for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) {
input.replace_source_output(concat);
@@ -140,7 +142,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr<ngraph:
std::shared_ptr<opset4::Result> result = body_functions[0]->get_results().at(concat_desc->m_body_value_index);
const auto& input_to_res = result->get_input_source_output(0);
// set output name to Tensor to store it for ngraph to cnn conversion
NGRAPH_SUPPRESS_DEPRECATED_START
input_to_res.get_tensor().set_name(op::util::create_ie_output_name(ti->output(concat_desc->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
for (auto &input : ti->output(concat_desc->m_output_index).get_target_inputs()) {
input.replace_source_output(input_to_res);
}
@@ -153,7 +157,9 @@ bool ngraph::pass::UnrollTensorIterator::run_on_function(std::shared_ptr<ngraph:
const auto& in_value = result->input_value(0);
// set output name to Tensor to store it for ngraph to cnn conversion
NGRAPH_SUPPRESS_DEPRECATED_START
in_value.get_tensor().set_name(op::util::create_ie_output_name(ti->output(output_desc->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
for (const auto &input : ti->output(output_desc->m_output_index).get_target_inputs()) {
input.replace_source_output(result->get_input_source_output(0));
}

View File

@@ -1,4 +1,4 @@
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
@@ -180,7 +180,9 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe
for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
input.replace_source_output(outputs[i]->output(0));
}
NGRAPH_SUPPRESS_DEPRECATED_START
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
}
}
@@ -334,7 +336,9 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ
for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
input.replace_source_output(outputs[i]->output(0));
}
NGRAPH_SUPPRESS_DEPRECATED_START
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
}
}
@@ -489,7 +493,9 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ
for (const auto &input : ti->output(ordered_out_descs[i]->m_output_index).get_target_inputs()) {
input.replace_source_output(outputs[i]->output(0));
}
NGRAPH_SUPPRESS_DEPRECATED_START
outputs[i]->get_output_tensor(0).set_name(op::util::create_ie_output_name(ti->output(ordered_out_descs[i]->m_output_index)));
NGRAPH_SUPPRESS_DEPRECATED_END
}
}

View File

@@ -662,6 +662,15 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
port.append_attribute("id").set_value(port_id++);
port.append_attribute("precision")
.set_value(get_output_precision_name(o).c_str());
std::string names;
for (const auto& name : o.get_tensor().get_names()) {
if (!names.empty())
names += ", ";
names += name;
}
if (!names.empty()) {
port.append_attribute("names").set_value(names.c_str());
}
for (auto d : o.get_shape()) {
pugi::xml_node dim = port.append_child("dim");
dim.append_child(pugi::xml_node_type::node_pcdata)

View File

@@ -0,0 +1,58 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <file_utils.h>
#include <ie_api.h>
#include <ie_iextension.h>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "ie_core.hpp"
#include "ngraph/ngraph.hpp"
#include "transformations/serialize.hpp"
#include <ngraph/opsets/opset6.hpp>
class TensorNameSerializationTest : public CommonTestUtils::TestsCommon {
protected:
std::string test_name = GetTestName() + "_" + GetTimestamp();
std::string m_out_xml_path = test_name + ".xml";
std::string m_out_bin_path = test_name + ".bin";
void TearDown() override {
std::remove(m_out_xml_path.c_str());
std::remove(m_out_bin_path.c_str());
}
};
TEST_F(TensorNameSerializationTest, SerializeFunctionWithTensorNames) {
InferenceEngine::Core ie;
std::shared_ptr<ngraph::Function> function;
{
auto parameter = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
parameter->set_friendly_name("parameter");
parameter->get_output_tensor(0).set_names({"input"});
auto relu_prev = std::make_shared<ngraph::opset6::Relu>(parameter);
relu_prev->set_friendly_name("relu_prev");
relu_prev->get_output_tensor(0).set_names({"relu_prev_t", "identity_prev_t"});
auto relu = std::make_shared<ngraph::opset6::Relu>(relu_prev);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
const ngraph::ResultVector results{std::make_shared<ngraph::opset6::Result>(relu)};
results[0]->set_friendly_name("out");
ngraph::ParameterVector params{parameter};
function = std::make_shared<ngraph::Function>(results, params, "TensorNames");
}
InferenceEngine::CNNNetwork expected(function);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction(), true, true, true, true);
ASSERT_TRUE(success) << message;
}

View File

@@ -0,0 +1,89 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <string>
#include "ngraph_reader_tests.hpp"
TEST_F(NGraphReaderTests, ReadNetworkWithTensorNames) {
std::string model = R"V0G0N(
<net name="Network" version="10">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32" names="input">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32" names="relu_t, identity_t">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
</edges>
</net>
)V0G0N";
Core ie;
Blob::Ptr weights;
auto network = ie.ReadNetwork(model, weights);
auto function = network.getFunction();
auto inputs = network.getInputsInfo();
auto outputs = network.getOutputsInfo();
std::unordered_set<std::string> inNames;
for (const auto& in : inputs)
inNames.emplace(in.first);
std::unordered_set<std::string> outNames;
for (const auto& out : outputs)
outNames.emplace(out.first);
ASSERT_EQ(1, inputs.size());
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(1, function->get_results().size());
for (const auto& param : function->get_parameters()) {
ASSERT_TRUE(inNames.count(network.getOVNameForOperation(param->get_friendly_name())));
ASSERT_TRUE(!param->get_output_tensor(0).get_names().empty());
for (const auto& name : param->get_output_tensor(0).get_names())
ASSERT_TRUE(inNames.count(network.getOVNameForTensor(name)));
}
for (const auto& result : function->get_results()) {
ASSERT_TRUE(outNames.count(network.getOVNameForOperation(result->get_friendly_name())));
ASSERT_TRUE(!result->get_input_tensor(0).get_names().empty());
for (const auto& name : result->get_input_tensor(0).get_names())
ASSERT_TRUE(outNames.count(network.getOVNameForTensor(name)));
}
}

View File

@@ -0,0 +1,16 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/tensor_names.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
::testing::Values(CommonTestUtils::DEVICE_CPU),
TensorNamesTest::getTestCaseName);
} // namespace

View File

@@ -9,6 +9,7 @@
std::vector<std::string> disabledTestPatterns() {
return {
".*TensorNamesTest\\.CheckAddOutput.*",
// TODO: FIX BUG 31661
// TODO: support InferRequest in GNAPlugin
".*InferRequestTests\\.canRun3AsyncRequestsConsistentlyFromThreadsWithoutWait.*",

View File

@@ -0,0 +1,17 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/tensor_names.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
::testing::Values(CommonTestUtils::DEVICE_GNA),
TensorNamesTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,18 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/tensor_names.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
::testing::Values(CommonTestUtils::DEVICE_GPU),
TensorNamesTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,19 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "subgraph_tests/tensor_names.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
INSTANTIATE_TEST_CASE_P(smoke_Check, TensorNamesTest,
::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
TensorNamesTest::getTestCaseName);
} // namespace

View File

@@ -0,0 +1,166 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "shared_test_classes/subgraph/tensor_names.hpp"
#include <unordered_set>
namespace SubgraphTestsDefinitions {
TEST_P(TensorNamesTest, CheckTensorNames) {
cnnNetwork = InferenceEngine::CNNNetwork{function};
ConfigureNetwork();
auto inputs = cnnNetwork.getInputsInfo();
auto outputs = cnnNetwork.getOutputsInfo();
std::unordered_set<std::string> inNames;
for (const auto& in : inputs)
inNames.emplace(in.first);
std::unordered_set<std::string> outNames;
for (const auto& out : outputs)
outNames.emplace(out.first);
for (const auto& param : function->get_parameters()) {
ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForOperation(param->get_friendly_name())));
for (const auto& name : param->get_output_tensor(0).get_names())
ASSERT_TRUE(inNames.count(cnnNetwork.getOVNameForTensor(name)));
}
for (const auto& result : function->get_results()) {
ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation(result->get_friendly_name())));
for (const auto& name : result->input_value(0).get_tensor().get_names())
ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(name)));
}
executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration);
inferRequest = executableNetwork.CreateInferRequest();
for (const auto& param : function->get_parameters()) {
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name())));
for (const auto& name : param->get_output_tensor(0).get_names())
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name)));
}
for (const auto& result : function->get_results()) {
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name())));
for (const auto& name : result->get_input_tensor(0).get_names()) {
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name)));
}
}
}
TEST_P(TensorNamesTest, CheckTensorNamesAfterClone) {
cnnNetwork = InferenceEngine::CNNNetwork{function};
InferenceEngine::CNNNetwork clonedNet(static_cast<InferenceEngine::ICNNNetwork::Ptr>(cnnNetwork));
ConfigureNetwork();
auto inputs = clonedNet.getInputsInfo();
auto outputs = clonedNet.getOutputsInfo();
std::unordered_set<std::string> inNames;
for (const auto& in : inputs)
inNames.emplace(in.first);
std::unordered_set<std::string> outNames;
for (const auto& out : outputs)
outNames.emplace(out.first);
for (const auto& param : function->get_parameters()) {
ASSERT_TRUE(inNames.count(clonedNet.getOVNameForOperation(param->get_friendly_name())));
for (const auto& name : param->get_output_tensor(0).get_names())
ASSERT_TRUE(inNames.count(clonedNet.getOVNameForTensor(name)));
}
for (const auto& result : function->get_results()) {
ASSERT_TRUE(outNames.count(clonedNet.getOVNameForOperation(result->get_friendly_name())));
for (const auto& name : result->get_input_tensor(0).get_names()) {
ASSERT_TRUE(outNames.count(clonedNet.getOVNameForTensor(name)));
}
}
executableNetwork = core->LoadNetwork(clonedNet, targetDevice, configuration);
inferRequest = executableNetwork.CreateInferRequest();
for (const auto& param : function->get_parameters()) {
ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(param->get_friendly_name())));
for (const auto& name : param->get_output_tensor(0).get_names())
ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name)));
}
for (const auto& result : function->get_results()) {
ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForOperation(result->get_friendly_name())));
for (const auto& name : result->input_value(0).get_tensor().get_names())
ASSERT_NO_THROW(inferRequest.GetBlob(clonedNet.getOVNameForTensor(name)));
}
}
TEST_P(TensorNamesTest, CheckAddOutput) {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
cnnNetwork = InferenceEngine::CNNNetwork{function};
ConfigureNetwork();
auto inputs = cnnNetwork.getInputsInfo();
auto outputs = cnnNetwork.getOutputsInfo();
std::unordered_set<std::string> inNames;
for (const auto& in : inputs)
inNames.emplace(in.first);
std::unordered_set<std::string> outNames;
for (const auto& out : outputs)
outNames.emplace(out.first);
ASSERT_EQ(1, inputs.size());
ASSERT_EQ(1, outputs.size());
ASSERT_EQ(1, function->get_results().size());
// Check that relu_prev doesn't exist in output and input maps
ASSERT_THROW(cnnNetwork.getOVNameForOperation("relu_prev"), InferenceEngine::NotFound);
for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) {
ASSERT_THROW(cnnNetwork.getOVNameForOperation(tensor_name), InferenceEngine::NotFound);
}
// Add relu_prev as output
cnnNetwork.addOutput("relu_prev");
inputs = cnnNetwork.getInputsInfo();
outputs = cnnNetwork.getOutputsInfo();
inNames.clear();
for (const auto& in : inputs)
inNames.emplace(in.first);
outNames.clear();
for (const auto& out : outputs)
outNames.emplace(out.first);
ASSERT_EQ(1, inputs.size());
ASSERT_EQ(2, outputs.size());
ASSERT_EQ(2, function->get_results().size());
// Check that relu_prev exists in output map
ASSERT_FALSE(inNames.count(cnnNetwork.getOVNameForOperation("relu_prev")));
for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) {
ASSERT_FALSE(inNames.count(cnnNetwork.getOVNameForTensor(tensor_name)));
}
ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForOperation("relu_prev")));
for (const std::string& tensor_name : {"relu_prev_t", "identity_prev_t"}) {
ASSERT_TRUE(outNames.count(cnnNetwork.getOVNameForTensor(tensor_name)));
}
executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice, configuration);
inferRequest = executableNetwork.CreateInferRequest();
for (const auto& param : cnnNetwork.getFunction()->get_parameters()) {
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(param->get_friendly_name())));
for (const auto& name : param->get_output_tensor(0).get_names())
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name)));
}
for (const auto& result : cnnNetwork.getFunction()->get_results()) {
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForOperation(result->get_friendly_name())));
for (const auto& name : result->get_input_tensor(0).get_names()) {
ASSERT_NO_THROW(inferRequest.GetBlob(cnnNetwork.getOVNameForTensor(name)));
}
}
}
} // namespace SubgraphTestsDefinitions

View File

@@ -0,0 +1,28 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <tuple>
#include <string>
#include <vector>
#include <memory>
#include "shared_test_classes/base/layer_test_utils.hpp"
#include "ngraph_functions/builders.hpp"
namespace SubgraphTestsDefinitions {
typedef std::tuple<
std::string // Device name
> constResultParams;
class TensorNamesTest : public testing::WithParamInterface<constResultParams>,
virtual public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(testing::TestParamInfo<constResultParams> obj);
protected:
void SetUp() override;
};
} // namespace SubgraphTestsDefinitions

View File

@@ -0,0 +1,35 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "shared_test_classes/subgraph/tensor_names.hpp"
namespace SubgraphTestsDefinitions {
std::string TensorNamesTest::getTestCaseName(testing::TestParamInfo<constResultParams> obj) {
std::string targetDevice;
std::tie(targetDevice) = obj.param;
std::ostringstream result;
result << "TargetDevice=" << targetDevice;
return result.str();
}
void TensorNamesTest::SetUp() {
std::tie(targetDevice) = this->GetParam();
auto parameter = std::make_shared<ngraph::opset5::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
parameter->set_friendly_name("parameter");
parameter->get_output_tensor(0).set_names({"input"});
auto relu_prev = std::make_shared<ngraph::opset5::Relu>(parameter);
relu_prev->set_friendly_name("relu_prev");
relu_prev->get_output_tensor(0).set_names({"relu_prev_t", "identity_prev_t"});
auto relu = std::make_shared<ngraph::opset5::Relu>(relu_prev);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
const ngraph::ResultVector results{std::make_shared<ngraph::opset3::Result>(relu)};
results[0]->set_friendly_name("out");
ngraph::ParameterVector params{parameter};
function = std::make_shared<ngraph::Function>(results, params, "TensorNames");
}
} // namespace SubgraphTestsDefinitions

View File

@@ -248,6 +248,13 @@ std::pair<bool, std::string> compare_functions(
}
for (int i = 0; i < node1->outputs().size(); ++i) {
const auto& tensor1 = node1->output(i).get_tensor();
const auto& tensor2 = node2->output(i).get_tensor();
if (tensor1.get_names() != tensor2.get_names()) {
err_log << "Output tensors names are different for nodes: "
<< node1->get_friendly_name() << " and " << node2->get_friendly_name() << std::endl;
}
if (!node1->output(i).get_partial_shape().same_scheme(
node2->output(i).get_partial_shape())) {
err_log << "Different shape detected\n"

View File

@@ -17,6 +17,8 @@
#pragma once
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "ngraph/descriptor/input.hpp"

View File

@@ -18,6 +18,7 @@
#include <memory>
#include <string>
#include <unordered_set>
#include "ngraph/partial_shape.hpp"
#include "ngraph/shape.hpp"
@@ -44,8 +45,13 @@ namespace ngraph
Node* node,
size_t node_output_number);
NGRAPH_DEPRECATED("get_name() is deprecated! Please use get_names() instead.")
const std::string& get_name() const;
NGRAPH_DEPRECATED("set_name() is deprecated! Please use set_names() instead.")
void set_name(const std::string& name);
const std::unordered_set<std::string>& get_names() const;
void set_names(const std::unordered_set<std::string>& names);
void set_tensor_type(const element::Type& element_type, const PartialShape& pshape);
void set_element_type(const element::Type& element_type);
void set_partial_shape(const PartialShape& partial_shape);
@@ -68,6 +74,7 @@
size_t m_node_output_number{0};
std::string m_name;
std::unordered_set<std::string> m_names;
};
NGRAPH_API

View File

@@ -327,6 +327,8 @@ namespace ngraph
descriptor::Tensor& get_input_tensor(size_t i) const;
/// Returns the tensor name for output i
NGRAPH_DEPRECATED(
"The tensor name was deprecated. Use get_output_tensor(i).get_names() instead.")
const std::string& get_output_tensor_name(size_t i) const;
std::set<Input<Node>> get_output_target_inputs(size_t i) const;
@@ -347,6 +349,8 @@
const PartialShape& get_input_partial_shape(size_t i) const;
/// Returns the tensor name for input i
NGRAPH_DEPRECATED(
"The tensor name was deprecated. Use get_input_tensor(i).get_names() instead.")
const std::string& get_input_tensor_name(size_t i) const;
std::unordered_set<descriptor::Tensor*> liveness_new_list;

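With get_input_tensor_name and get_output_tensor_name deprecated above, call sites migrate to the name sets on the tensor descriptors. A small before/after sketch, assuming node is any ngraph node with at least one output:

#include <iostream>
#include <memory>
#include "ngraph/node.hpp"

void print_output_names(const std::shared_ptr<ngraph::Node>& node) {
    // Deprecated single-name accessor (now emits a deprecation warning):
    //   const std::string& name = node->get_output_tensor_name(0);

    // New API: one output tensor can carry several framework names.
    for (const auto& name : node->get_output_tensor(0).get_names())
        std::cout << name << "\n";
}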
View File

@@ -17,6 +17,7 @@
#pragma once
#include <cstring>
#include <unordered_set>
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/partial_shape.hpp"

View File

@@ -63,6 +63,7 @@ namespace ngraph
/// \brief Get tensor's unique name
/// \return tensor's name
NGRAPH_DEPRECATED("Only output ports have names")
const std::string& get_name() const;
/// \brief Get the stale value of the tensor. A tensor is stale if its data is

View File

@@ -42,11 +42,6 @@ descriptor::Tensor::Tensor(const element::Type& element_type,
{
}
void descriptor::Tensor::set_name(const string& name)
{
m_name = name;
}
void descriptor::Tensor::set_tensor_type(const element::Type& element_type,
const PartialShape& pshape)
{
@@ -90,13 +85,41 @@ size_t descriptor::Tensor::size() const
return shape_size(get_shape()) * m_element_type.size();
}
NGRAPH_SUPPRESS_DEPRECATED_START
void descriptor::Tensor::set_name(const string& name)
{
m_name = name;
}
const std::string& descriptor::Tensor::get_name() const
{
return m_name;
}
NGRAPH_SUPPRESS_DEPRECATED_END
const std::unordered_set<std::string>& descriptor::Tensor::get_names() const
{
return m_names;
}
void descriptor::Tensor::set_names(const std::unordered_set<std::string>& names)
{
m_names = names;
}
ostream& operator<<(ostream& out, const descriptor::Tensor& tensor)
{
out << "Tensor(" << tensor.get_name() << ")";
std::string names;
for (const auto& name : tensor.get_names())
{
if (!names.empty())
names += ", ";
names += name;
}
NGRAPH_SUPPRESS_DEPRECATED_START
if (names.empty())
names = tensor.get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
out << "Tensor(" << names << ")";
return out;
}

View File

@@ -924,7 +924,9 @@ bool ngraph::replace_output_update_name(Output<Node> output, const Output<Node>&
{
replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name());
// Update output tensor name
NGRAPH_SUPPRESS_DEPRECATED_START
replacement.get_tensor().set_name(output.get_node()->get_friendly_name());
NGRAPH_SUPPRESS_DEPRECATED_END
}
output.replace(replacement);
copy_runtime_info({replacement.get_node_shared_ptr(), output.get_node_shared_ptr()},

View File

@@ -143,6 +143,10 @@ std::shared_ptr<Node>
{
clone->add_control_dependency(cdep);
}
for (size_t i = 0; i < get_output_size(); i++)
{
clone->get_output_tensor(i).set_names(get_output_tensor(i).get_names());
}
return clone;
}
@@ -658,13 +662,6 @@ descriptor::Tensor& Node::get_input_tensor(size_t i) const
return input.get_tensor();
}
const string& Node::get_output_tensor_name(size_t i) const
{
NGRAPH_CHECK(
i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)");
return m_outputs[i].get_tensor().get_name();
}
size_t Node::get_input_size() const
{
return m_inputs.size();
@@ -690,6 +687,7 @@ const PartialShape& Node::get_input_partial_shape(size_t i) const
return m_inputs[i].get_partial_shape();
}
NGRAPH_SUPPRESS_DEPRECATED_START
const string& Node::get_input_tensor_name(size_t i) const
{
NGRAPH_CHECK(
@@ -697,6 +695,14 @@ const string& Node::get_input_tensor_name(size_t i) const
return m_inputs[i].get_tensor().get_name();
}
const string& Node::get_output_tensor_name(size_t i) const
{
NGRAPH_CHECK(
i < m_outputs.size(), "index '", i, "' out of range in get_output_tensor_name(size_t i)");
return m_outputs[i].get_tensor().get_name();
}
NGRAPH_SUPPRESS_DEPRECATED_END
bool Node::has_same_type(std::shared_ptr<const Node> node) const
{
if (get_output_size() != node->get_output_size())

View File

@@ -65,10 +65,12 @@ runtime::HostTensor::HostTensor(const std::string& name)
{
}
NGRAPH_SUPPRESS_DEPRECATED_START
runtime::HostTensor::HostTensor(const Output<Node>& value)
: HostTensor(value.get_element_type(), value.get_partial_shape(), value.get_tensor().get_name())
{
}
NGRAPH_SUPPRESS_DEPRECATED_END
void runtime::HostTensor::allocate_buffer()
{
@@ -101,11 +103,13 @@ void runtime::HostTensor::allocate_buffer()
}
}
NGRAPH_SUPPRESS_DEPRECATED_START
runtime::HostTensor::HostTensor(const std::shared_ptr<op::v0::Constant>& constant)
: HostTensor(constant->output(0).get_tensor().get_name())
{
initialize(constant);
}
NGRAPH_SUPPRESS_DEPRECATED_END
void runtime::HostTensor::initialize(const std::shared_ptr<op::v0::Constant>& constant)
{

View File

@@ -49,7 +49,9 @@ size_t runtime::Tensor::get_size_in_bytes() const
const std::string& runtime::Tensor::get_name() const
{
NGRAPH_SUPPRESS_DEPRECATED_START
return m_descriptor->get_name();
NGRAPH_SUPPRESS_DEPRECATED_END
}
bool runtime::Tensor::get_stale() const

View File

@@ -23,6 +23,7 @@
#include "gtest/gtest.h"
#include "ngraph/function.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset6.hpp"
#include "ngraph/pass/manager.hpp"
#include "pass/liveness.hpp"
#include "util/test_tools.hpp"
@@ -91,3 +92,23 @@ TEST(tensor, output_flag)
EXPECT_TRUE(op::is_output(f0->get_output_op(i)));
}
}
TEST(tensor, tensor_names)
{
auto arg0 = make_shared<opset6::Parameter>(element::f32, Shape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = make_shared<opset6::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f0 = make_shared<Function>(relu, ParameterVector{arg0});
ASSERT_EQ(arg0->get_output_tensor(0).get_names(), relu->get_input_tensor(0).get_names());
ASSERT_EQ(arg0->get_output_tensor(0).get_names(),
relu->input_value(0).get_tensor().get_names());
ASSERT_EQ(f0->get_result()->get_input_tensor(0).get_names(),
relu->get_output_tensor(0).get_names());
ASSERT_EQ(f0->get_result()->input_value(0).get_tensor().get_names(),
relu->get_output_tensor(0).get_names());
}