[CPU] Added support for network inputs and outputs with the same name. (#5000)

This commit is contained in:
Maksim Kutakov 2021-05-28 16:45:44 +03:00 committed by GitHub
parent b1e7a5cdb1
commit ad0e0c9f7c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 190 additions and 68 deletions

View File

@ -215,16 +215,18 @@ bool IInferRequestInternal::findInputAndOutputBlobByName(const std::string& name
[&](const std::pair<std::string, DataPtr>& pair) { [&](const std::pair<std::string, DataPtr>& pair) {
return pair.first == name; return pair.first == name;
}); });
if (foundOutputPair == std::end(_networkOutputs) && (foundInputPair == std::end(_networkInputs))) { bool retVal;
IE_THROW(NotFound) << "Failed to find input or output with name: \'" << name << "\'";
}
if (foundInputPair != std::end(_networkInputs)) { if (foundInputPair != std::end(_networkInputs)) {
foundInput = foundInputPair->second; foundInput = foundInputPair->second;
return true; retVal = true;
} else { } else if (foundOutputPair != std::end(_networkOutputs)) {
foundOutput = foundOutputPair->second; foundOutput = foundOutputPair->second;
return false; retVal = false;
} else {
IE_THROW(NotFound) << "Failed to find input or output with name: \'" << name << "\'";
} }
return retVal;
} }
void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, const std::string& name, bool isInput, const SizeVector& refDims) const { void IInferRequestInternal::checkBlob(const Blob::Ptr& blob, const std::string& name, bool isInput, const SizeVector& refDims) const {

View File

@ -738,7 +738,7 @@ void MKLDNNGraph::PushInputData(const std::string& name, const InferenceEngine::
} }
} }
void MKLDNNGraph::PullOutputData(BlobMap &out) { void MKLDNNGraph::PullOutputData(const BlobMap &out) {
if (!IsReady()) if (!IsReady())
IE_THROW() << "Wrong state. Topology not ready."; IE_THROW() << "Wrong state. Topology not ready.";
@ -746,22 +746,12 @@ void MKLDNNGraph::PullOutputData(BlobMap &out) {
auto name = outputMap.first; auto name = outputMap.first;
auto node = outputMap.second; auto node = outputMap.second;
const MKLDNNMemory& intr_blob = node->getParentEdgeAt(0)->getMemory(); const MKLDNNMemory& intr_blob = node->getParentEdgeAt(0)->getMemory();
if (out.find(name) == out.end()) {
// TODO [NM]: Do we really need this path? if (!out.count(name)) {
// TODO: Create blob from MemoryDesc IE_THROW(Unexpected) << "The network outputs do not contain mkldnn graph output node name: \"" << name << "\"";
Blob::Ptr outBlob = make_shared_blob<float>({Precision::FP32, node->getParentEdgeAt(0)->getDims().ToSizeVector(),
TensorDesc::getLayoutByDims(node->getParentEdgeAt(0)->getDims().ToSizeVector())},
reinterpret_cast<float*>(intr_blob.GetData()));
out[name] = outBlob;
} }
Blob::Ptr &ext_blob = out[name]; const Blob::Ptr &ext_blob = out.at(name);
// TODO: Why we allow allocation of output memory inside Infer call??
// Suggestion is to disable this behaviour
if (ext_blob->buffer() == nullptr) {
ext_blob->allocate();
}
auto srcPrec = MKLDNNExtensionUtils::DataTypeToIEPrecision(intr_blob.GetDataType()); auto srcPrec = MKLDNNExtensionUtils::DataTypeToIEPrecision(intr_blob.GetDataType());
auto dstPrec = ext_blob->getTensorDesc().getPrecision(); auto dstPrec = ext_blob->getTensorDesc().getPrecision();

View File

@ -55,7 +55,7 @@ public:
} }
void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in); void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in);
void PullOutputData(InferenceEngine::BlobMap &out); void PullOutputData(const InferenceEngine::BlobMap &out);
void Infer(MKLDNNInferRequest* request = nullptr, int batch = -1); void Infer(MKLDNNInferRequest* request = nullptr, int batch = -1);
@ -79,6 +79,14 @@ public:
return outputNodesMap; return outputNodesMap;
} }
// Returns true when the graph owns an input node registered under `name`.
// Needed because network inputs and outputs may share the same name.
bool hasInputWithName(const std::string& name) const {
return inputNodesMap.count(name);
}
// Returns true when the graph owns an output node registered under `name`.
bool hasOutputWithName(const std::string& name) const {
return outputNodesMap.count(name);
}
mkldnn::engine getEngine() const { mkldnn::engine getEngine() const {
return eng; return eng;
} }

View File

@ -34,6 +34,8 @@ MKLDNNPlugin::MKLDNNInferRequest::MKLDNNInferRequest(InferenceEngine::InputsData
if (execNetwork->_graphs.size() == 0) if (execNetwork->_graphs.size() == 0)
IE_THROW() << "No graph was found"; IE_THROW() << "No graph was found";
graph = &(execNetwork->GetGraph()._graph); graph = &(execNetwork->GetGraph()._graph);
// Allocate all input blobs
for (const auto& it : _networkInputs) { for (const auto& it : _networkInputs) {
MKLDNNInferRequest::GetBlob(it.first); MKLDNNInferRequest::GetBlob(it.first);
} }
@ -210,10 +212,9 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
InferenceEngine::Blob::Ptr data; InferenceEngine::Blob::Ptr data;
InferenceEngine::BlobMap blobs; if (graph->hasInputWithName(name)) {
graph->getInputBlobs(blobs); InferenceEngine::BlobMap blobs;
graph->getInputBlobs(blobs);
if (blobs.find(name) != blobs.end()) {
// ROI blob is returned only if it was set previously. // ROI blob is returned only if it was set previously.
auto it = _preProcData.find(name); auto it = _preProcData.find(name);
if (it != _preProcData.end()) { if (it != _preProcData.end()) {
@ -221,60 +222,77 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
return data; return data;
} }
if (_inputs.find(name) != _inputs.end()) { if (_inputs.find(name) == _inputs.end()) {
data = _inputs[name]; InferenceEngine::TensorDesc desc = blobs[name]->getTensorDesc();
checkBlob(data, name, true);
return data;
}
InferenceEngine::TensorDesc desc = blobs[name]->getTensorDesc(); if (_networkInputs.find(name) != _networkInputs.end()) {
if (_networkInputs.find(name) != _networkInputs.end()) { InferenceEngine::Layout l = _networkInputs[name]->getLayout();
InferenceEngine::Layout l = _networkInputs[name]->getLayout(); InferenceEngine::Precision p = _networkInputs[name]->getPrecision();
InferenceEngine::Precision p = _networkInputs[name]->getPrecision(); InferenceEngine::SizeVector dims = _networkInputs[name]->getTensorDesc().getDims();
InferenceEngine::SizeVector dims = _networkInputs[name]->getTensorDesc().getDims();
desc = InferenceEngine::TensorDesc(p, dims, l); desc = InferenceEngine::TensorDesc(p, dims, l);
} }
_inputs[name] = make_blob_with_precision(desc); _inputs[name] = make_blob_with_precision(desc);
_inputs[name]->allocate(); _inputs[name]->allocate();
if (blobs[name]->getTensorDesc() == desc && if (blobs[name]->getTensorDesc() == desc &&
graph->_meanImages.find(name) == graph->_meanImages.end() && !graph->getProperty().batchLimit) { graph->_meanImages.find(name) == graph->_meanImages.end() && !graph->getProperty().batchLimit) {
externalPtr[name] = _inputs[name]->buffer(); externalPtr[name] = _inputs[name]->buffer();
}
} }
data = _inputs[name]; data = _inputs[name];
checkBlob(data, name, true); checkBlob(data, name, true);
return data;
} }
blobs.clear();
graph->getOutputBlobs(blobs);
if (blobs.find(name) != blobs.end()) {
if (_outputs.find(name) != _outputs.end()) {
data = _outputs[name];
checkBlob(data, name, false);
return data;
}
InferenceEngine::TensorDesc desc = _networkOutputs[name]->getTensorDesc(); if (graph->hasOutputWithName(name)) {
desc.setPrecision(normalizeToSupportedPrecision(desc.getPrecision())); InferenceEngine::BlobMap blobs;
graph->getOutputBlobs(blobs);
if (_outputs.find(name) == _outputs.end()) {
if (!data) {
InferenceEngine::TensorDesc desc = _networkOutputs[name]->getTensorDesc();
desc.setPrecision(normalizeToSupportedPrecision(desc.getPrecision()));
// WA: need to avoid exception thrown when we compare blocking desc in SetBlob // WA: need to avoid exception thrown when we compare blocking desc in SetBlob
// in situation if we push output blobs as inputs for next network (in Hetero plugin) // in situation if we push output blobs as inputs for next network (in Hetero plugin)
// it may be that output tensor desc will be different from real input tensor desc for next network // it may be that output tensor desc will be different from real input tensor desc for next network
// because the optimal descriptor was chosen (e.g. inPlace case for Split node) // because the optimal descriptor was chosen (e.g. inPlace case for Split node)
auto currBlockDesc = InferenceEngine::BlockingDesc(desc.getBlockingDesc().getBlockDims(), desc.getBlockingDesc().getOrder()); auto currBlockDesc = InferenceEngine::BlockingDesc(desc.getBlockingDesc().getBlockDims(), desc.getBlockingDesc().getOrder());
desc = InferenceEngine::TensorDesc(desc.getPrecision(), desc.getDims(), currBlockDesc); desc = InferenceEngine::TensorDesc(desc.getPrecision(), desc.getDims(), currBlockDesc);
_outputs[name] = make_blob_with_precision(desc); data = make_blob_with_precision(desc);
_outputs[name]->allocate(); data->allocate();
if (blobs[name]->getTensorDesc() == desc && !graph->getProperty().batchLimit) { } else {
externalPtr[name] = _outputs[name]->buffer(); const auto& expectedTensorDesc = blobs[name]->getTensorDesc();
if (expectedTensorDesc.getPrecision() != data->getTensorDesc().getPrecision()) {
IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name << " but expect blobs with different precision: "
<< data->getTensorDesc().getPrecision() << " for input and " << expectedTensorDesc.getPrecision()
<< " for output.";
}
if (expectedTensorDesc.getDims() != data->getTensorDesc().getDims()) {
IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name << " but expect blobs with different shapes.";
}
if (data->getTensorDesc().getLayout() != InferenceEngine::Layout::ANY && expectedTensorDesc.getLayout() != InferenceEngine::Layout::ANY &&
expectedTensorDesc.getBlockingDesc() != data->getTensorDesc().getBlockingDesc()) {
IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name
<< " but expect blobs with different blocking descriptors.";
}
}
_outputs[name] = data;
if (!externalPtr.count(name) && data->getTensorDesc() == blobs[name]->getTensorDesc() && !graph->getProperty().batchLimit) {
externalPtr[name] = data->buffer();
}
} }
data = _outputs[name]; data = _outputs[name];
checkBlob(data, name, false); checkBlob(data, name, false);
return data;
} }
IE_THROW() << "Cannot find blob with name: " << name; if (!data) {
IE_THROW() << "Cannot find blob with name: " << name;
}
return data;
} }
void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr &data) { void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const InferenceEngine::Blob::Ptr &data) {
@ -295,7 +313,9 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const In
InferenceEngine::InputInfo::Ptr foundInput; InferenceEngine::InputInfo::Ptr foundInput;
InferenceEngine::DataPtr foundOutput; InferenceEngine::DataPtr foundOutput;
size_t dataSize = data->size(); size_t dataSize = data->size();
if (findInputAndOutputBlobByName(name, foundInput, foundOutput)) { findInputAndOutputBlobByName(name, foundInput, foundOutput);
if (foundInput) {
if (foundInput->getPrecision() != data->getTensorDesc().getPrecision()) { if (foundInput->getPrecision() != data->getTensorDesc().getPrecision()) {
IE_THROW(ParameterMismatch) << "Failed to set input blob with precision: " IE_THROW(ParameterMismatch) << "Failed to set input blob with precision: "
<< data->getTensorDesc().getPrecision() << ", if CNNNetwork input blob precision is: " << foundInput->getPrecision(); << data->getTensorDesc().getPrecision() << ", if CNNNetwork input blob precision is: " << foundInput->getPrecision();
@ -346,7 +366,8 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const In
} }
_inputs[name] = data; _inputs[name] = data;
} }
} else { }
if (foundOutput) {
if (compoundBlobPassed) { if (compoundBlobPassed) {
IE_THROW(NotImplemented) IE_THROW(NotImplemented)
<< "cannot set compound blob: supported only for input pre-processing"; << "cannot set compound blob: supported only for input pre-processing";

View File

@ -208,7 +208,6 @@ protected:
* @param foundOutput A pointer to output DataPtr if found. * @param foundOutput A pointer to output DataPtr if found.
* @return `True` - if loaded network has input with provided name, * @return `True` - if loaded network has input with provided name,
* `false` - if loaded network has output with provided name * `false` - if loaded network has output with provided name
* @throws [parameter_mismatch] exception if input and output has the same name
* @throws [not_found] exception if there is no input and output layers with given name * @throws [not_found] exception if there is no input and output layers with given name
*/ */
bool findInputAndOutputBlobByName(const std::string& name, InputInfo::Ptr& foundInput, DataPtr& foundOutput) const; bool findInputAndOutputBlobByName(const std::string& name, InputInfo::Ptr& foundInput, DataPtr& foundOutput) const;

View File

@ -0,0 +1,14 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "subgraph_tests/parameter_result.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace SubgraphTestsDefinitions;
namespace {
// Instantiates the shared Parameter->Result subgraph test on the CPU device
// (covers the case where a network input and output share the same name).
INSTANTIATE_TEST_CASE_P(smoke_Check, ParameterResultSubgraphTest,
::testing::Values(CommonTestUtils::DEVICE_CPU),
ParameterResultSubgraphTest::getTestCaseName);
}  // namespace

View File

@ -0,0 +1,88 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <vector>

#include "shared_test_classes/subgraph/parameter_result.hpp"
#include "common_test_utils/test_constants.hpp"
using namespace SubgraphTestsDefinitions;
using namespace InferenceEngine;
namespace CPULayerTestsDefinitions {
// Runs inference several times, each time pushing a user-owned ("custom") blob
// through SetBlob, to check that the plugin correctly handles externally
// allocated input memory when an input and an output share the same name.
class ParameterResultCustomBlobTest : public ParameterResultSubgraphTest {
protected:
    void Infer() override {
        constexpr size_t inferIterations = 10lu;

        inferRequest = executableNetwork.CreateInferRequest();

        auto inputBlob = inputs.front();
        const size_t elementsCount = inputBlob->size();
        // Loop-invariant lookups: the network input info and name do not change
        // between iterations, so resolve them once.
        const auto& inputsInfo = cnnNetwork.getInputsInfo().begin()->second;
        const std::string inputName = cnnNetwork.getInputsInfo().begin()->first;
        const auto& tensorDesc = inputsInfo->getTensorDesc();
        for (size_t i = 0; i < inferIterations; ++i) {
            CommonTestUtils::fill_data_random<Precision::FP32>(inputBlob, 10, 0, 1, i);
            // RAII buffer instead of raw new[]/delete[]: the original leaked the
            // allocation if Infer()/Validate() threw. The vector outlives the
            // Infer() call, matching the original buffer lifetime.
            std::vector<float> customInpData(elementsCount);
            const auto* inpBlobData = inputBlob->buffer().as<const float*>();
            std::copy(inpBlobData, inpBlobData + elementsCount, customInpData.data());

            auto customBlob = make_shared_blob<float>(tensorDesc, customInpData.data(), elementsCount * sizeof(float));
            inferRequest.SetBlob(inputName, customBlob);
            inferRequest.Infer();
            ParameterResultSubgraphTest::Validate();
        }
    }
    void Validate() override {
        // Intentionally a no-op: validation is performed inside Infer() after
        // every iteration.
    }
};
TEST_P(ParameterResultCustomBlobTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
// Deliberately request mismatched precisions to demonstrate that different
// precisions cannot be set for an input and an output sharing one name.
// If it was possible, the input would have I8 precision and couldn't store data from the custom blob.
inPrc = Precision::I8;
outPrc = Precision::FP32;
Run();
}
namespace {
// CPU-only instantiation of the custom-blob variant of the test.
INSTANTIATE_TEST_CASE_P(smoke_Check_Custom_Blob, ParameterResultCustomBlobTest,
::testing::Values(CommonTestUtils::DEVICE_CPU),
ParameterResultSubgraphTest::getTestCaseName);
}  // namespace
// Re-runs inference multiple times while reusing the very same input/output
// blobs, verifying repeated executions stay correct when an input and an
// output share the same name.
class ParameterResultSameBlobTest : public ParameterResultSubgraphTest {
protected:
    void Infer() override {
        constexpr size_t inferIterations = 10lu;
        size_t iteration = 0;
        while (iteration < inferIterations) {
            ParameterResultSubgraphTest::Infer();
            ParameterResultSubgraphTest::Validate();
            ++iteration;
        }
    }
    void Validate() override {
        // Intentionally a no-op: Validate() is already invoked from Infer()
        // after each iteration.
    }
};
// Runs the shared Parameter->Result flow with the repeated-inference override.
TEST_P(ParameterResultSameBlobTest, CompareWithRefs) {
SKIP_IF_CURRENT_TEST_IS_DISABLED()
Run();
}
namespace {
// CPU-only instantiation of the same-blob variant of the test.
INSTANTIATE_TEST_CASE_P(smoke_Check_Same_Blob, ParameterResultSameBlobTest,
::testing::Values(CommonTestUtils::DEVICE_CPU),
ParameterResultSubgraphTest::getTestCaseName);
}  // namespace
} // namespace CPULayerTestsDefinitions