[CPU] Get/Set Blob overhead has been eliminated. (#6737)

Maksim Kutakov 2021-07-27 13:22:09 +03:00 committed by GitHub
parent dc5f44e929
commit e47a85b427
3 changed files with 33 additions and 24 deletions
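
The change replaces the map-filling getters, which built a Blob wrapper for every input or output on each Get/Set Blob call, with per-name lookups that wrap only the requested blob. A minimal sketch of the pattern; Blob, Node, makeBlob, and inputNodesMap below are simplified stand-ins, not the plugin's real types:

    #include <map>
    #include <memory>
    #include <string>

    // Simplified stand-ins for the plugin types; makeBlob() models the cost of
    // getChildEdgeAt(0)->getBlob(), i.e. constructing a Blob wrapper per node.
    struct Blob { using Ptr = std::shared_ptr<Blob>; };
    using BlobMap = std::map<std::string, Blob::Ptr>;
    struct Node { Blob::Ptr makeBlob() const { return std::make_shared<Blob>(); } };

    static std::map<std::string, Node> inputNodesMap;

    // Before: every call materialized wrappers for ALL inputs, even though the
    // caller used exactly one of them.
    static void getInputBlobs(BlobMap& resp) {
        for (auto& it : inputNodesMap)
            resp[it.first] = it.second.makeBlob();
    }

    // After: a single lookup wraps only the requested blob; nullptr signals an
    // unknown name and leaves error reporting to the caller.
    static Blob::Ptr getInputBlob(const std::string& name) {
        auto itr = inputNodesMap.find(name);
        return itr != inputNodesMap.end() ? itr->second.makeBlob() : nullptr;
    }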

mkldnn_graph.cpp

@@ -965,16 +965,20 @@ Config MKLDNNGraph::getProperty() const {
     return config;
 }
 
-void MKLDNNGraph::getInputBlobs(InferenceEngine::BlobMap &resp) {
-    for (auto &it : inputNodesMap) {
-        resp[it.first] = it.second->getChildEdgeAt(0)->getBlob();
+Blob::Ptr MKLDNNGraph::getInputBlob(const std::string& name) {
+    auto itr = inputNodesMap.find(name);
+    if (itr != inputNodesMap.end()) {
+        return itr->second->getChildEdgeAt(0)->getBlob();
     }
+    return nullptr;
 }
 
-void MKLDNNGraph::getOutputBlobs(InferenceEngine::BlobMap &resp) {
-    for (auto &it : outputNodesMap) {
-        resp[it.first] = it.second->getParentEdgeAt(0)->getBlob();
+Blob::Ptr MKLDNNGraph::getOutputBlob(const std::string& name) {
+    auto itr = outputNodesMap.find(name);
+    if (itr != outputNodesMap.end()) {
+        return itr->second->getParentEdgeAt(0)->getBlob();
     }
+    return nullptr;
 }
 
 void MKLDNNGraph::RemoveEdge(MKLDNNEdgePtr& edge) {

mkldnn_graph.h

@@ -44,8 +44,8 @@ public:
     void setProperty(const std::map<std::string, std::string> &properties);
     Config getProperty() const;
 
-    void getInputBlobs(InferenceEngine::BlobMap &in_map);
-    void getOutputBlobs(InferenceEngine::BlobMap &out_map);
+    InferenceEngine::Blob::Ptr getInputBlob(const std::string& name);
+    InferenceEngine::Blob::Ptr getOutputBlob(const std::string& name);
 
     template<typename NET>
     void CreateGraph(NET &network,

mkldnn_infer_request.cpp
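
With the per-name getters, an unknown name surfaces as a null Blob::Ptr, which the infer request converts into an exception, as the hunks below show. A hypothetical caller following the same contract (graph and name are illustrative here, standing for an MKLDNNGraph pointer and a tensor name):

    // Caller-side sketch of the new contract: check for null, then throw.
    InferenceEngine::Blob::Ptr pBlob = graph->getInputBlob(name);
    if (!pBlob) {
        IE_THROW() << "MKLDNN graph doesn't contain input node with name: " << name;
    }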

@@ -213,8 +213,6 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
     InferenceEngine::Blob::Ptr data;
 
     if (graph->hasInputWithName(name)) {
-        InferenceEngine::BlobMap blobs;
-        graph->getInputBlobs(blobs);
         // ROI blob is returned only if it was set previously.
         auto it = _preProcData.find(name);
         if (it != _preProcData.end()) {
@@ -223,7 +221,12 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
         }
 
         if (_inputs.find(name) == _inputs.end()) {
-            InferenceEngine::TensorDesc desc = blobs[name]->getTensorDesc();
+            auto pBlob = graph->getInputBlob(name);
+            if (!pBlob) {
+                IE_THROW() << "MKLDNN graph doesn't contain input node with name: " << name;
+            }
+
+            InferenceEngine::TensorDesc desc = pBlob->getTensorDesc();
 
             if (_networkInputs.find(name) != _networkInputs.end()) {
                 InferenceEngine::Layout l = _networkInputs[name]->getLayout();
@@ -235,7 +238,7 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
 
             _inputs[name] = make_blob_with_precision(desc);
             _inputs[name]->allocate();
-            if (blobs[name]->getTensorDesc() == desc &&
+            if (pBlob->getTensorDesc() == desc &&
                 graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end() && !graph->getProperty().batchLimit) {
                 externalPtr[name] = _inputs[name]->buffer();
             }
@@ -258,9 +261,12 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
     }
 
     if (graph->hasOutputWithName(name)) {
-        InferenceEngine::BlobMap blobs;
-        graph->getOutputBlobs(blobs);
         if (_outputs.find(name) == _outputs.end()) {
+            auto pBlob = graph->getOutputBlob(name);
+            if (!pBlob) {
+                IE_THROW() << "MKLDNN graph doesn't contain output node with name: " << name;
+            }
+
             if (!data) {
                 InferenceEngine::TensorDesc desc = _networkOutputs[name]->getTensorDesc();
                 desc.setPrecision(normalizeToSupportedPrecision(desc.getPrecision()));
@@ -275,7 +281,7 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
                 data = make_blob_with_precision(desc);
                 data->allocate();
             } else {
-                const auto& expectedTensorDesc = blobs[name]->getTensorDesc();
+                const auto& expectedTensorDesc = pBlob->getTensorDesc();
 
                 if (expectedTensorDesc.getPrecision() != data->getTensorDesc().getPrecision()) {
                     IE_THROW(ParameterMismatch) << "Network input and output use the same name: " << name << " but expect blobs with different precision: "
@@ -295,7 +301,7 @@ InferenceEngine::Blob::Ptr MKLDNNPlugin::MKLDNNInferRequest::GetBlob(const std::
             }
 
             _outputs[name] = data;
-            if (!externalPtr.count(name) && data->getTensorDesc() == blobs[name]->getTensorDesc() && !graph->getProperty().batchLimit) {
+            if (!externalPtr.count(name) && data->getTensorDesc() == pBlob->getTensorDesc() && !graph->getProperty().batchLimit) {
                 externalPtr[name] = data->buffer();
             }
         }
@@ -366,12 +372,12 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const In
                 IE_THROW(ParameterMismatch) << "Failed to set input blob. Blocking descriptor mismatch.";
             }
 
-            InferenceEngine::BlobMap blobs;
-            graph->getInputBlobs(blobs);
-            if (blobs.find(name) == blobs.end())
+            auto pBlob = graph->getInputBlob(name);
+            if (!pBlob) {
                 IE_THROW() << "MKLDNN graph doesn't contain input node with name: " << name;
+            }
 
-            if (data->getTensorDesc() == blobs.at(name)->getTensorDesc() &&
+            if (data->getTensorDesc() == pBlob->getTensorDesc() &&
                 graph->_normalizePreprocMap.find(name) == graph->_normalizePreprocMap.end() && !graph->getProperty().batchLimit) {
                 externalPtr[name] = data->buffer();
             } else if (externalPtr.find(name) != externalPtr.end()) {
@@ -404,12 +410,11 @@ void MKLDNNPlugin::MKLDNNInferRequest::SetBlob(const std::string& name, const In
                 IE_THROW(ParameterMismatch) << "Failed to set output blob. Blocking descriptor mismatch.";
             }
 
-            InferenceEngine::BlobMap blobs;
-            graph->getOutputBlobs(blobs);
-            if (blobs.find(name) == blobs.end())
+            auto pBlob = graph->getOutputBlob(name);
+            if (!pBlob)
                 IE_THROW() << "MKLDNN graph doesn't contain output node with name: " << name;
 
-            if (data->getTensorDesc() == blobs.at(name)->getTensorDesc() &&
+            if (data->getTensorDesc() == pBlob->getTensorDesc() &&
                 !graph->getProperty().batchLimit) {
                 externalPtr[name] = data->buffer();
             } else if (externalPtr.find(name) != externalPtr.end()) {