Fix for MKLDNN constant layers execution (#4632)

* Single mkldnn::engine for all MKLDNN graphs

* Fix for MKLDNN constant layers execution
This commit is contained in:
Vladislav Volkov 2021-03-05 16:28:41 +03:00 committed by GitHub
parent 278b52ca98
commit 121760476a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 24 additions and 15 deletions

View File

@ -169,9 +169,9 @@ void MKLDNNEdge::allocate(const void* mem_ptr) {
}
std::string MKLDNNEdge::name() const {
-    auto childPtr = getChild();
     auto parentPtr = getParent();
-    return childPtr->getName() + "<->" + parentPtr->getName();
+    auto childPtr = getChild();
+    return parentPtr->getName() + std::to_string(parent_port) + "<->" + childPtr->getName() + std::to_string(child_port);
}
void MKLDNNEdge::externalAllocate(MKLDNNWeightsSharing::Ptr weightsCache) {

View File

@ -6,6 +6,7 @@
#include <string>
#include <map>
#include <vector>
#include <tuple>
#include <unordered_set>
#include <limits>
#include <fstream>
@ -67,6 +68,8 @@ using namespace InferenceEngine::details;
typedef std::unordered_set<MKLDNNEdgePtr> edge_cluster_t;
typedef std::vector<edge_cluster_t> edge_clusters_t;
mkldnn::engine MKLDNNGraph::eng(mkldnn::engine::kind::cpu, 0);
template<typename NET>
void MKLDNNGraph::ApplyUnrollPasses(NET &net) {
OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, "MKLDNNGraph::ApplyUnrollPasses");
@ -453,15 +456,24 @@ void MKLDNNGraph::ExecuteConstantNodesOnly() {
auto acquireSharedOutputs = [this](MKLDNNNodePtr & graphNode) {
std::vector<shared_memory_ptr> outputs;
bool hasLocalAllocatedEdges = false;
bool hasExternalInvalidEdges = false;
for (size_t i = 0; i < graphNode->getChildEdges().size(); ++i) {
auto edgePtr = graphNode->getChildEdgeAt(i);
-            if (edgePtr && edgePtr->isUseExternalMemory()) {
-                outputs.emplace_back(weightsCache->get(edgePtr->name()));
+            if (edgePtr) {
+                if (edgePtr->isUseExternalMemory()) {
+                    auto ptr = weightsCache->get(edgePtr->name());
+                    outputs.emplace_back(ptr);
+                    if (!ptr->isValid())
+                        hasExternalInvalidEdges = true;
+                } else {
+                    hasLocalAllocatedEdges = true;
+                }
             }
         }
-        return outputs;
+        return std::make_tuple(hasExternalInvalidEdges, hasLocalAllocatedEdges, outputs);
};
for (auto &graphNode : graphNodes) {
@ -471,12 +483,10 @@ void MKLDNNGraph::ExecuteConstantNodesOnly() {
if (weightsCache) {
auto sharedOutputs = acquireSharedOutputs(graphNode);
-            if (std::find_if(sharedOutputs.begin(), sharedOutputs.end(),
-                             [](const shared_memory_ptr & ptr) {
-                                 return !ptr->isValid();
-                             }) != sharedOutputs.end()) {
+            if (std::get<0>(sharedOutputs) || std::get<1>(sharedOutputs)) {
                 graphNode->execute(stream);
-                for (auto & output : sharedOutputs)
+                for (auto & output : std::get<2>(sharedOutputs))
output->valid(true);
}
} else {

View File

@ -30,7 +30,7 @@ public:
Ready = 1,
};
-    MKLDNNGraph(mkldnn::engine eng = mkldnn::engine(mkldnn::engine::kind::cpu, 0)) : status(NotReady), eng(eng) {}
+    MKLDNNGraph() = default;
Status GetStatus() {
return status;
@ -172,7 +172,7 @@ protected:
graphEdges.clear();
_meanImages.clear();
}
-    Status status;
+    Status status { NotReady };
Config config;
// For dumping purposes. -1 - no counting, all other positive
@ -191,7 +191,7 @@ protected:
std::map<std::string, MeanImage> _meanImages;
std::string _name;
-    mkldnn::engine eng;
+    static mkldnn::engine eng;
void Replicate(const InferenceEngine::CNNNetwork &network, const MKLDNNExtensionManager::Ptr& extMgr);
void Replicate(const InferenceEngine::TensorIterator::Body &subgraph, const MKLDNNExtensionManager::Ptr& extMgr);

View File

@ -187,8 +187,7 @@ private:
} // namespace MKLDNNPlugin
MKLDNNTensorIteratorNode::MKLDNNTensorIteratorNode(InferenceEngine::CNNLayerPtr layer, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache) :
-        MKLDNNNode(layer, eng, cache),
-        sub_graph(eng) {}
+        MKLDNNNode(layer, eng, cache) {}
void MKLDNNTensorIteratorNode::getSupportedDescriptors() {
auto *ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());