MKLDNN nodes factory improvements (#2252)

Vladislav Volkov 2020-09-30 11:31:19 +03:00 committed by GitHub
parent fecc7eac90
commit 8f1ee05385
17 changed files with 110 additions and 95 deletions
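In outline, as read directly from the diff: node construction moves off MKLDNNNode's static, string-keyed registry (AddNode / GetNodesHolder / CreateNode) onto a Type-keyed MKLDNNNode::Factory singleton reached through MKLDNNNode::factory(). Registration goes through a new MKLDNNNode::Registrar helper and a reworked REG_MKLDNN_PRIM_FOR macro whose second argument is now a MKLDNNPlugin::Type enumerator rather than a layer-name string, and creation becomes at most two hash lookups (Generic, then the concrete type) instead of a linear probe that constructed and discarded a candidate node for every registered maker. Call sites change mechanically:

```cpp
// Before (old static entry point, as removed throughout the diff):
const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
// After (singleton factory):
const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
```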

View File

@@ -133,7 +133,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
     for (const auto layer : NetPass::TIBodySortTopologically(subgraph)) {
         CNNLayerPtr _layer = layer;
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
         graphNodes.push_back(node);
         layer2node[layer] = node;
@@ -162,7 +162,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"out_" + output->getName(), "Output", output->getTensorDesc().getPrecision()}));
         layer->insData.push_back(output);
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(output), 0));
         node->addEdge(edge);
@@ -182,7 +182,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(to_stub_data);
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
         node->addEdge(edge);
@@ -197,7 +197,7 @@ void MKLDNNGraph::Replicate(const TensorIterator::Body &subgraph, const MKLDNNEx
         CNNLayerPtr layer(new CNNLayer({"in_" + input->getName(), "Input", input->getTensorDesc().getPrecision()}));
         layer->outData.push_back(input);
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
         for (auto p : getInputTo(input)) {
             auto consumer = p.second;
@@ -251,7 +251,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
             _layer->outData = layer->outData;
         }
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(_layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(_layer, getEngine(), extMgr, weightsCache));
         graphNodes.push_back(node);
         layer2node[layer] = node;
@@ -289,7 +289,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         CNNLayerPtr layer(new CNNLayer({"out_" + output.first, "Output", data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(data);
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(data), 0));
         node->addEdge(edge);
@@ -309,7 +309,7 @@ void MKLDNNGraph::Replicate(const ICNNNetwork &network, const MKLDNNExtensionMan
         CNNLayerPtr layer(new CNNLayer({"stub_" + parent_layer->name, "Output", to_stub_data->getTensorDesc().getPrecision()}));
         layer->insData.push_back(to_stub_data);
-        const MKLDNNNodePtr node(MKLDNNNode::CreateNode(layer, getEngine(), extMgr, weightsCache));
+        const MKLDNNNodePtr node(MKLDNNNode::factory().create(layer, getEngine(), extMgr, weightsCache));
         MKLDNNEdgePtr edge(new MKLDNNEdge(parent_node, node, _parent_port(to_stub_data), 0));
         node->addEdge(edge);

View File

@@ -5,12 +5,10 @@
#include "mkldnn_node.h"
#include "mkldnn_extension_mngr.h"
#include "caseless.hpp"
#include <vector>
#include <string>
#include <limits>
#include <cstdint>
#include <unordered_map>
#include <nodes/mkldnn_batchnorm_node.h>
#include <nodes/mkldnn_concat_node.h>
@@ -150,13 +148,9 @@ Type TypeFromName(const std::string type) {
 }  // namespace MKLDNNPlugin
-std::shared_ptr<MKLDNNNodesHolder> MKLDNNNode::GetNodesHolder() {
-    static std::shared_ptr<MKLDNNNodesHolder> localHolder = std::make_shared<MKLDNNNodesHolder>();
-    return localHolder;
-}
-
-void MKLDNNNode::AddNode(const std::string& name, CreatorByLayerFunction factory) {
-    GetNodesHolder()->nodes[name] = factory;
-}
+MKLDNNNode::Factory & MKLDNNNode::factory() {
+    static Factory factoryInstance;
+    return factoryInstance;
+}
MKLDNNNode::MKLDNNNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
@@ -260,41 +254,6 @@ void MKLDNNNode::remove() {
     }
 }
-MKLDNNNode* MKLDNNNode::CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                   const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
-    MKLDNNNode *newNode = nullptr;
-
-    auto nodesHolder = GetNodesHolder();
-
-    if (nodesHolder->nodes.find("Generic") != nodesHolder->nodes.end()) {
-        std::unique_ptr<MKLDNNNode> ol(nodesHolder->nodes["Generic"](layer, eng, w_cache));
-        if (ol != nullptr && ol->created(extMgr))
-            newNode = ol.release();
-    }
-    if (newNode == nullptr) {
-        for (auto maker : nodesHolder->nodes) {
-            std::unique_ptr<MKLDNNNode> ol(maker.second(layer, eng, w_cache));
-            if (ol != nullptr && ol->created(extMgr)) {
-                newNode = ol.release();
-                break;
-            }
-        }
-    }
-
-    // WA-start : TI node requires all attributes to construct internal subgraph
-    // including extManager, socket and mkldnn::eng.
-#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
-    MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
-    if (ti != nullptr)
-        ti->setExtManager(extMgr);
-#endif
-    // WA-end
-
-    if (!newNode)
-        THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
-
-    return newNode;
-}
-
 bool MKLDNNNode::isEdgesEmpty(const std::vector<MKLDNNEdgeWeakPtr>& edges) const {
     for (auto &edge : edges) {
         if (edge.lock())
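The removed fallback path above walked every entry in the makers map, constructing a candidate node and discarding it until one reported created(extMgr): an unsupported layer paid one trial construction per registered type, and the match order depended on the string ordering of the std::map keys. The replacement below keeps the Generic-first convention but resolves the concrete type with a single hash lookup.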
@@ -1157,3 +1116,44 @@ Layout MKLDNNNode::getWeightsLayoutByDims(SizeVector dims, bool isGrouped) {
 void MKLDNNNode::appendPostOps(mkldnn::post_ops& ops) {
     THROW_IE_EXCEPTION << "Fusing of " << this->getType() << " operation is not implemented";
 }
+
+MKLDNNNode* MKLDNNNode::Factory::create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                                        const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache) {
+    MKLDNNNode *newNode = nullptr;
+
+    auto builder = builders.find(Generic);
+
+    if (builder != builders.end()) {
+        std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+        if (ol != nullptr && ol->created(extMgr))
+            newNode = ol.release();
+    }
+
+    if (newNode == nullptr) {
+        builder = builders.find(TypeFromName(layer->type));
+
+        if (builder != builders.end()) {
+            std::unique_ptr<MKLDNNNode> ol(builder->second(layer, eng, w_cache));
+            if (ol != nullptr && ol->created(extMgr))
+                newNode = ol.release();
+        }
+    }
+
+    // WA-start : TI node requires all attributes to construct internal subgraph
+    // including extManager, socket and mkldnn::eng.
+#if defined (COMPILED_CPU_MKLDNN_TENSORITERATOR_NODE)
+    MKLDNNTensorIteratorNode *ti = dynamic_cast<MKLDNNTensorIteratorNode*>(newNode);
+    if (ti != nullptr)
+        ti->setExtManager(extMgr);
+#endif
+    // WA-end
+
+    if (!newNode)
+        THROW_IE_EXCEPTION << "Unsupported primitive of type: " << layer->type << " name: " << layer->name;
+
+    return newNode;
+}
+
+void MKLDNNNode::Factory::registerNode(Type type, builder_t builder) {
+    builders[type] = builder;
+}

View File

@@ -8,9 +8,9 @@
#include <memory>
#include <vector>
#include <string>
#include <map>
#include <cassert>
#include <algorithm>
#include <caseless.hpp>
#include <ie_common.h>
#include "mkldnn_dims.h"
#include "mkldnn_memory.h"
@@ -28,12 +28,6 @@ namespace MKLDNNPlugin {
 using MKLDNNNodePtr = std::shared_ptr<MKLDNNNode>;
 using MKLDNNNodeWeakPtr = std::weak_ptr<MKLDNNNode>;
-using CreatorByLayerFunction = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
-                                                          const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
-
-struct MKLDNNNodesHolder {
-    std::map<std::string, CreatorByLayerFunction> nodes;
-};
-
 enum Type {
     Unknown,
     Generic,
@@ -266,11 +260,11 @@ private:
 class MKLDNNNode : public InferenceEngine::details::no_copy {
 public:
-    static void AddNode(const std::string& name, CreatorByLayerFunction factory);
-    static std::shared_ptr<MKLDNNNodesHolder> GetNodesHolder();
+    class Factory;
+    template<typename To>
+    class Registrar;

-    static MKLDNNNode* CreateNode(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                  const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+    static Factory & factory();

     ~MKLDNNNode() override = default;
@@ -483,20 +477,6 @@ public:
         return desc.outputNumbers();
     }
-    template<typename To>
-    class Register {
-    public:
-        explicit Register(const std::string& type) {
-            MKLDNNNode::AddNode(type,
-                                [](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
-                                   MKLDNNWeightsSharing::Ptr &w_cache)
-                                -> MKLDNNNode* {
-                                    return new To(layer, eng, w_cache);
-                                });
-        }
-    };
-
 protected:
     // TODO: It is necessary only in order to avoid modifications of cnnLayers and original topology
     std::vector<MKLDNNDims> outDims;
@@ -610,8 +590,39 @@ private:
     ConstantType checkConstant(LOOK look, std::vector<MKLDNNNodePtr>& checkNodes);
 };

+class MKLDNNNode::Factory : InferenceEngine::details::no_copy {
+public:
+    using builder_t = std::function<MKLDNNNode *(const InferenceEngine::CNNLayerPtr& layer,
+                                                 const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &w_cache)>;
+
+    MKLDNNNode* create(const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                       const MKLDNNExtensionManager::Ptr& extMgr, MKLDNNWeightsSharing::Ptr &w_cache);
+
+    void registerNode(Type type, builder_t builder);
+
+private:
+    using map_t = std::unordered_map<Type, builder_t,
+                                     std::hash<std::underlying_type<MKLDNNPlugin::Type>::type>>;
+    map_t builders;
+};
+template<typename To>
+class MKLDNNNode::Registrar {
+public:
+    explicit Registrar(Type type) {
+        MKLDNNNode::factory().registerNode(type,
+            [type](const InferenceEngine::CNNLayerPtr& layer, const mkldnn::engine& eng,
+                   MKLDNNWeightsSharing::Ptr &w_cache) -> MKLDNNNode* {
+                MKLDNNNode *node = new To(layer, eng, w_cache);
+                return node;
+            });
+    }
+};
+
+#define REG_MKLDNN_CONCAT2(X, Y) X ## Y
+#define REG_MKLDNN_CONCAT(X, Y) REG_MKLDNN_CONCAT2(X, Y)
 #define REG_MKLDNN_PRIM_FOR(__prim, __type) \
-static MKLDNNNode::Register<__prim> __reg__##__type(#__type)
+static MKLDNNNode::Registrar<__prim> REG_MKLDNN_CONCAT(_reg_, __LINE__)(__type)
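The __LINE__-based name in the new macro deserves a note: the registration variable no longer encodes the type token (the old __reg__##__type spelling also produced identifiers containing a double underscore, which are reserved to the implementation), and the two-level concatenation is what makes the line number usable at all:

```cpp
// Hypothetical expansion: if a registration sits on line 658 of its .cpp file,
//     REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);
// now preprocesses to
//     static MKLDNNNode::Registrar<MKLDNNConcatNode> _reg_658(Concatenation);
// The REG_MKLDNN_CONCAT -> REG_MKLDNN_CONCAT2 indirection is required because
// ## pastes tokens before macro arguments are expanded: a single-level macro
// would emit the literal identifier _reg___LINE__ instead of _reg_658.
```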
template <typename T, typename U>
inline T div_up(const T a, const U b) {

View File

@@ -294,7 +294,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string
         auto layerIsSupported = [&] {
             std::unique_ptr<MKLDNNNode> ptr;
             try {
-                ptr.reset(MKLDNNNode::CreateNode(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
+                ptr.reset(MKLDNNNode::factory().create(*itLayer, {mkldnn::engine::kind::cpu, 0}, extensionManager, fake_w_cache));
             } catch (InferenceEngine::details::InferenceEngineException&) {
                 return false;
             }
@@ -339,7 +339,7 @@ void Engine::QueryNetwork(const ICNNNetwork& network, const std::map<std::string
             try {
                 mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
                 // if we can create and have not thrown exception, then layer is supported
-                std::unique_ptr <MKLDNNNode>(MKLDNNNode::CreateNode(*i, eng, extensionManager, fake_w_cache));
+                std::unique_ptr <MKLDNNNode>(MKLDNNNode::factory().create(*i, eng, extensionManager, fake_w_cache));
                 res.supportedLayersMap.insert({ (*i)->name, GetName() });
             } catch (InferenceEngine::details::InferenceEngineException&) {
             }

View File

@@ -655,4 +655,4 @@ void MKLDNNConcatNode::execute(mkldnn::stream strm) {
     }
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concat);
+REG_MKLDNN_PRIM_FOR(MKLDNNConcatNode, Concatenation);
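The renames in this and the following files (Concat→Concatenation, LRN→Lrn, RoiPooling→ROIPooling, Softmax→SoftMax) follow from the macro change: the second argument is now a MKLDNNPlugin::Type enumerator, and Factory::create resolves builders with TypeFromName(layer->type), so each registration key must be exactly the enumerator that TypeFromName returns. Alternate layer-type spellings are presumably still folded together inside TypeFromName's caseless string table, along these lines (a hypothetical excerpt, not the actual table in mkldnn_node.cpp):

```cpp
static InferenceEngine::details::caseless_unordered_map<std::string, Type> type_to_name_tbl = {
    { "Concat",        Concatenation },
    { "Concatenation", Concatenation },
    { "LRN",           Lrn },
    { "SoftMax",       SoftMax },
};
```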

View File

@@ -29,7 +29,7 @@ public:
     }

private:
-    static Register<MKLDNNDeformableConvolutionNode> reg;
+    static Registrar<MKLDNNDeformableConvolutionNode> reg;
     bool withBiases = false;
     bool isDW = false;
     bool isMerged = false;

View File

@@ -200,3 +200,4 @@ void MKLDNNInputNode::execute(mkldnn::stream strm) {
 }
 REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Input);
+REG_MKLDNN_PRIM_FOR(MKLDNNInputNode, Output);
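The added Output registration fits the same lookup change: an "Output" layer used to be picked up by the old linear probe, which tried MKLDNNInputNode and, judging by this diff, was accepted by its created() check even though only "Input" was registered; the direct Type-keyed lookup needs an explicit entry for the Output enumerator.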

View File

@@ -92,4 +92,4 @@ void MKLDNNLrnNode::createDescriptor(const std::vector<InferenceEngine::TensorDe
             new lrn_forward::desc(prop_kind::forward_scoring, alg, in_candidate, size, alpha, beta, k)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, LRN);
+REG_MKLDNN_PRIM_FOR(MKLDNNLrnNode, Lrn);

View File

@@ -84,7 +84,7 @@ class MKLDNNMemoryOutputNode : public MKLDNNNode, public MKLDNNMemoryNode {
      * @brief keeps reference to input sibling node
      */
     MKLDNNNode* inputNode = nullptr;
-    static Register<MKLDNNMemoryOutputNode> reg;
+    static Registrar<MKLDNNMemoryOutputNode> reg;
     MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
 };
@@ -106,7 +106,7 @@ public:
     MKLDNNMemoryPtr getStore();
 private:
     MKLDNNMemoryPtr dataStore;
-    static Register<MKLDNNMemoryInputNode> reg;
+    static Registrar<MKLDNNMemoryInputNode> reg;
     MKLDNNMemoryNodeVirtualEdge::Holder* holder = nullptr;
 };

 #endif
#endif

View File

@@ -69,3 +69,4 @@ bool MKLDNNReshapeNode::created() const {
     return getType() == Reshape || getType() == Flatten;
 }
 REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Reshape);
+REG_MKLDNN_PRIM_FOR(MKLDNNReshapeNode, Flatten);

View File

@@ -523,5 +523,6 @@ void MKLDNNRNN::execute(mkldnn::stream strm) {
     strm.submit({exec_after.begin(), exec_after.end()});
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNN);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNCell);
+REG_MKLDNN_PRIM_FOR(MKLDNNRNN, RNNSeq);
 }  // namespace MKLDNNPlugin
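Same pattern as the Concat rename: "RNN" is a registry string, not a Type enumerator. TypeFromName maps RNN-family layers onto the distinct RNNCell and RNNSeq types, so the single registration splits into one builder entry per enumerator.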

View File

@@ -112,4 +112,4 @@ void MKLDNNROIPoolingNode::createDescriptor(const std::vector<InferenceEngine::T
             spatial_scale)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, RoiPooling);
+REG_MKLDNN_PRIM_FOR(MKLDNNROIPoolingNode, ROIPooling);

View File

@@ -123,4 +123,4 @@ void MKLDNNSoftMaxNode::createDescriptor(const std::vector<InferenceEngine::Tens
             new softmax_forward::desc(prop_kind::forward_scoring, in_candidate, axis)));
     descs.push_back(desc);
 }
-REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, Softmax);
+REG_MKLDNN_PRIM_FOR(MKLDNNSoftMaxNode, SoftMax);

View File

@@ -15,12 +15,13 @@
 using namespace mkldnn;
 using namespace MKLDNNPlugin;
-using namespace InferenceEngine;
 using namespace InferenceEngine::details;

+namespace MKLDNNPlugin {
+
-static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
+static InferenceEngine::LayerConfig make_plain_config(const InferenceEngine::CNNLayerPtr &layer) {
+    using namespace InferenceEngine;
+
     LayerConfig config;

     for (const auto &in_w : layer->insData) {
@@ -50,7 +51,7 @@ static LayerConfig make_plain_config(const CNNLayerPtr &layer) {
 class PortIteratorHelper : public PortMapHelper {
 public:
     PortIteratorHelper(const MKLDNNMemoryPtr &from, const MKLDNNMemoryPtr &to,
-                       bool as_input, const TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
+                       bool as_input, const InferenceEngine::TensorIterator::PortMap &port_map, const mkldnn::engine& eng, int n_iter) : as_input(as_input) {
         const auto &full_blob = as_input ? from : to;
         const auto &part_blob = !as_input ? from : to;
@@ -147,7 +148,7 @@ MKLDNNTensorIteratorNode::MKLDNNTensorIteratorNode(InferenceEngine::CNNLayerPtr
         MKLDNNNode(layer, eng, cache) {}

 void MKLDNNTensorIteratorNode::getSupportedDescriptors() {
-    auto *ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+    auto *ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
     if (ti == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
@@ -189,7 +190,7 @@ void MKLDNNTensorIteratorNode::initSupportedPrimitiveDescriptors() {

 void MKLDNNTensorIteratorNode::createPrimitive() {
-    auto ti = dynamic_cast<class TensorIterator*>(getCnnLayer().get());
+    auto ti = dynamic_cast<class InferenceEngine::TensorIterator*>(getCnnLayer().get());
     if (ti == nullptr)
         THROW_IE_EXCEPTION << "Cannot convert to TensorIterator layer.";
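The edits in this file look namespace-motivated rather than factory-motivated. Registration now spells Type enumerators unqualified (this file presumably ends with REG_MKLDNN_PRIM_FOR(MKLDNNTensorIteratorNode, TensorIterator), outside the shown hunks), and with `using namespace InferenceEngine;` in force that token is ambiguous between the MKLDNNPlugin::TensorIterator enumerator and the InferenceEngine::TensorIterator layer class; hence the using-directive goes away and InferenceEngine:: is written out. A minimal reproduction with hypothetical names:

```cpp
namespace IE { class TensorIterator {}; }           // stands in for InferenceEngine::TensorIterator
namespace Plugin { enum Type { TensorIterator }; }  // stands in for MKLDNNPlugin::Type

using namespace IE;
using namespace Plugin;

int main() {
    // Type t = TensorIterator;               // error: reference to 'TensorIterator' is ambiguous
    Plugin::Type t = Plugin::TensorIterator;  // qualification resolves it
    return t == Plugin::TensorIterator ? 0 : 1;
}
```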

View File

@@ -482,7 +482,7 @@ TEST_F(MKLDNNGraphGenericTests, canGetPrimitiveDescriptorsList) {
     mkldnn::engine eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0));
     MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layerPtr, eng, extMgr, cache));
+    node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layerPtr, eng, extMgr, cache));
     ASSERT_EQ(MKLDNNPlugin::Type::Generic, node->getType());
     ASSERT_NO_THROW(node->getSupportedDescriptors());

View File

@@ -24,7 +24,7 @@ TEST_F(MKLDNNGraphReorderTests, cannotCreatePrimitiveDescriprorsWithoutOtherLaye
     InferenceEngine::CNNLayerPtr layer(new InferenceEngine::CNNLayer({"TestReorder", "Reorder", InferenceEngine::Precision::FP32}));
     MKLDNNPlugin::MKLDNNWeightsSharing::Ptr cache;
-    node.reset(MKLDNNPlugin::MKLDNNNode::CreateNode(layer, eng, {}, cache));
+    node.reset(MKLDNNPlugin::MKLDNNNode::factory().create(layer, eng, {}, cache));
     ASSERT_EQ(MKLDNNPlugin::Type::Reorder, node->getType());
     ASSERT_THROW(node->getSupportedDescriptors(), InferenceEngine::details::InferenceEngineException);

View File

@@ -33,7 +33,7 @@ protected:
 TEST_F(MKLDNNPrimitiveTest, DISABLED_canDeleteWeightInweitableLayer) {
     //simulate how convlayer gets created
     engine e(engine::cpu, 0);
-    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::CreateNode(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
+    //auto node = MKLDNNPlugin::MKLDNNNodePtr(MKLDNNPlugin::MKLDNNNode::factory().create(MKLDNNPlugin::Generic, InferenceEngine::Precision::FP32, ""));
     // ChildConv *conv = new ChildConv(e);
     // EXPECT_CALL(*conv, die()).Times(1);