[CPU] Changed math to mkldnn_math_node (#5617)

This commit is contained in:
Egor Shulman
2021-05-20 10:00:38 +03:00
committed by GitHub
parent dcc288680b
commit 98c35d6a08
6 changed files with 327 additions and 338 deletions

View File

@@ -178,7 +178,30 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
{ "Select", Select},
{ "ShuffleChannels", ShuffleChannels},
{ "DFT", DFT},
{ "IDFT", DFT}
{ "IDFT", DFT},
{ "Abs", Math},
{ "Acos", Math},
{ "Acosh", Math},
{ "Asin", Math},
{ "Asinh", Math},
{ "Atan", Math},
{ "Atanh", Math},
{ "Ceil", Math},
{ "Ceiling", Math},
{ "Cos", Math},
{ "Cosh", Math},
{ "Floor", Math},
{ "HardSigmoid", Math},
{ "Log", Math},
{ "Neg", Math},
{ "Reciprocal", Math},
{ "Selu", Math},
{ "Sign", Math},
{ "Sin", Math},
{ "Sinh", Math},
{ "SoftPlus", Math},
{ "Softsign", Math},
{ "Tan", Math},
};
Type TypeFromName(const std::string type) {

View File

@@ -93,6 +93,7 @@ enum Type {
Reference,
ShuffleChannels,
DFT,
Math,
};
Type TypeFromName(const std::string type);
@@ -209,6 +210,8 @@ static std::string NameFromType(Type type) {
return "ShuffleChannels";
case DFT:
return "DFT";
case Math:
return "Math";
default:
return "Unknown";
}

View File

@@ -8,28 +8,6 @@
#endif
MKLDNN_EXTENSION_NODE(CTCLossImpl, CTCLoss);
MKLDNN_EXTENSION_NODE(MathImpl, Abs);
MKLDNN_EXTENSION_NODE(MathImpl, Acos);
MKLDNN_EXTENSION_NODE(MathImpl, Acosh);
MKLDNN_EXTENSION_NODE(MathImpl, Asin);
MKLDNN_EXTENSION_NODE(MathImpl, Asinh);
MKLDNN_EXTENSION_NODE(MathImpl, Atan);
MKLDNN_EXTENSION_NODE(MathImpl, Atanh);
MKLDNN_EXTENSION_NODE(MathImpl, Ceil);
MKLDNN_EXTENSION_NODE(MathImpl, Ceiling);
MKLDNN_EXTENSION_NODE(MathImpl, Cos);
MKLDNN_EXTENSION_NODE(MathImpl, Cosh);
MKLDNN_EXTENSION_NODE(MathImpl, Floor);
MKLDNN_EXTENSION_NODE(MathImpl, HardSigmoid);
MKLDNN_EXTENSION_NODE(MathImpl, Log);
MKLDNN_EXTENSION_NODE(MathImpl, Neg);
MKLDNN_EXTENSION_NODE(MathImpl, Reciprocal);
MKLDNN_EXTENSION_NODE(MathImpl, Selu);
MKLDNN_EXTENSION_NODE(MathImpl, Sign);
MKLDNN_EXTENSION_NODE(MathImpl, Sin);
MKLDNN_EXTENSION_NODE(MathImpl, Sinh);
MKLDNN_EXTENSION_NODE(MathImpl, Softsign);
MKLDNN_EXTENSION_NODE(MathImpl, Tan);
MKLDNN_EXTENSION_NODE(ExperimentalDetectronTopKROIsImpl, ExperimentalDetectronTopKROIs);
MKLDNN_EXTENSION_NODE(ExtractImagePatchesImpl, ExtractImagePatches);
MKLDNN_EXTENSION_NODE(ReverseSequenceImpl, ReverseSequence);

View File

@@ -1,315 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "base.hpp"
#include <cmath>
#include <string>
#include <vector>
#include <cassert>
#include "ie_parallel.hpp"
#include "common/tensor_desc_creator.h"
#include "utils/general_utils.h"
#include <ngraph/ops.hpp>
namespace InferenceEngine {
namespace Extensions {
namespace Cpu {
using MKLDNNPlugin::TensorDescCreatorTypes;
class MathImpl: public ExtLayerBase {
public:
bool isSupportedOperation(const std::shared_ptr<const ngraph::Node>& op, std::string& errorMessage) noexcept {
try {
if (initializers.find(op->get_type_info()) == initializers.end()) {
errorMessage = "Unsupported Math layer type.";
return false;
}
if (MKLDNNPlugin::one_of(op->get_type_info(),
ngraph::op::v0::HardSigmoid::type_info,
ngraph::op::v0::Selu::type_info)) {
auto firstConst = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
auto secondConst = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!firstConst || !secondConst) {
errorMessage = "Constant expected as the second and third inputs.";
return false;
}
}
} catch (...) {
return false;
}
return true;
}
explicit MathImpl(const std::shared_ptr<ngraph::Node>& op) :
alpha(0.f), beta(0.f), gamma(0.f) {
try {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
IE_THROW(NotImplemented) << errorMessage;
}
initializers[op->get_type_info()](op, *this);
if (MKLDNNPlugin::one_of(op->get_type_info(),
ngraph::op::v0::HardSigmoid::type_info,
ngraph::op::v0::Selu::type_info)) {
addConfig(op, {{TensorDescCreatorTypes::ncsp, Precision::FP32},
{TensorDescCreatorTypes::ncsp, Precision::FP32},
{TensorDescCreatorTypes::ncsp, Precision::FP32}},
{{TensorDescCreatorTypes::ncsp, Precision::FP32}});
} else {
addConfig(op, {{TensorDescCreatorTypes::ncsp, Precision::FP32}},
{{TensorDescCreatorTypes::ncsp, Precision::FP32}});
}
} catch (InferenceEngine::Exception &ex) {
errorMsg = ex.what();
throw;
}
}
StatusCode execute(std::vector<Blob::Ptr>& inputs, std::vector<Blob::Ptr>& outputs, ResponseDesc *resp) noexcept override {
size_t dataSize = outputs[0]->size();
const float *src_data = inputs[0]->cbuffer().as<const float *>() +
inputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
float* dst_data = outputs[0]->cbuffer().as<float *>() +
outputs[0]->getTensorDesc().getBlockingDesc().getOffsetPadding();
switch (getAlgorithm()) {
case MKLDNNPlugin::MathAbs:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = (std::abs)(src_data[i]);
});
break;
case MKLDNNPlugin::MathAcos:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = acosf(src_data[i]);
});
break;
case MKLDNNPlugin::MathAcosh:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = acoshf(src_data[i]);
});
break;
case MKLDNNPlugin::MathAsin:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = asinf(src_data[i]);
});
break;
case MKLDNNPlugin::MathAsinh:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = asinhf(src_data[i]);
});
break;
case MKLDNNPlugin::MathAtan:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = atanf(src_data[i]);
});
break;
case MKLDNNPlugin::MathAtanh:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = atanhf(src_data[i]);
});
break;
case MKLDNNPlugin::MathCeiling:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = ceilf(src_data[i]);
});
break;
case MKLDNNPlugin::MathCos:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = cosf(src_data[i]);
});
break;
case MKLDNNPlugin::MathCosh:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = coshf(src_data[i]);
});
break;
case MKLDNNPlugin::MathFloor:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = floorf(src_data[i]);
});
break;
case MKLDNNPlugin::MathHardSigmoid:
alpha = (alpha == 0.0f) ? 0.2f : alpha;
beta = (beta == 0.0f) ? 0.5f : beta;
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = (std::max)(0.f, (std::min)(1.f, alpha * src_data[i] + beta));
});
break;
case MKLDNNPlugin::MathLog:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = logf(src_data[i]);
});
break;
case MKLDNNPlugin::MathNegative:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = -src_data[i];
});
break;
case MKLDNNPlugin::MathReciprocal:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = 1.0f / src_data[i];
});
break;
case MKLDNNPlugin::MathSelu:
alpha = (alpha == 0.0f) ? 1.67326f : alpha;
gamma = (gamma == 0.0f) ? 1.0507f : gamma;
parallel_for(dataSize, [&](size_t i) {
float x = src_data[i];
dst_data[i] = (x > 0.0f) ? (gamma * x) : (gamma * alpha * (exp(x) - 1.0f));
});
break;
case MKLDNNPlugin::MathSign:
parallel_for(dataSize, [&](size_t i) {
if (src_data[i] > 0.0f)
dst_data[i] = 1.0f;
else if (src_data[i] < 0.0f)
dst_data[i] = -1.0f;
else
dst_data[i] = 0.0f;
});
break;
case MKLDNNPlugin::MathSin:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = sinf(src_data[i]);
});
break;
case MKLDNNPlugin::MathSinh:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = sinhf(src_data[i]);
});
break;
case MKLDNNPlugin::MathSoftPlus:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = logf(expf(src_data[i]) + 1);
});
break;
case MKLDNNPlugin::MathSoftsign:
parallel_for(dataSize, [&](size_t i) {
float x = src_data[i];
dst_data[i] = x / (1.f + (std::abs)(x));
});
break;
case MKLDNNPlugin::MathTan:
parallel_for(dataSize, [&](size_t i) {
dst_data[i] = tanf(src_data[i]);
});
break;
default:
if (resp) {
std::string errorMsg = "Incorrect Reduce layer type";
errorMsg.copy(resp->msg, sizeof(resp->msg) - 1);
}
return GENERAL_ERROR;
}
return OK;
}
private:
static std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MathImpl& node)>> initializers;
float alpha = 0.0f;
float beta = 0.0f;
float gamma = 0.0f;
};
std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MathImpl& node)>> MathImpl::initializers = {
{ngraph::op::v0::Abs::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAbs;
}},
{ngraph::op::v0::Acos::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAcos;
}},
{ngraph::op::v3::Acosh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAcosh;
}},
{ngraph::op::v0::Asin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAsin;
}},
{ngraph::op::v3::Asinh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAsinh;
}},
{ngraph::op::v0::Atan::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAtan;
}},
{ngraph::op::v0::Ceiling::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathCeiling;
}},
{ngraph::op::v0::Cos::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathCos;
}},
{ngraph::op::v0::Cosh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathCosh;
}},
{ngraph::op::v0::Floor::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathFloor;
}},
{ngraph::op::v0::HardSigmoid::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathHardSigmoid;
node.alpha = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1))->cast_vector<float>()[0];
node.beta = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2))->cast_vector<float>()[0];
}},
{ngraph::op::v0::Log::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathLog;
}},
{ngraph::op::v0::Negative::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathNegative;
}},
{ngraph::op::v0::Selu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathSelu;
node.alpha = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1))->cast_vector<float>()[0];
node.gamma = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2))->cast_vector<float>()[0];
}},
{ngraph::op::v0::Sign::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathSign;
}},
{ngraph::op::v0::Sin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathSin;
}},
{ngraph::op::v0::Sinh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathSinh;
}},
{ngraph::op::v4::SoftPlus::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathSoftPlus;
}},
{ngraph::op::v0::Tan::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathTan;
}},
{ngraph::op::v3::Atanh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MathImpl& node) {
node.algorithm = MKLDNNPlugin::MathAtanh;
}}
};
REG_FACTORY_FOR(MathImpl, Abs);
REG_FACTORY_FOR(MathImpl, Acos);
REG_FACTORY_FOR(MathImpl, Acosh);
REG_FACTORY_FOR(MathImpl, Asin);
REG_FACTORY_FOR(MathImpl, Asinh);
REG_FACTORY_FOR(MathImpl, Atan);
REG_FACTORY_FOR(MathImpl, Atanh);
REG_FACTORY_FOR(MathImpl, Ceil);
REG_FACTORY_FOR(MathImpl, Ceiling);
REG_FACTORY_FOR(MathImpl, Cos);
REG_FACTORY_FOR(MathImpl, Cosh);
REG_FACTORY_FOR(MathImpl, Floor);
REG_FACTORY_FOR(MathImpl, HardSigmoid);
REG_FACTORY_FOR(MathImpl, Log);
REG_FACTORY_FOR(MathImpl, Neg);
REG_FACTORY_FOR(MathImpl, Reciprocal);
REG_FACTORY_FOR(MathImpl, Selu);
REG_FACTORY_FOR(MathImpl, Sign);
REG_FACTORY_FOR(MathImpl, Sin);
REG_FACTORY_FOR(MathImpl, Sinh);
REG_FACTORY_FOR(MathImpl, SoftPlus);
REG_FACTORY_FOR(MathImpl, Softsign);
REG_FACTORY_FOR(MathImpl, Tan);
} // namespace Cpu
} // namespace Extensions
} // namespace InferenceEngine

View File

@@ -0,0 +1,265 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <cmath>
#include <vector>
#include <string>
#include <ngraph/ops.hpp>
#include "ie_parallel.hpp"
#include "mkldnn_math_node.h"
#include "utils/general_utils.h"
using namespace MKLDNNPlugin;
using namespace InferenceEngine;
bool MKLDNNMathNode::isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept {
    try {
        // Only op types that have an entry in the initializer table are handled.
        if (initializers.count(op->get_type_info()) == 0) {
            errorMessage = "Unsupported Math layer type.";
            return false;
        }
        // HardSigmoid and Selu carry scalar parameters on inputs 1 and 2; those
        // must be compile-time Constants so the node can read them once.
        const bool hasScalarParams = MKLDNNPlugin::one_of(op->get_type_info(),
                                                          ngraph::op::v0::HardSigmoid::type_info,
                                                          ngraph::op::v0::Selu::type_info);
        if (hasScalarParams) {
            const auto alphaConst = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1));
            const auto betaConst = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
            if (!alphaConst || !betaConst) {
                errorMessage = "Constant expected as the second and third inputs.";
                return false;
            }
        }
        return true;
    } catch (...) {
        // Any ngraph query failure means the op cannot be validated.
        return false;
    }
}
// Constructs the node, picks the math algorithm for this op type and records
// one planar-FP32 input descriptor per input port.
// Throws NotImplemented when the op is not supported (caller falls back).
MKLDNNMathNode::MKLDNNMathNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng,
        MKLDNNWeightsSharing::Ptr &cache) : MKLDNNNode(op, eng, cache), alpha(0.f), beta(0.f), gamma(0.f) {
    std::string errorMessage;
    if (!isSupportedOperation(op, errorMessage)) {
        IE_THROW(NotImplemented) << errorMessage;
    }
    // Per-op initializer sets `algorithm` (and alpha/beta/gamma for HardSigmoid/Selu).
    initializers[op->get_type_info()](op, *this);
    // Fix: loop index was `int` compared against a size_t bound (signed/unsigned mismatch).
    const size_t inputsNum = op->get_input_size();
    inDataConf.reserve(inputsNum);
    for (size_t i = 0; i < inputsNum; ++i)
        inDataConf.emplace_back(TensorDescCreatorTypes::ncsp, Precision::FP32);
}
void MKLDNNMathNode::initSupportedPrimitiveDescriptors() {
    // Descriptors are built once; subsequent calls are no-ops.
    if (supportedPrimitiveDescriptors.empty()) {
        // All inputs are planar FP32 (collected in the constructor); single FP32 output;
        // reference implementation only.
        addSupportedPrimDesc(inDataConf,
                             {{TensorDescCreatorTypes::ncsp, Precision::FP32}},
                             impl_desc_type::ref_any);
    }
}
// Element-wise reference kernel: applies the selected unary math function to
// every FP32 element of input 0, writing FP32 results of the same size to
// output 0. Parallelized per element via parallel_for.
void MKLDNNMathNode::execute(mkldnn::stream strm) {
    size_t dataSize = getChildEdgeAt(0)->getBlob()->size();
    const float *src_data = reinterpret_cast<const float *>(getParentEdgeAt(0)->getMemoryPtr()->GetPtr());
    float* dst_data = reinterpret_cast<float *>(getChildEdgeAt(0)->getMemoryPtr()->GetPtr());
    switch (getAlgorithm()) {
        case MKLDNNPlugin::MathAbs:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = (std::abs)(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathAcos:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = acosf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathAcosh:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = acoshf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathAsin:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = asinf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathAsinh:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = asinhf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathAtan:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = atanf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathAtanh:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = atanhf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathCeiling:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = ceilf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathCos:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = cosf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathCosh:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = coshf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathFloor:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = floorf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathHardSigmoid:
            // Zero-valued parameters mean "not provided": fall back to the op's
            // documented defaults (alpha = 0.2, beta = 0.5).
            alpha = (alpha == 0.0f) ? 0.2f : alpha;
            beta = (beta == 0.0f) ? 0.5f : beta;
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = (std::max)(0.f, (std::min)(1.f, alpha * src_data[i] + beta));
            });
            break;
        case MKLDNNPlugin::MathLog:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = logf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathNegative:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = -src_data[i];
            });
            break;
        case MKLDNNPlugin::MathReciprocal:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = 1.0f / src_data[i];
            });
            break;
        case MKLDNNPlugin::MathSelu:
            // Zero-valued parameters fall back to the canonical SELU constants.
            alpha = (alpha == 0.0f) ? 1.67326f : alpha;
            gamma = (gamma == 0.0f) ? 1.0507f : gamma;
            parallel_for(dataSize, [&](size_t i) {
                float x = src_data[i];
                dst_data[i] = (x > 0.0f) ? (gamma * x) : (gamma * alpha * (exp(x) - 1.0f));
            });
            break;
        case MKLDNNPlugin::MathSign:
            parallel_for(dataSize, [&](size_t i) {
                if (src_data[i] > 0.0f)
                    dst_data[i] = 1.0f;
                else if (src_data[i] < 0.0f)
                    dst_data[i] = -1.0f;
                else
                    dst_data[i] = 0.0f;
            });
            break;
        case MKLDNNPlugin::MathSin:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = sinf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathSinh:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = sinhf(src_data[i]);
            });
            break;
        case MKLDNNPlugin::MathSoftPlus:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = logf(expf(src_data[i]) + 1);
            });
            break;
        case MKLDNNPlugin::MathSoftsign:
            parallel_for(dataSize, [&](size_t i) {
                float x = src_data[i];
                dst_data[i] = x / (1.f + (std::abs)(x));
            });
            break;
        case MKLDNNPlugin::MathTan:
            parallel_for(dataSize, [&](size_t i) {
                dst_data[i] = tanf(src_data[i]);
            });
            break;
        default:
            // Fix: message previously said "Reduce" — copy-paste from another node.
            IE_THROW() << "Incorrect Math layer type";
    }
}
bool MKLDNNMathNode::created() const {
    // The node counts as created once the factory has tagged it with the Math type.
    return Math == getType();
}
// Dispatch table: maps each supported ngraph op type to a lambda that stores the
// matching Math algorithm on the node; HardSigmoid/Selu additionally read their
// scalar parameters from Constant inputs 1 and 2 (validated in isSupportedOperation).
// NOTE(review): "Reciprocal" and "Softsign" are routed to the Math node type by
// TypeFromName but have no entry here, so isSupportedOperation() rejects them —
// confirm whether that is intended.
std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MKLDNNMathNode& node)>> MKLDNNMathNode::initializers {
    {ngraph::op::v0::Abs::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAbs;
    }},
    {ngraph::op::v0::Acos::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAcos;
    }},
    {ngraph::op::v3::Acosh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAcosh;
    }},
    {ngraph::op::v0::Asin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAsin;
    }},
    {ngraph::op::v3::Asinh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAsinh;
    }},
    {ngraph::op::v0::Atan::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAtan;
    }},
    {ngraph::op::v0::Ceiling::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathCeiling;
    }},
    {ngraph::op::v0::Cos::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathCos;
    }},
    {ngraph::op::v0::Cosh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathCosh;
    }},
    {ngraph::op::v0::Floor::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathFloor;
    }},
    // HardSigmoid: alpha/beta come from Constant inputs 1 and 2.
    {ngraph::op::v0::HardSigmoid::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathHardSigmoid;
        node.alpha = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1))->cast_vector<float>()[0];
        node.beta = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2))->cast_vector<float>()[0];
    }},
    {ngraph::op::v0::Log::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathLog;
    }},
    {ngraph::op::v0::Negative::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathNegative;
    }},
    // Selu: alpha/gamma come from Constant inputs 1 and 2.
    {ngraph::op::v0::Selu::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathSelu;
        node.alpha = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(1))->cast_vector<float>()[0];
        node.gamma = ngraph::as_type_ptr<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2))->cast_vector<float>()[0];
    }},
    {ngraph::op::v0::Sign::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathSign;
    }},
    {ngraph::op::v0::Sin::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathSin;
    }},
    {ngraph::op::v0::Sinh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathSinh;
    }},
    {ngraph::op::v4::SoftPlus::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathSoftPlus;
    }},
    {ngraph::op::v0::Tan::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathTan;
    }},
    {ngraph::op::v3::Atanh::type_info, [](const std::shared_ptr<ngraph::Node>& op, MKLDNNMathNode& node) {
        node.algorithm = MKLDNNPlugin::MathAtanh;
    }}
};
// Register the node implementation under the Math type in the plugin factory.
REG_MKLDNN_PRIM_FOR(MKLDNNMathNode, Math);

View File

@@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_common.h>
#include <mkldnn_node.h>
namespace MKLDNNPlugin {
// CPU-plugin node that evaluates element-wise unary math ops (Abs, Acos, ...,
// HardSigmoid, Selu, SoftPlus, Tan) as an FP32 reference implementation.
class MKLDNNMathNode : public MKLDNNNode {
public:
    MKLDNNMathNode(const std::shared_ptr<ngraph::Node>& op, const mkldnn::engine& eng, MKLDNNWeightsSharing::Ptr &cache);
    // Reference implementation: no MKLDNN descriptors and no primitive to create.
    void getSupportedDescriptors() override {};
    void initSupportedPrimitiveDescriptors() override;
    void createPrimitive() override {};
    void execute(mkldnn::stream strm) override;
    bool created() const override;
    // False (with a reason in errorMessage) when the op type has no initializer
    // entry, or when HardSigmoid/Selu inputs 1 and 2 are not Constants.
    static bool isSupportedOperation(const std::shared_ptr<ngraph::Node>& op, std::string& errorMessage) noexcept;
private:
    // Per-op-type setup lambdas: each stores the matching algorithm on the node.
    static std::map<const ngraph::DiscreteTypeInfo, std::function<void(const std::shared_ptr<ngraph::Node>&, MKLDNNMathNode& node)>> initializers;
    // Scalar parameters for HardSigmoid (alpha, beta) and Selu (alpha, gamma);
    // zero means "not provided" and execute() substitutes the op's defaults.
    float alpha = 0.0f;
    float beta = 0.0f;
    float gamma = 0.0f;
    // One planar-FP32 input descriptor per input port (filled in the constructor).
    std::vector<DataConfigurator> inDataConf;
    // NOTE(review): errorPrefix appears unused in this node — confirm before removing.
    std::string errorPrefix;
};
} // namespace MKLDNNPlugin