[CPU] Added eltwise Round-5 (#2347)

Author: Alexandra Sidorova, 2020-11-02 18:44:50 +03:00 (committed by GitHub)
Parent: f2963713d0
Commit: 5596c7c841
9 changed files with 76 additions and 54 deletions

View File

@@ -600,7 +600,8 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndActivation(MKLDNNGraph &graph) {
         return eltwiseNode &&
             (eltwiseNode->getOpType() == Relu ||
                 (conv->getCnnLayer()->precision == Precision::FP32 &&
-                 IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})));
+                 IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid,
+                                                    Round})));
     };
 
     for (int i = 0; i < graphNodes.size(); i++) {
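Every fusion predicate touched in this file gates on the eltwise op type through `IsOneOf`, so adding `Round` to each braced list is all it takes to make the new op fusable. For readers outside the plugin, a minimal sketch of what such a helper plausibly looks like (the name and call shape come from the diff; the body is an assumption):

```cpp
#include <algorithm>
#include <vector>

// Assumed shape of the IsOneOf helper the predicates above call:
// true when `val` matches any entry of the braced list at the call site.
template <typename T>
bool IsOneOf(T val, const std::vector<T>& types) {
    return std::any_of(types.begin(), types.end(),
                       [&](const T& t) { return t == val; });
}
```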
@@ -678,7 +679,8 @@ void MKLDNNGraphOptimizer::FuseFullyConnectedAndSimpleOperation(MKLDNNGraph &gra
         if (eltwiseNode == nullptr)
             THROW_IE_EXCEPTION << "Cannot get Eltwise node " << childNode->getName();
 
-        if (IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})) {
+        if (IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish,
+                                               Hsigmoid, Round})) {
             return true;
         } else if (IsOneOf(eltwiseNode->getOpType(), {MulAdd, Prelu})) {
             if (eltwiseNode->getOpType() == MulAdd && eltwiseNode->getCnnLayer()->blobs.size() != 2)
@@ -1044,7 +1046,8 @@ void MKLDNNGraphOptimizer::FuseConvolutionAndSimpleOperation(MKLDNNGraph &graph)
             return ((eltwiseNode->getOpType() == MulAdd && node->getCnnLayer()->blobs.size() == 2) ||
                     (eltwiseNode->getOpType() == Prelu) ||
-                    IsOneOf(eltwiseNode->getOpType(), {Relu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid}));
+                    IsOneOf(eltwiseNode->getOpType(), {Relu, Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish,
+                                                       Hsigmoid, Round}));
         }
 
         return false;
@@ -1258,7 +1261,8 @@ void MKLDNNGraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(MKLDNNG
         return eltwiseNode &&
             (eltwiseNode->getOpType() == Relu ||
                 (conv->getCnnLayer()->precision == Precision::FP32 &&
-                 IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid})));
+                 IsOneOf(eltwiseNode->getOpType(), {Elu, Logistic, BoundedRelu, Clamp, Swish, Hswish, Mish, Hsigmoid,
+                                                    Round})));
     };
 
     for (auto &graphNode : graphNodes) {
@@ -1611,7 +1615,7 @@ void MKLDNNGraphOptimizer::FuseNormalizeAndSimpleOperation(MKLDNNGraph &graph) {
             if (eltwiseNode == nullptr)
                 THROW_IE_EXCEPTION << "Cannot get Eltwise node " << node->getName();
             return IsOneOf(eltwiseNode->getOpType(), {Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp, Tanh, Swish,
-                                                      Hswish, Mish, Hsigmoid, Linear, Abs, Square, Sqrt}) ||
+                                                      Hswish, Mish, Hsigmoid, Round, Linear, Abs, Square, Sqrt}) ||
                     ((eltwiseNode->getOpType() == MulAdd && eltwiseNode->getCnnLayer()->blobs.size() == 2) ||
                     (eltwiseNode->getOpType() == Prelu));
         }

View File

@@ -75,6 +75,7 @@ static const InferenceEngine::details::caseless_unordered_map<std::string, Type>
         { "HSwish", Eltwise },
         { "Mish", Eltwise },
         { "HSigmoid", Eltwise },
+        { "Round", Eltwise },
         { "ScaleShift", Eltwise },
         { "PReLU", Eltwise },
         { "Norm", Lrn },

View File

@@ -312,7 +312,8 @@ private:
         auto& eltwiseNode = dynamic_cast<const MKLDNNEltwiseNode&>(node);
         switch (eltwiseNode.getOpType()) {
             case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
-            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+            case Mish: case Hsigmoid: case Round:
                 return jit_mkldnn_emitter::get_supported_precisions();
             case Add: return jit_add_emitter::get_supported_precisions();
             case MulAdd: return jit_mul_add_emitter::get_supported_precisions();
@@ -345,7 +346,8 @@ private:
         auto& eltwiseNode = dynamic_cast<const MKLDNNEltwiseNode&>(node);
         switch (eltwiseNode.getOpType()) {
             case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
-            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+            case Mish: case Hsigmoid: case Round:
                 return std::make_shared<jit_mkldnn_emitter>(this, isa, eltwiseNode, exec_prec);
             case Add: return std::make_shared<jit_add_emitter>(this, isa, eltwiseNode, exec_prec);
             case MulAdd: return std::make_shared<jit_mul_add_emitter>(this, isa, eltwiseNode, exec_prec);
@@ -764,6 +766,18 @@ MKLDNNEltwiseNode::initializers = {
             opType = Hsigmoid;
             algorithm = mkldnn::eltwise_hsigmoid;
         }},
+        {"round", [](GenericLayer* activationLayer, EltwiseOpType& opType, mkldnn::algorithm& algorithm, float& alpha, float& beta) {
+            alpha = 0.0f;
+            beta = 0.0f;
+            opType = Round;
+            std::string mode = activationLayer->GetParamAsString("mode", "half_to_even");
+            if (mode == "half_to_even")
+                algorithm = mkldnn::eltwise_round_half_to_even;
+            else if (mode == "half_away_from_zero")
+                algorithm = mkldnn::eltwise_round_half_away_from_zero;
+            else
+                THROW_IE_EXCEPTION << "Round layer with name " << activationLayer->name << " doesn't support mode " << mode;
+        }},
 };
 
 void MKLDNNEltwiseNode::init() {
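The `mode` parameter parsed above selects between the two tie-breaking rules defined by the Round-5 spec. As a standalone sketch (not code from the patch), each algorithm reduces to a one-liner over the C math library:

```cpp
#include <cfenv>
#include <cmath>
#include <cstdio>

// eltwise_round_half_to_even: ties go to the nearest even integer
// ("banker's rounding"), e.g. 2.5 -> 2, 3.5 -> 4, -2.5 -> -2.
float round_half_to_even(float x) {
    std::fesetround(FE_TONEAREST);
    return std::nearbyintf(x);
}

// eltwise_round_half_away_from_zero: ties move away from zero,
// e.g. 2.5 -> 3, -2.5 -> -3.
float round_half_away_from_zero(float x) {
    return std::roundf(x);
}

int main() {
    for (float x : {-2.5f, -0.5f, 0.5f, 1.5f, 2.5f, 3.5f})
        std::printf("%5.1f -> even: %4.1f  away: %4.1f\n",
                    x, round_half_to_even(x), round_half_away_from_zero(x));
    return 0;
}
```

These two rules match the expected vectors in the Python tests further down (2.5 → 2.0 for HALF_TO_EVEN, 2.5 → 3.0 for HALF_AWAY_FROM_ZERO).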
@@ -833,7 +847,8 @@ void MKLDNNEltwiseNode::init() {
                    comparator(layerType, "swish") ||
                    comparator(layerType, "hswish") ||
                    comparator(layerType, "mish") ||
-                   comparator(layerType, "hsigmoid")) {
+                   comparator(layerType, "hsigmoid") ||
+                   comparator(layerType, "round")) {
             initializers[layerType](getCnnLayer().get(), eltwiseOp, eltwiseAlgorithm, alpha, beta);
         } else {
             THROW_IE_EXCEPTION << "Unsupported algorithm for Eltwise node with name `" << getName() << "`.";
@@ -843,7 +858,8 @@ void MKLDNNEltwiseNode::init() {
 size_t MKLDNNEltwiseNode::getOpInputsNum() const {
     switch (getOpType()) {
         case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt: case PowerStatic:
-        case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+        case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+        case Mish: case Hsigmoid: case Round:
         case LogicalNot:
             return 1;
         case Add: case Subtract: case Multiply: case Divide: case FloorMod: case Mod: case Maximum: case Minimum: case SquaredDifference:
@@ -1469,7 +1485,8 @@ void MKLDNNEltwiseNode::executeReference(const std::vector<const uint8_t *>& src
         switch (getOpType()) {
             case Relu: case Gelu: case Elu: case Tanh: case Logistic: case Square: case Abs: case Sqrt:
-            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish: case Mish: case Hsigmoid:
+            case Linear: case BoundedRelu: case SoftRelu: case Relu6: case Exp: case Clamp: case Swish: case Hswish:
+            case Mish: case Hsigmoid: case Round:
                 *dst_ptr_f = ref_eltwise_injector->compute_scalar(src_f[0]); break;
             case Add: *dst_ptr_f = src_f[0] + src_f[1]; break;
             case MulAdd: *dst_ptr_f = src_f[0] * src_f[1] + src_f[2]; break;
@@ -1570,6 +1587,8 @@ void MKLDNNEltwiseNode::appendPostOps(mkldnn::post_ops& ops) {
         case mkldnn::eltwise_hswish:
         case mkldnn::eltwise_mish:
         case mkldnn::eltwise_hsigmoid:
+        case mkldnn::eltwise_round_half_to_even:
+        case mkldnn::eltwise_round_half_away_from_zero:
             ops.append_eltwise(1.0, getAlgorithm(), getAlpha(), getBeta());
             break;
         case mkldnn::depthwise_scale_shift:
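With the two new algorithms accepted here, Round can ride along as an eltwise post-op on a fused primitive. A minimal sketch of the consuming side (the API names follow the MKL-DNN v0.x C++ header this plugin builds against; the free-standing function is an illustration, not code from the patch):

```cpp
#include <mkldnn.hpp>

// Build a primitive_attr carrying a Round post-op, mirroring what
// appendPostOps() contributes: scale 1.0, alpha/beta unused (0.0).
mkldnn::primitive_attr make_round_attr() {
    mkldnn::post_ops ops;
    ops.append_eltwise(/*scale=*/1.0f,
                       mkldnn::eltwise_round_half_to_even,
                       /*alpha=*/0.0f, /*beta=*/0.0f);

    mkldnn::primitive_attr attr;
    attr.set_post_ops(ops);  // a convolution created with `attr` rounds its output
    return attr;
}
```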

View File

@@ -59,7 +59,8 @@ enum EltwiseOpType {
     Prelu,
     Mish,
     Hswish,
-    Hsigmoid
+    Hsigmoid,
+    Round
 };
 
 struct jit_eltwise_params {

View File

@@ -2123,7 +2123,7 @@ bool MKLDNNInterpolateNode::canFuse(const MKLDNNNodePtr& node) const {
         if (eltwiseNode == nullptr)
             THROW_IE_EXCEPTION << "Cannot get eltwise node " << node->getName();
         return isOneOf(eltwiseNode->getOpType(), {MulAdd, Prelu, Relu, Gelu, Elu, Logistic, BoundedRelu, Clamp,
-                                                  Tanh, Swish, Hswish, Mish, Hsigmoid, Linear, Abs, Square, Sqrt});
+                                                  Tanh, Swish, Hswish, Mish, Hsigmoid, Round, Linear, Abs, Square, Sqrt});
     }
 
     return false;

View File

@@ -23,34 +23,36 @@ const std::vector<InferenceEngine::Precision> netPrecisions = {
 };
 
 const std::map<ActivationTypes, std::vector<std::vector<float>>> activationTypes = {
-        {Sigmoid, {}},
-        {Tanh, {}},
-        {Relu, {}},
-        {Exp, {}},
-        {Log, {}},
-        {Sign, {}},
-        {Abs, {}},
-        {Clamp, {{-2.0f, 2.0f}}},
-        {Negative, {}},
-        {Acos, {}},
-        {Asin, {}},
-        {Atan, {}},
-        {Cos, {}},
-        {Cosh, {}},
-        {Floor, {}},
-        {Sin, {}},
-        {Sinh, {}},
-        {Sqrt, {}},
-        {Tan, {}},
-        {Elu, {{0.1f}}},
-        {Erf, {}},
-        {HardSigmoid, {{0.2f, 0.5f}}},
-        {Selu, {{1.6732f, 1.0507f}}},
-        {Ceiling, {}},
-        {Mish, {}},
-        {HSwish, {}},
-        {SoftPlus, {}},
-        {HSigmoid, {}}
+        {Sigmoid, {}},
+        {Tanh, {}},
+        {Relu, {}},
+        {Exp, {}},
+        {Log, {}},
+        {Sign, {}},
+        {Abs, {}},
+        {Clamp, {{-2.0f, 2.0f}}},
+        {Negative, {}},
+        {Acos, {}},
+        {Asin, {}},
+        {Atan, {}},
+        {Cos, {}},
+        {Cosh, {}},
+        {Floor, {}},
+        {Sin, {}},
+        {Sinh, {}},
+        {Sqrt, {}},
+        {Tan, {}},
+        {Elu, {{0.1f}}},
+        {Erf, {}},
+        {HardSigmoid, {{0.2f, 0.5f}}},
+        {Selu, {{1.6732f, 1.0507f}}},
+        {Ceiling, {}},
+        {Mish, {}},
+        {HSwish, {}},
+        {SoftPlus, {}},
+        {HSigmoid, {}},
+        {RoundHalfToEven, {}},
+        {RoundHalfAwayFromZero, {}}
 };
 
 const std::map<ActivationTypes, std::vector<std::vector<float>>> activationParamTypes = {

@@ -1 +1 @@
-Subproject commit d7d8ed46078b637794bc91215e1a982bb0f1683a
+Subproject commit 5ef085d5af65e8966e03cdfcbaa65761d61a5c9a

View File

@@ -153,13 +153,11 @@ def test_round_even():
     assert list(node.get_output_shape(0)) == [3, 10]
     assert node.get_output_element_type(0) == Type.f32
 
-    # Excluded because this part needs mklddn implementation of Round operation
-    # Need to uncomment and check when 37651 will be done.
-    # input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
-    # expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
-    # result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN")
-    # assert np.allclose(result, expected)
+    input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
+    expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
+    result = run_op_node([input_tensor], ng.round, "HALF_TO_EVEN")
+    assert np.allclose(result, expected)
 
 
 def test_round_away():
@@ -172,13 +170,11 @@ def test_round_away():
     assert list(node.get_output_shape(0)) == [3, 10]
     assert node.get_output_element_type(0) == Type.f32
 
-    # Excluded because this part needs mklddn implementation of Round operation
-    # Need to uncomment and check when 37651 will be done.
-    # input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
-    # expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
-    # result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
-    # assert np.allclose(result, expected)
+    input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
+    expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
+    result = run_op_node([input_tensor], ng.round, "HALF_AWAY_FROM_ZERO")
+    assert np.allclose(result, expected)
 
 
 def test_hsigmoid():

View File

@@ -338,7 +338,6 @@ tests_expected_to_fail = [
      "OnnxBackendNodeModelTest.test_clip_default_int8_max_cpu"),
     (xfail_issue_38091,
      "OnnxBackendNodeModelTest.test_gather_negative_indices_cpu",
-     "OnnxBackendNodeModelTest.test_round_cpu",
      "OnnxBackendNodeModelTest.test_mvn_cpu",
      "OnnxBackendNodeModelTest.test_elu_example_cpu"),
     (xfail_issue_35929,