diff --git a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md
index afd35f1cf20..6636736057a 100644
--- a/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md
+++ b/docs/MO_DG/prepare_model/Supported_Frameworks_Layers.md
@@ -150,6 +150,7 @@ Standard TensorFlow\* operations:
+| EuclideanNorm | No |
| ExpandDims | No |
| ExperimentalSparseWeightedSum | CPU only |
| ExtractImagePatches | No |
| Fill | No |
| Floor | No |
| FusedBatchNorm | No |
@@ -365,6 +366,8 @@ Standard ONNX\* operators:
| ROIAlign | No |
| Range | No |
| Reciprocal | No |
+| ReduceL1 | No |
+| ReduceL2 | No |
| ReduceMax | No |
| ReduceMean | No |
| ReduceMin | No |
diff --git a/docs/ops/opset4.md b/docs/ops/opset4.md
index 03994c70187..16a857d43fc 100644
--- a/docs/ops/opset4.md
+++ b/docs/ops/opset4.md
@@ -101,7 +101,8 @@ declared in `namespace opset4`.
* [Range](generation/Range_4.md)
* [ReLU](activation/ReLU_1.md)
* [ReadValue](infrastructure/ReadValue_3.md)
-* [ReduceLp](reduction/ReduceLp_4.md)
+* [ReduceL1](reduction/ReduceL1_4.md)
+* [ReduceL2](reduction/ReduceL2_4.md)
* [ReduceLogicalAnd](reduction/ReduceLogicalAnd_1.md)
* [ReduceLogicalOr](reduction/ReduceLogicalOr_1.md)
* [ReduceMax](reduction/ReduceMax_1.md)
diff --git a/docs/ops/reduction/ReduceLp_4.md b/docs/ops/reduction/ReduceL1_4.md
similarity index 76%
rename from docs/ops/reduction/ReduceLp_4.md
rename to docs/ops/reduction/ReduceL1_4.md
index c0ea0d708dc..d3f9d3160cc 100644
--- a/docs/ops/reduction/ReduceLp_4.md
+++ b/docs/ops/reduction/ReduceL1_4.md
@@ -1,10 +1,10 @@
-## ReduceLp {#openvino_docs_ops_reduction_ReduceLp_4}
+## ReduceL1 {#openvino_docs_ops_reduction_ReduceL1_4}
-**Versioned name**: *ReduceLp-4*
+**Versioned name**: *ReduceL1-4*
**Category**: *Reduction*
-**Short description**: *ReduceLp* operation performs reduction with finding the Lp norm of the 1st input tensor in slices specified by the 2nd input.
+**Short description**: *ReduceL1* operation performs reduction with finding the L1 norm (sum of absolute values) of the 1st input tensor in slices specified by the 2nd input.
**Attributes**
@@ -20,9 +20,7 @@
* **1**: Input tensor x of type *T1*. **Required.**
-* **2**: Scalar or 1D tensor of type *T_IND* with axis indices for the 1st input along which reduction is performed. Accepted range is `[-r, r-1]` where where `r` is the rank of input tensor, all values must be unique, repeats are not allowed. **Required.**
-
-* **3**: Scalar of type *T2* with value order `p` of the normalization. Possible values: `1` for L1 or `2` for L2. **Required.**
+* **2**: Scalar or 1D tensor of type *T_IND* with axis indices for the 1st input along which reduction is performed. Accepted range is `[-r, r - 1]` where `r` is the rank of the input tensor. All values must be unique; repeats are not allowed. **Required.**
**Outputs**
@@ -30,17 +28,17 @@
**Types**
-* *T1*: any supported numeric type.
-* *T2*: any supported integer type.
-* *T_IND*: `int64` or `int32`.
+* *T1*: any supported numeric type.
+* *T_IND*: `int64` or `int32`.
**Detailed Description**
-Each element in the output is the result of reduction with finding a Lp norm operation along dimensions specified by the 2nd input:
+Each element in the output is the result of reduction with finding the L1 norm along dimensions specified by the 2nd input:
- `output[i0, i1, ..., iN] = Lp[j0,..., jN](x[j0, ..., jN]))`
+   `output[i0, i1, ..., iN] = L1[j0, ..., jN](x[j0, ..., jN])`
+
+Where indices i0, ..., iN run through all valid indices for the 1st input, and the L1 norm `L1[j0, ..., jN]` has `jk = ik` for those dimensions `k` that are not in the set of indices specified by the 2nd input of the operation.
-Where indices i0, ..., iN run through all valid indices for the 1st input and finding the Lp norm `Lp[j0, ..., jN]` have `jk = ik` for those dimensions `k` that are not in the set of indices specified by the 2nd input of the operation.
Corner cases:
1. When the 2nd input is an empty list, then this operation does nothing, it is an identity.
@@ -49,7 +47,7 @@ Corner cases:
**Example**

```xml
-<layer id="1" type="ReduceLp" ...>
+<layer id="1" type="ReduceL1" ...>
    <data keep_dims="True" />
    <input>
        <port id="0">
            <dim>6</dim>
            <dim>12</dim>
            <dim>10</dim>
            <dim>24</dim>
        </port>
        <port id="1">
            <dim>2</dim>         <!-- value is [2, 3] that means independent reduction in each channel and batch -->
        </port>
    </input>
    <output>
        <port id="2">
            <dim>6</dim>
            <dim>12</dim>
            <dim>1</dim>
            <dim>1</dim>
        </port>
    </output>
</layer>
```

```xml
-<layer id="1" type="ReduceLp" ...>
+<layer id="1" type="ReduceL1" ...>
    <data keep_dims="False" />
    <input>
        <port id="0">
            <dim>6</dim>
            <dim>12</dim>
            <dim>10</dim>
            <dim>24</dim>
        </port>
        <port id="1">
            <dim>2</dim>         <!-- value is [2, 3] that means independent reduction in each channel and batch -->
        </port>
    </input>
    <output>
        <port id="2">
            <dim>6</dim>
            <dim>12</dim>
        </port>
    </output>
</layer>
```

```xml
-<layer id="1" type="ReduceLp" ...>
+<layer id="1" type="ReduceL1" ...>
    <data keep_dims="False" />
    <input>
        <port id="0">
            <dim>6</dim>
            <dim>12</dim>
            <dim>10</dim>
            <dim>24</dim>
        </port>
        <port id="1">
            <dim>1</dim>         <!-- value is [1] that means reduction over the second dimension -->
        </port>
    </input>
    <output>
        <port id="2">
            <dim>6</dim>
            <dim>10</dim>
            <dim>24</dim>
        </port>
    </output>
</layer>
```

```xml
-<layer id="1" type="ReduceLp" ...>
+<layer id="1" type="ReduceL1" ...>
    <data keep_dims="False" />
    <input>
        <port id="0">
            <dim>6</dim>
            <dim>12</dim>
            <dim>10</dim>
            <dim>24</dim>
        </port>
        <port id="1">
            <dim>1</dim>         <!-- value is [-2] that means reduction over the third dimension -->
        </port>
    </input>
    <output>
        <port id="2">
            <dim>6</dim>
            <dim>12</dim>
            <dim>24</dim>
        </port>
    </output>
</layer>
```
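A quick NumPy check of the shape behaviour shown in the examples above (input `[6, 12, 10, 24]`, reduction axes `[2, 3]`); this illustrates the spec, it is not OpenVINO code:

```python
import numpy as np

x = np.ones((6, 12, 10, 24), dtype=np.float32)

keep = np.sum(np.abs(x), axis=(2, 3), keepdims=True)   # keep_dims == True
drop = np.sum(np.abs(x), axis=(2, 3), keepdims=False)  # keep_dims == False
assert keep.shape == (6, 12, 1, 1)
assert drop.shape == (6, 12)
```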
diff --git a/docs/ops/reduction/ReduceL2_4.md b/docs/ops/reduction/ReduceL2_4.md
new file mode 100644
index 00000000000..918f8cb4d5c
--- /dev/null
+++ b/docs/ops/reduction/ReduceL2_4.md
@@ -0,0 +1,143 @@
+## ReduceL2 {#openvino_docs_ops_reduction_ReduceL2_4}
+
+**Versioned name**: *ReduceL2-4*
+
+**Category**: *Reduction*
+
+**Short description**: *ReduceL2* operation performs reduction with finding the L2 norm (square root of sum of squares) of the 1st input tensor in slices specified by the 2nd input.
+
+**Attributes**
+
+* *keep_dims*
+
+ * **Description**: If set to `True` it holds axes that are used for reduction. For each such axis, output dimension is equal to 1.
+ * **Range of values**: True or False
+ * **Type**: `boolean`
+ * **Default value**: False
+ * **Required**: *no*
+
+**Inputs**
+
+* **1**: Input tensor x of type *T1*. **Required.**
+
+* **2**: Scalar or 1D tensor of type *T_IND* with axis indices for the 1st input along which reduction is performed. Accepted range is `[-r, r - 1]` where `r` is the rank of the input tensor. All values must be unique; repeats are not allowed. **Required.**
+
+**Outputs**
+
+* **1**: Tensor of the same type as the 1st input tensor and `shape[i] = shapeOf(input1)[i]` for all `i` that is not in the list of axes from the 2nd input. For dimensions from the 2nd input tensor, `shape[i] == 1` if `keep_dims == True`, or `i`-th dimension is removed from the output otherwise.
+
+**Types**
+
+* *T1*: any supported floating-point type.
+* *T_IND*: `int64` or `int32`.
+
+**Detailed Description**
+
+Each element in the output is the result of reduction with finding the L2 norm along dimensions specified by the 2nd input:
+
+   `output[i0, i1, ..., iN] = L2[j0, ..., jN](x[j0, ..., jN])`
+
+Where indices i0, ..., iN run through all valid indices for the 1st input, and the L2 norm `L2[j0, ..., jN]` has `jk = ik` for those dimensions `k` that are not in the set of indices specified by the 2nd input of the operation.
+
+Corner cases:
+
+1. When the 2nd input is an empty list, the operation does nothing and acts as an identity.
+2. When the 2nd input contains all dimensions of the 1st input, a single reduction scalar value is calculated for the entire input tensor.
+
+**Example**
+
+```xml
+<layer id="1" type="ReduceL2" ...>
+    <data keep_dims="True" />
+    <input>
+        <port id="0">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>10</dim>
+            <dim>24</dim>
+        </port>
+        <port id="1">
+            <dim>2</dim>         <!-- value is [2, 3] that means independent reduction in each channel and batch -->
+        </port>
+    </input>
+    <output>
+        <port id="2">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>1</dim>
+            <dim>1</dim>
+        </port>
+    </output>
+</layer>
+```
+
+```xml
+<layer id="1" type="ReduceL2" ...>
+    <data keep_dims="False" />
+    <input>
+        <port id="0">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>10</dim>
+            <dim>24</dim>
+        </port>
+        <port id="1">
+            <dim>2</dim>         <!-- value is [2, 3] that means independent reduction in each channel and batch -->
+        </port>
+    </input>
+    <output>
+        <port id="2">
+            <dim>6</dim>
+            <dim>12</dim>
+        </port>
+    </output>
+</layer>
+```
+
+```xml
+<layer id="1" type="ReduceL2" ...>
+    <data keep_dims="False" />
+    <input>
+        <port id="0">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>10</dim>
+            <dim>24</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>         <!-- value is [1] that means reduction over the second dimension -->
+        </port>
+    </input>
+    <output>
+        <port id="2">
+            <dim>6</dim>
+            <dim>10</dim>
+            <dim>24</dim>
+        </port>
+    </output>
+</layer>
+```
+
+```xml
+<layer id="1" type="ReduceL2" ...>
+    <data keep_dims="False" />
+    <input>
+        <port id="0">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>10</dim>
+            <dim>24</dim>
+        </port>
+        <port id="1">
+            <dim>1</dim>         <!-- value is [-2] that means reduction over the third dimension -->
+        </port>
+    </input>
+    <output>
+        <port id="2">
+            <dim>6</dim>
+            <dim>12</dim>
+            <dim>24</dim>
+        </port>
+    </output>
+</layer>
+```
\ No newline at end of file
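The two corner cases above are easy to demonstrate with a NumPy stand-in (illustrative only):

```python
import numpy as np

def reduce_l2(x, axes, keep_dims=False):
    if len(axes) == 0:
        return x  # corner case 1: an empty axes list makes the op an identity
    return np.sqrt(np.sum(np.square(x), axis=tuple(axes), keepdims=keep_dims))

x = np.random.rand(6, 12, 10, 24).astype(np.float32)
assert np.array_equal(reduce_l2(x, []), x)       # identity
assert reduce_l2(x, [0, 1, 2, 3]).shape == ()    # corner case 2: scalar result
```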
diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
index 12bd1705e78..df81cba2b44 100644
--- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp
+++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
@@ -23,7 +23,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include
@@ -90,7 +90,9 @@ InferenceEngine::ICNNNetwork::Ptr clDNNEngine::CloneAndTransformNetwork(const In
return std::dynamic_pointer_cast(node) ||
std::dynamic_pointer_cast(node) ||
std::dynamic_pointer_cast(node) ||
- std::dynamic_pointer_cast(node);
+ std::dynamic_pointer_cast(node) ||
+                   std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL1>(node) ||
+                   std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL2>(node);
};
auto nGraphFunc = clonedNetwork->getFunction();
// Disable shape inference (WA for generic operations)
diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
index cced81480b2..a108f66ed65 100644
--- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
+++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
@@ -525,6 +525,34 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
res->params = params;
return res;
});
+
+ addSpecificCreator({"ReduceMin", "ReduceMax", "ReduceMean", "ReduceProd", "ReduceSum", "ReduceL1", "ReduceL2"},
+        [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string> params) -> CNNLayerPtr {
+ LayerParams attrs = {node->get_friendly_name(), node->description(), details::convertPrecision(node->get_output_element_type(0))};
+            auto reduce_node = std::dynamic_pointer_cast<ngraph::op::util::ArithmeticReductionKeepDims>(node);
+ auto res = std::make_shared(attrs);
+ res->params = params;
+ res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
+ return res;
+ });
+
+    addSpecificCreator({"ReduceLogicalAnd"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string> params) -> CNNLayerPtr {
+ LayerParams attrs = {node->get_friendly_name(), "ReduceAnd", details::convertPrecision(node->get_output_element_type(0))};
+        auto reduce_node = std::dynamic_pointer_cast<ngraph::op::util::LogicalReductionKeepDims>(node);
+ auto res = std::make_shared(attrs);
+ res->params = params;
+ res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
+ return res;
+ });
+
+    addSpecificCreator({"ReduceLogicalOr"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map<std::string, std::string> params) -> CNNLayerPtr {
+ LayerParams attrs = {node->get_friendly_name(), "ReduceOr", details::convertPrecision(node->get_output_element_type(0))};
+        auto reduce_node = std::dynamic_pointer_cast<ngraph::op::util::LogicalReductionKeepDims>(node);
+ auto res = std::make_shared(attrs);
+ res->params = params;
+ res->params["keep_dims"] = reduce_node->get_keep_dims() ? "True" : "False";
+ return res;
+ });
}
CNNLayerPtr InferenceEngine::details::CNNLayerCreator::create() {
@@ -613,11 +641,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(),
std::make_shared>(),
std::make_shared>(),
- std::make_shared>(),
- std::make_shared>(),
- std::make_shared>(),
- std::make_shared>(),
- std::make_shared>(),
std::make_shared>(),
std::make_shared>(),
std::make_shared>(),
@@ -648,8 +671,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(),
std::make_shared>(),
std::make_shared>(),
- std::make_shared>(),
- std::make_shared>(),
std::make_shared>(),
std::make_shared>(),
};
diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
index abe61fb7dc9..7c8565a194e 100644
--- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
+++ b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp
@@ -1799,66 +1799,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::share
return res;
}
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceMin",
- details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
- auto castedLayer = ngraph::as_type_ptr(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "true" : "false";
- return res;
-}
-
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceMax",
- details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
- auto castedLayer = ngraph::as_type_ptr(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "true" : "false";
- return res;
-}
-
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceMean",
- details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
- auto castedLayer = ngraph::as_type_ptr(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "true" : "false";
- return res;
-}
-
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceProd",
- details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
- auto castedLayer = ngraph::as_type_ptr(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "true" : "false";
- return res;
-}
-
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceSum",
- details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
- auto castedLayer = ngraph::as_type_ptr(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "true" : "false";
- return res;
-}
-
template <>
CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
THROW_IE_EXCEPTION << "NormalizeL2 operation should be converted to NormalizeIE";
@@ -2099,30 +2039,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::
return res;
}
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceAnd", details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
-
- auto castedLayer = std::dynamic_pointer_cast(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "True" : "False";
- return res;
-}
-
-template <>
-CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
- LayerParams params = {layer->get_friendly_name(), "ReduceOr", details::convertPrecision(layer->get_output_element_type(0))};
- auto res = std::make_shared(params);
-
- auto castedLayer = std::dynamic_pointer_cast(layer);
- if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
- res->params["keep_dims"] = castedLayer->get_keep_dims() ? "True" : "False";
- return res;
-}
-
template <>
CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
THROW_IE_EXCEPTION << "NonMaxSuppression operation must be converted to NonMaxSuppressionIE operation.";
diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index 0219c83870f..95f3747d57b 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -32,7 +32,7 @@
#include
#include
#include
-#include
+#include
#include
#include
#include "ngraph_ops/fully_connected.hpp"
@@ -80,7 +80,9 @@ static void Transformation(ICNNNetwork::Ptr& clonedNetwork) {
return std::dynamic_pointer_cast(node) ||
std::dynamic_pointer_cast(node) ||
- std::dynamic_pointer_cast(node);
+ std::dynamic_pointer_cast(node) ||
+               std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL1>(node) ||
+               std::dynamic_pointer_cast<const ::ngraph::opset4::ReduceL2>(node);
};
auto nGraphFunc = clonedNetwork->getFunction();
// Disable shape inference (WA for generic operations)
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
index 32c76d4b916..764ca7962b0 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -330,11 +330,6 @@ std::shared_ptr V10Parser::createNode(const std::vector>("Range"),
std::make_shared>("PriorBox"),
std::make_shared>("PriorBoxClustered"),
- std::make_shared>("ReduceMax"),
- std::make_shared>("ReduceMin"),
- std::make_shared>("ReduceMean"),
- std::make_shared>("ReduceProd"),
- std::make_shared>("ReduceSum"),
std::make_shared>("ReorgYolo"),
std::make_shared>("RegionYolo"),
std::make_shared>("Result"),
@@ -362,8 +357,6 @@ std::shared_ptr V10Parser::createNode(const std::vector>("LogicalOr"),
std::make_shared>("LogicalXor"),
std::make_shared>("LogicalNot"),
- std::make_shared>("ReduceLogicalAnd"),
- std::make_shared>("ReduceLogicalOr"),
};
// Check that operation in default opsets
@@ -1496,76 +1489,6 @@ std::shared_ptr V10Parser::LayerCreator::cr
return std::make_shared(inputs[0], ngraph::Strides {stride});
}
-// ReduceMin layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims", false));
-}
-
-// ReduceMax layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims", false));
-}
-
-// ReduceMean layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims", false));
-}
-
-// ReduceProd layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims", false));
-}
-
-// ReduceSum layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims", false));
-}
-
// Transpose layer
template <>
std::shared_ptr V10Parser::LayerCreator::createLayer(
@@ -2177,34 +2100,6 @@ std::shared_ptr V10Parser::LayerCreator(inputs[0]);
}
-// ReduceLogicalAnd layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector & inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims"));
-}
-
-// ReduceLogicalOr layer
-template <>
-std::shared_ptr V10Parser::LayerCreator::createLayer(
- const ngraph::OutputVector & inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 2);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- return std::make_shared(inputs[0], inputs[1], GetBoolAttr(dn, "keep_dims"));
-}
-
// NonMaxSuppression layer
template <>
std::shared_ptr V10Parser::LayerCreator::createLayer(
diff --git a/inference-engine/src/transformations/include/transformations/reduce_l1_decomposition.hpp b/inference-engine/src/transformations/include/transformations/reduce_l1_decomposition.hpp
new file mode 100644
index 00000000000..f35134b0d6a
--- /dev/null
+++ b/inference-engine/src/transformations/include/transformations/reduce_l1_decomposition.hpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <transformations_visibility.hpp>
+
+#include <ngraph/ngraph.hpp>
+#include <ngraph/pass/graph_rewrite.hpp>
+#include "ngraph/pattern/matcher.hpp"
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API ReduceL1Decomposition;
+
+} // namespace pass
+} // namespace ngraph
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Decomposes ReduceL1 into ReduceSum(abs(x)).
+ */
+class ngraph::pass::ReduceL1Decomposition: public ngraph::pass::MatcherPass {
+public:
+ ReduceL1Decomposition();
+};
diff --git a/inference-engine/src/transformations/include/transformations/reduce_l2_decomposition.hpp b/inference-engine/src/transformations/include/transformations/reduce_l2_decomposition.hpp
new file mode 100644
index 00000000000..180ea79c911
--- /dev/null
+++ b/inference-engine/src/transformations/include/transformations/reduce_l2_decomposition.hpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include <transformations_visibility.hpp>
+
+#include <ngraph/ngraph.hpp>
+#include <ngraph/pass/graph_rewrite.hpp>
+#include "ngraph/pattern/matcher.hpp"
+
+namespace ngraph {
+namespace pass {
+
+class TRANSFORMATIONS_API ReduceL2Decomposition;
+
+} // namespace pass
+} // namespace ngraph
+
+/**
+ * @ingroup ie_transformation_common_api
+ * @brief Decomposes ReduceL2 into sqrt(ReduceSum(x * x)).
+ */
+class ngraph::pass::ReduceL2Decomposition: public ngraph::pass::MatcherPass {
+public:
+ ReduceL2Decomposition();
+};
diff --git a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
index bccce874c08..e390b4a84e0 100644
--- a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
+++ b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_opset1_to_legacy.cpp
@@ -47,6 +47,8 @@
#include
#include
#include
+#include <transformations/reduce_l1_decomposition.hpp>
+#include <transformations/reduce_l2_decomposition.hpp>
#include
#include
@@ -64,6 +66,11 @@ bool ngraph::pass::ConvertOpSet1ToLegacy::run_on_function(std::shared_ptr<ngraph::Function> f)
     manager.register_pass();
+ // the following two transformations produce ReduceSum operations so they
+ // must be executed before the ConvertReduceSumToPooling transformation
+    manager.register_pass<ngraph::pass::ReduceL1Decomposition>();
+    manager.register_pass<ngraph::pass::ReduceL2Decomposition>();
+
// List if Decomposition and Conversion transformations that can be
// applied simultaneously in a single graph traversal
auto decomp = manager.register_pass();
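The ordering constraint noted in the comment above can be modelled with a toy pipeline; this is plain Python, not the nGraph pass manager API:

```python
# Each "pass" rewrites a list of op names. If the decomposition ran after the
# ReduceSum-to-pooling conversion, the ReduceSum it emits would be left behind.
def reduce_l1_decomposition(ops):
    out = []
    for op in ops:
        out.extend(["Abs", "ReduceSum"] if op == "ReduceL1" else [op])
    return out

def convert_reduce_sum_to_pooling(ops):
    return ["Pooling" if op == "ReduceSum" else op for op in ops]

graph = ["Parameter", "ReduceL1", "Result"]
print(convert_reduce_sum_to_pooling(reduce_l1_decomposition(graph)))
# ['Parameter', 'Abs', 'Pooling', 'Result'] -- correct order
print(reduce_l1_decomposition(convert_reduce_sum_to_pooling(graph)))
# ['Parameter', 'Abs', 'ReduceSum', 'Result'] -- conversion ran too early
```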
diff --git a/inference-engine/src/transformations/src/transformations/reduce_l1_decomposition.cpp b/inference-engine/src/transformations/src/transformations/reduce_l1_decomposition.cpp
new file mode 100644
index 00000000000..c543b96ee6c
--- /dev/null
+++ b/inference-engine/src/transformations/src/transformations/reduce_l1_decomposition.cpp
@@ -0,0 +1,38 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/reduce_l1_decomposition.hpp"
+
+#include <memory>
+
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/rt_info.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+
+ngraph::pass::ReduceL1Decomposition::ReduceL1Decomposition() {
+ // decomposes ReduceL1 operations into ReduceSum(abs(x))
+    auto reduce_l1 = ngraph::pattern::wrap_type<ngraph::opset4::ReduceL1>();
+
+ ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) {
+ auto &pattern_to_output = m.get_pattern_value_map();
+        auto reduce_l1_node = std::dynamic_pointer_cast<ngraph::opset4::ReduceL1>(pattern_to_output.at(reduce_l1).get_node_shared_ptr());
+
+ if (m_transformation_callback(reduce_l1_node)) {
+ return false;
+ }
+
+        auto abs = std::make_shared<ngraph::opset4::Abs>(reduce_l1_node->input_value(0));
+        auto reduce_sum = std::make_shared<ngraph::opset4::ReduceSum>(abs, reduce_l1_node->input_value(1), reduce_l1_node->get_keep_dims());
+
+ reduce_sum->set_friendly_name(m.get_match_root()->get_friendly_name());
+ ngraph::copy_runtime_info(reduce_l1_node,
+ {abs, reduce_sum});
+ ngraph::replace_node(m.get_match_root(), reduce_sum);
+ return true;
+ };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(reduce_l1, "ReduceL1Decomposition");
+ register_matcher(m, callback);
+}
+
diff --git a/inference-engine/src/transformations/src/transformations/reduce_l2_decomposition.cpp b/inference-engine/src/transformations/src/transformations/reduce_l2_decomposition.cpp
new file mode 100644
index 00000000000..d3887f549e2
--- /dev/null
+++ b/inference-engine/src/transformations/src/transformations/reduce_l2_decomposition.cpp
@@ -0,0 +1,39 @@
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "transformations/reduce_l2_decomposition.hpp"
+
+#include <memory>
+
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/rt_info.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+
+ngraph::pass::ReduceL2Decomposition::ReduceL2Decomposition() {
+ // decomposes ReduceL2 operations into sqrt(ReduceSum(x * x))
+    auto reduce_l2 = ngraph::pattern::wrap_type<ngraph::opset4::ReduceL2>();
+
+ ngraph::matcher_pass_callback callback = [=](ngraph::pattern::Matcher &m) {
+ auto &pattern_to_output = m.get_pattern_value_map();
+        auto reduce_l2_node = std::dynamic_pointer_cast<ngraph::opset4::ReduceL2>(pattern_to_output.at(reduce_l2).get_node_shared_ptr());
+
+ if (m_transformation_callback(reduce_l2_node)) {
+ return false;
+ }
+
+ auto const_2 = ngraph::opset4::Constant::create(reduce_l2_node->input_value(0).get_element_type(), Shape{}, {2.0f});
+        auto square = std::make_shared<ngraph::opset4::Power>(reduce_l2_node->input_value(0), const_2);
+        auto reduce_sum = std::make_shared<ngraph::opset4::ReduceSum>(square, reduce_l2_node->input_value(1), reduce_l2_node->get_keep_dims());
+        auto sqrt = std::make_shared<ngraph::opset4::Sqrt>(reduce_sum);
+        sqrt->set_friendly_name(m.get_match_root()->get_friendly_name());
+ ngraph::copy_runtime_info(reduce_l2_node,
+ {sqrt, reduce_sum, square, const_2});
+ ngraph::replace_node(m.get_match_root(), sqrt);
+ return true;
+ };
+
+    auto m = std::make_shared<ngraph::pattern::Matcher>(reduce_l2, "ReduceL2Decomposition");
+ register_matcher(m, callback);
+}
+
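Both passes rely on a simple numeric identity, checked here in NumPy against `np.linalg.norm` (illustrative, separate from the transformation code):

```python
import numpy as np

x = np.random.randn(4, 5).astype(np.float32)

# ReduceL1Decomposition: ReduceL1(x, axes) -> ReduceSum(Abs(x), axes)
assert np.allclose(np.sum(np.abs(x), axis=1),
                   np.linalg.norm(x, ord=1, axis=1))

# ReduceL2Decomposition: ReduceL2(x, axes) -> Sqrt(ReduceSum(Pow(x, 2), axes))
assert np.allclose(np.sqrt(np.sum(np.power(x, 2.0), axis=1)),
                   np.linalg.norm(x, ord=2, axis=1))
```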
diff --git a/inference-engine/tests/functional/inference_engine/transformations/reduce_l1_decomposition_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/reduce_l1_decomposition_test.cpp
new file mode 100644
index 00000000000..db9a68a1b58
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/transformations/reduce_l1_decomposition_test.cpp
@@ -0,0 +1,48 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <transformations/reduce_l1_decomposition.hpp>
+#include <transformations/init_node_info.hpp>
+#include <transformations/utils/utils.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+
+TEST(TransformationTests, ReduceL1DecompositionTest) {
+    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+ {
+        auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+        auto axes = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::i32, ngraph::Shape{1});
+        auto reduce_l1 = std::make_shared<ngraph::opset4::ReduceL1>(data, axes, true);
+
+        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reduce_l1}, ngraph::ParameterVector{data, axes});
+
+ ngraph::pass::Manager manager;
+        manager.register_pass<ngraph::pass::InitNodeInfo>();
+        manager.register_pass<ngraph::pass::ReduceL1Decomposition>();
+ manager.run_passes(f);
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+        auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+        auto axes = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::i32, ngraph::Shape{1});
+        auto abs = std::make_shared<ngraph::opset4::Abs>(data);
+        auto reduce_sum = std::make_shared<ngraph::opset4::ReduceSum>(abs, axes, true);
+
+        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{reduce_sum}, ngraph::ParameterVector{data, axes});
+ }
+
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
diff --git a/inference-engine/tests/functional/inference_engine/transformations/reduce_l2_decomposition_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/reduce_l2_decomposition_test.cpp
new file mode 100644
index 00000000000..4306fbab7ab
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/transformations/reduce_l2_decomposition_test.cpp
@@ -0,0 +1,49 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <gtest/gtest.h>
+
+#include <string>
+#include <memory>
+
+#include <ngraph/function.hpp>
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pass/manager.hpp>
+#include <transformations/reduce_l2_decomposition.hpp>
+#include <transformations/init_node_info.hpp>
+#include <transformations/utils/utils.hpp>
+
+#include "common_test_utils/ngraph_test_utils.hpp"
+
+using namespace testing;
+
+TEST(TransformationTests, ReduceL2DecompositionTest) {
+    std::shared_ptr<ngraph::Function> f(nullptr), f_ref(nullptr);
+ {
+        auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+        auto axes = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::i32, ngraph::Shape{1});
+        auto reduce_l2 = std::make_shared<ngraph::opset4::ReduceL2>(data, axes, true);
+
+        f = std::make_shared<ngraph::Function>(ngraph::NodeVector{reduce_l2}, ngraph::ParameterVector{data, axes});
+
+ ngraph::pass::Manager manager;
+        manager.register_pass<ngraph::pass::InitNodeInfo>();
+        manager.register_pass<ngraph::pass::ReduceL2Decomposition>();
+ manager.run_passes(f);
+ ASSERT_NO_THROW(check_rt_info(f));
+ }
+
+ {
+        auto data = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::f32, ngraph::PartialShape::dynamic(1));
+        auto axes = std::make_shared<ngraph::opset4::Parameter>(ngraph::element::i32, ngraph::Shape{1});
+        auto pow = std::make_shared<ngraph::opset4::Power>(data, ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{}, {2.0}));
+        auto reduce_sum = std::make_shared<ngraph::opset4::ReduceSum>(pow, axes, true);
+        auto sqrt = std::make_shared<ngraph::opset4::Sqrt>(reduce_sum);
+
+        f_ref = std::make_shared<ngraph::Function>(ngraph::NodeVector{sqrt}, ngraph::ParameterVector{data, axes});
+ }
+
+ auto res = compare_functions(f, f_ref);
+ ASSERT_TRUE(res.first) << res.second;
+}
diff --git a/model-optimizer/automation/package_BOM.txt b/model-optimizer/automation/package_BOM.txt
index 935bde2b5d4..8b884cb9002 100644
--- a/model-optimizer/automation/package_BOM.txt
+++ b/model-optimizer/automation/package_BOM.txt
@@ -38,7 +38,7 @@ extensions/back/PackBinaryWeights.py
extensions/back/pass_separator.py
extensions/back/priorbox_mutation.py
extensions/back/ProposalMutation.py
-extensions/back/ReduceToPooling.py
+extensions/back/ReduceMerge.py
extensions/back/ReduceTransposeDimensions.py
extensions/back/remove_last_softmax_pattern.py
extensions/back/RemoveUselessConvert.py
@@ -291,12 +291,7 @@ extensions/front/onnx/quantize_ext.py
extensions/front/onnx/quantize_linear_ext.py
extensions/front/onnx/quantize_linear_resolver.py
extensions/front/onnx/range_ext.py
-extensions/front/onnx/reduce_l2_ext.py
-extensions/front/onnx/reduce_max_ext.py
-extensions/front/onnx/reduce_mean_ext.py
-extensions/front/onnx/reduce_min_ext.py
-extensions/front/onnx/reduce_prod_ext.py
-extensions/front/onnx/reduce_sum_ext.py
+extensions/front/onnx/reduce_ext.py
extensions/front/onnx/remove_filtering_boxes_by_size.py
extensions/front/onnx/resize_ext.py
extensions/front/onnx/resize_to_interpolate.py
@@ -327,7 +322,6 @@ extensions/front/PowerToEltwises.py
extensions/front/rank_decomposer.py
extensions/front/reciprocal.py
extensions/front/reduce_axis_normalizer.py
-extensions/front/ReduceL2Decomposition.py
extensions/front/reshape_dim_normalizer.py
extensions/front/restore_ports.py
extensions/front/scatter_normalizer.py
diff --git a/model-optimizer/extensions/back/ReduceToPooling.py b/model-optimizer/extensions/back/ReduceMerge.py
similarity index 100%
rename from model-optimizer/extensions/back/ReduceToPooling.py
rename to model-optimizer/extensions/back/ReduceMerge.py
diff --git a/model-optimizer/extensions/back/TransposeReduceFusing.py b/model-optimizer/extensions/back/TransposeReduceFusing.py
index 5e0b48783a6..13d638a1af5 100644
--- a/model-optimizer/extensions/back/TransposeReduceFusing.py
+++ b/model-optimizer/extensions/back/TransposeReduceFusing.py
@@ -18,7 +18,7 @@ from typing import Dict
import numpy as np
from extensions.back.FuseTransposesSequence import FuseTransposesSequence
-from extensions.back.ReduceToPooling import ReduceMerge
+from extensions.back.ReduceMerge import ReduceMerge
from extensions.ops.ReduceOps import reduce_map
from extensions.ops.gather import Gather
from mo.back.replacement import BackReplacementPattern
diff --git a/model-optimizer/extensions/front/ReduceL2Decomposition.py b/model-optimizer/extensions/front/ReduceL2Decomposition.py
deleted file mode 100644
index 11d91fae5a1..00000000000
--- a/model-optimizer/extensions/front/ReduceL2Decomposition.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
- Copyright (C) 2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-from extensions.front.reduce_axis_normalizer import ReduceAxisNormalizer
-from extensions.ops.ReduceOps import ReduceSum
-from extensions.ops.elementwise import Pow, Mul
-from mo.front.common.partial_infer.utils import int64_array, float_array
-from mo.front.common.replacement import FrontReplacementOp
-from mo.front.tf.graph_utils import create_op_with_const_inputs
-from mo.graph.graph import Graph, Node, rename_node
-
-
-class ReduceL2Decomposition(FrontReplacementOp):
- op = 'ReduceL2'
- enabled = True
-
- def run_before(self):
- return [ReduceAxisNormalizer]
-
- def replace_op(self, graph: Graph, node: Node):
- node_name = node.soft_get('name', node.id)
-
- rename_node(node, node_name + '/TBR')
- sqr_node = Mul(graph, {}).create_node()
- reduce_sum_node = ReduceSum(graph, {'keep_dims': node.soft_get('keep_dims', 0),
- 'axis': node.soft_get('axis', None)}).create_node()
- sqrt_node = create_op_with_const_inputs(graph, Pow, {1: float_array(0.5)})
- rename_node(sqrt_node, node_name)
-
- # Connect nodes
- node.in_port(0).get_connection().set_destination(sqr_node.in_port(0))
- sqr_node.in_port(0).get_connection().add_destination(sqr_node.in_port(1))
- sqr_node.out_port(0).connect(reduce_sum_node.in_port(0))
- reduce_sum_node.out_port(0).connect(sqrt_node.in_port(0))
-
- return [sqrt_node.id]
diff --git a/model-optimizer/extensions/front/ReduceL2Decomposition_test.py b/model-optimizer/extensions/front/ReduceL2Decomposition_test.py
deleted file mode 100644
index 55b666680e5..00000000000
--- a/model-optimizer/extensions/front/ReduceL2Decomposition_test.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
- Copyright (C) 2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-import unittest
-
-import numpy as np
-
-from extensions.front.ReduceL2Decomposition import ReduceL2Decomposition
-from mo.utils.ir_engine.compare_graphs import compare_graphs
-from mo.utils.unittest.graph import build_graph, const
-
-nodes_attributes = {
- 'input': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
- 'reduce_l2': {'type': None, 'kind': 'op', 'op': 'ReduceL2', 'axis': 0, 'name': 'my_reduce', 'keep_dims': 0},
- 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'},
-
- # new layers
- 'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Mul'},
- 'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'op': 'ReduceSum', 'axis': 0, 'keep_dims': 0},
- 'pow': {'type': 'Power', 'kind': 'op', 'op': 'Pow'},
- **const('half', np.array(0.5, dtype=np.float32)),
-}
-
-
-class ReduceL2DecompositionTest(unittest.TestCase):
- def test(self):
- graph = build_graph(nodes_attributes,
- [('input', 'reduce_l2', {'in': 0, 'out': 0}),
- ('reduce_l2', 'result', {'in': 0, 'out': 0}),
- ],
- {}, nodes_with_edges_only=True)
-
- graph_ref = build_graph(nodes_attributes,
- [('input', 'mul', {'in': 0, 'out': 0}),
- ('input', 'mul', {'in': 1, 'out': 0}),
- ('mul', 'reduce_sum', {'in': 0, 'out': 0}),
- ('reduce_sum', 'pow', {'in': 0, 'out': 0}),
- ('half', 'pow', {'in': 1, 'out': 0}),
- ('pow', 'result', {'in': 0, 'out': 0}),
- ],
- {}, nodes_with_edges_only=True)
-
- graph.graph['layout'] = 'NCHW'
- graph.stage = 'front'
-
- ReduceL2Decomposition().find_and_replace_pattern(graph)
-
- (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True)
- self.assertTrue(flag, resp)
- self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pow')[0]]['name'] == 'my_reduce')
diff --git a/model-optimizer/extensions/front/onnx/reduce_ext.py b/model-optimizer/extensions/front/onnx/reduce_ext.py
new file mode 100644
index 00000000000..2bb3b83844e
--- /dev/null
+++ b/model-optimizer/extensions/front/onnx/reduce_ext.py
@@ -0,0 +1,97 @@
+"""
+ Copyright (C) 2020 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from extensions.ops.ReduceOps import ReduceL1, ReduceL2, ReduceMax, ReduceMean, ReduceMin, ReduceProd, ReduceSum
+from mo.front.common.partial_infer.utils import int64_array
+from mo.front.extractor import FrontExtractorOp
+from mo.front.onnx.extractors.utils import onnx_attr
+from mo.graph.graph import Node
+
+
+def update_reduce_node_attrs_with(node: Node, c: callable):
+ axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
+ keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
+ c.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
+
+
+class ReduceL1Extractor(FrontExtractorOp):
+ op = 'ReduceL1'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceL1)
+ return cls.enabled
+
+
+class ReduceL2Extractor(FrontExtractorOp):
+ op = 'ReduceL2'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceL2)
+ return cls.enabled
+
+
+class ReduceMaxFrontExtractor(FrontExtractorOp):
+ op = 'ReduceMax'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceMax)
+ return cls.enabled
+
+
+class ReduceMeanFrontExtractor(FrontExtractorOp):
+ op = 'ReduceMean'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceMean)
+ return cls.enabled
+
+
+class ReduceMinFrontExtractor(FrontExtractorOp):
+ op = 'ReduceMin'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceMin)
+ return cls.enabled
+
+
+class ReduceProdFrontExtractor(FrontExtractorOp):
+ op = 'ReduceProd'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceProd)
+ return cls.enabled
+
+
+class ReduceSumFrontExtractor(FrontExtractorOp):
+ op = 'ReduceSum'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ update_reduce_node_attrs_with(node, ReduceSum)
+ return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/reduce_l2_ext.py b/model-optimizer/extensions/front/onnx/reduce_l2_ext.py
deleted file mode 100644
index 7a7034f0ad8..00000000000
--- a/model-optimizer/extensions/front/onnx/reduce_l2_ext.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Copyright (C) 2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-from extensions.front.reduce_axis_normalizer import ReduceAxisNormalizer
-from extensions.ops.ReduceOps import ReduceL2
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.graph.graph import Node
-
-
-class ReduceL2FrontExtractor(FrontExtractorOp):
- op = 'ReduceL2'
- enabled = True
-
- @classmethod
- def extract(cls, node: Node):
- axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
- keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
- ReduceL2.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
- return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/reduce_max_ext.py b/model-optimizer/extensions/front/onnx/reduce_max_ext.py
deleted file mode 100644
index 2c456aacb3d..00000000000
--- a/model-optimizer/extensions/front/onnx/reduce_max_ext.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from extensions.ops.ReduceOps import ReduceMax
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.graph.graph import Node
-
-
-class ReduceMaxFrontExtractor(FrontExtractorOp):
- op = 'ReduceMax'
- enabled = True
-
- @classmethod
- def extract(cls, node: Node):
- axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
- keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
- ReduceMax.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
- return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/reduce_mean_ext.py b/model-optimizer/extensions/front/onnx/reduce_mean_ext.py
deleted file mode 100644
index 2b3e3c9b567..00000000000
--- a/model-optimizer/extensions/front/onnx/reduce_mean_ext.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from extensions.ops.ReduceOps import ReduceMean
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.graph.graph import Node
-
-
-class ReduceMeanFrontExtractor(FrontExtractorOp):
- op = 'ReduceMean'
- enabled = True
-
- @classmethod
- def extract(cls, node: Node):
- axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
- keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
- ReduceMean.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
- return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/reduce_min_ext.py b/model-optimizer/extensions/front/onnx/reduce_min_ext.py
deleted file mode 100644
index 42c871e88a0..00000000000
--- a/model-optimizer/extensions/front/onnx/reduce_min_ext.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from extensions.ops.ReduceOps import ReduceMin
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.graph.graph import Node
-
-
-class ReduceMinFrontExtractor(FrontExtractorOp):
- op = 'ReduceMin'
- enabled = True
-
- @classmethod
- def extract(cls, node: Node):
- axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
- keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
- ReduceMin.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
- return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/reduce_prod_ext.py b/model-optimizer/extensions/front/onnx/reduce_prod_ext.py
deleted file mode 100644
index d1e87b9dfb9..00000000000
--- a/model-optimizer/extensions/front/onnx/reduce_prod_ext.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from extensions.ops.ReduceOps import ReduceProd
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.graph.graph import Node
-
-
-class ReduceProdFrontExtractor(FrontExtractorOp):
- op = 'ReduceProd'
- enabled = True
-
- @classmethod
- def extract(cls, node: Node):
- axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
- keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
- ReduceProd.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
- return cls.enabled
diff --git a/model-optimizer/extensions/front/onnx/reduce_sum_ext.py b/model-optimizer/extensions/front/onnx/reduce_sum_ext.py
deleted file mode 100644
index 5b1bc1792d4..00000000000
--- a/model-optimizer/extensions/front/onnx/reduce_sum_ext.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""
- Copyright (C) 2018-2020 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-from extensions.ops.ReduceOps import ReduceSum
-from mo.front.common.partial_infer.utils import int64_array
-from mo.front.extractor import FrontExtractorOp
-from mo.front.onnx.extractors.utils import onnx_attr
-from mo.graph.graph import Node
-
-
-class ReduceSumFrontExtractor(FrontExtractorOp):
- op = 'ReduceSum'
- enabled = True
-
- @classmethod
- def extract(cls, node: Node):
- axis = onnx_attr(node, 'axes', 'ints', default=None, dst_type=lambda x: int64_array(x))
- keep_dims = onnx_attr(node, 'keepdims', 'i', default=True)
- ReduceSum.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims})
- return cls.enabled
diff --git a/model-optimizer/extensions/front/reduce_axis_normalizer.py b/model-optimizer/extensions/front/reduce_axis_normalizer.py
index 000b3db6988..79208ef8455 100644
--- a/model-optimizer/extensions/front/reduce_axis_normalizer.py
+++ b/model-optimizer/extensions/front/reduce_axis_normalizer.py
@@ -46,16 +46,17 @@ class ReduceAxisNormalizer(FrontReplacementSubgraph):
node = match['reduce']
connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
if len(connected_in_ports) == 1:
+ node_name = node.soft_get('name', node.id)
+
# if the 'axis' is None then we still add a second input to the layer with a 1D array with 1 element equal
# to None. The infer function handles this case because the input shape is known at this stage only
if node.has('axis'):
- const = Const(graph, {'value': node.axis}).create_node()
+ const = Const(graph, {'name': node_name + '/axis', 'value': node.axis}).create_node()
node.add_input_port(1, skip_if_exist=True)
const.out_port(0).connect(node.in_port(1))
del graph.node[node.id]['axis']
else:
# The default (if there is no 'axis') is to reduce over all the dimensions of the input tensor.
- node_name = node.name
begin_of_range = Const(graph, dict(name=node_name + '/range_begin_', value=0)).create_node()
step = Const(graph, dict(name=node_name + '/range_step_', value=1)).create_node()
diff --git a/model-optimizer/extensions/front/tf/reduce_ext.py b/model-optimizer/extensions/front/tf/reduce_ext.py
index e8d87f51c60..c593fcd7884 100644
--- a/model-optimizer/extensions/front/tf/reduce_ext.py
+++ b/model-optimizer/extensions/front/tf/reduce_ext.py
@@ -13,7 +13,7 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-from extensions.ops.ReduceOps import ReduceProd, ReduceAnd, ReduceMax, ReduceMean, ReduceSum
+from extensions.ops.ReduceOps import ReduceProd, ReduceAnd, ReduceMax, ReduceMean, ReduceSum, ReduceL2
from mo.front.extractor import FrontExtractorOp
from mo.graph.graph import Node
@@ -67,3 +67,13 @@ class SumFrontExtractor(FrontExtractorOp):
def extract(cls, node: Node):
ReduceSum.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
+
+
+class EuclideanNormFrontExtractor(FrontExtractorOp):
+ op = 'EuclideanNorm'
+ enabled = True
+
+ @classmethod
+ def extract(cls, node: Node):
+ ReduceL2.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
+ return cls.enabled
diff --git a/model-optimizer/extensions/ops/ReduceOps.py b/model-optimizer/extensions/ops/ReduceOps.py
index ca0ff3e2007..622b5eec3a3 100644
--- a/model-optimizer/extensions/ops/ReduceOps.py
+++ b/model-optimizer/extensions/ops/ReduceOps.py
@@ -24,6 +24,7 @@ from mo.ops.op import Op
reduce_map = {
'ReduceSum': np.sum,
'ReduceProd': np.prod,
+ 'ReduceL1': lambda x, axis, keepdims: np.sum(a=np.absolute(x), axis=axis, keepdims=keepdims),
'ReduceL2': lambda x, axis, keepdims: np.sqrt(np.sum(a=np.square(x), axis=axis, keepdims=keepdims)),
'ReduceMax': np.max,
'ReduceMin': np.min,
@@ -86,12 +87,13 @@ class ReduceOp(Op):
enabled = False
op = None
op_type = None
+ version = 'opset1'
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'op': self.op,
'type': self.op_type,
- 'version': 'opset1',
+ 'version': self.version,
'infer': reduce_infer,
'keep_dims': 0,
'in_ports_count': 2,
@@ -138,10 +140,15 @@ class ReduceMean(ReduceOp):
enabled = True
+class ReduceL1(ReduceOp):
+ op = 'ReduceL1'
+ op_type = 'ReduceL1'
+ version = 'opset4'
+
class ReduceL2(ReduceOp):
op = 'ReduceL2'
- op_type = None
- enabled = True
+ op_type = 'ReduceL2'
+ version = 'opset4'
class ReduceAnd(ReduceOp):
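The two lambdas added to `reduce_map` can be sanity-checked against the generalized Lp formula used by the updated unit test below; restated standalone in plain NumPy:

```python
import numpy as np

reduce_l1 = lambda x, axis, keepdims: np.sum(a=np.absolute(x), axis=axis, keepdims=keepdims)
reduce_l2 = lambda x, axis, keepdims: np.sqrt(np.sum(a=np.square(x), axis=axis, keepdims=keepdims))

def reduce_lp(x, axis, keepdims, p):
    # Reference from ReduceOps_test.py: (sum |x^p|) ** (1/p)
    return np.power(np.sum(np.abs(np.power(x, p)), axis=axis, keepdims=keepdims), 1 / p)

x = np.arange(1, 13, dtype=np.float32).reshape(3, 4)
assert np.allclose(reduce_l1(x, 1, True), reduce_lp(x, 1, True, 1))
assert np.allclose(reduce_l2(x, 1, True), reduce_lp(x, 1, True, 2))
```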
diff --git a/model-optimizer/extensions/ops/ReduceOps_test.py b/model-optimizer/extensions/ops/ReduceOps_test.py
index 97d7f05d2f1..2d0fffcc507 100644
--- a/model-optimizer/extensions/ops/ReduceOps_test.py
+++ b/model-optimizer/extensions/ops/ReduceOps_test.py
@@ -28,38 +28,42 @@ nodes_attributes = {
**regular_op_with_shaped_data('data', [1, 3, 224, 224], {'type': 'Parameter', 'value': None,
'_out_port_data_type': {0: np.float32}}),
**valued_const_with_data('axis', int64_array(0)),
- **regular_op_with_shaped_data('reduce_l2', None, {'op': 'ReduceL2', 'type': None, 'name': 'my_reduce_l2'}),
+ **regular_op_with_shaped_data('reduce_lp', None, {'op': 'ReduceLp', 'type': None, 'name': 'my_reduce_lp'}),
**regular_op_with_shaped_data('identity', None, {'op': 'Identity', 'name': 'identity'}),
**result('output'),
}
@generator
-class TestCumSum(unittest.TestCase):
+class ReduceLpTest(unittest.TestCase):
@generate(*[
- ([3, 2, 2], [0], True),
- ([3, 2, 2], [1], True),
- ([3, 2, 2], [2], True),
- ([3, 2, 2], [0], False),
- ([3, 2, 2], [1], False),
- ([3, 2, 2], [2], False),
+ ([3, 2, 2], [0], True, 1),
+ ([3, 2, 2], [0], True, 2),
+ ([3, 2, 2], [1], True, 2),
+ ([3, 2, 2], [2], True, 2),
+ ([3, 2, 2], [0], False, 1),
+ ([3, 2, 2], [0], False, 2),
+ ([3, 2, 2], [1], False, 2),
+ ([3, 2, 2], [2], False, 2),
])
- def test_reduce_l2(self, shape, axes, keepdims):
+ def test_reduce_lp(self, shape, axes, keepdims, p):
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
- reduced = np.sqrt(np.sum(a=np.square(data), axis=tuple(axes), keepdims=keepdims))
+ reduced = np.power(np.sum(a=np.abs(np.power(data, p)), axis=tuple(axes), keepdims=keepdims), 1 / p)
axis = int64_array(axes)
+ p = int64_array(p)
graph = build_graph(nodes_attributes,
- [*connect('data', '0:reduce_l2'),
- *connect('axis', '1:reduce_l2'),
- *connect('reduce_l2', '0:identity'),
+ [*connect('data', '0:reduce_lp'),
+ *connect('axis', '1:reduce_lp'),
+ *connect('reduce_lp', '0:identity'),
('identity', 'identity_d', {'out': 0}),
('identity_d', 'output')
],
{'data_d': {'value': data, 'shape': data.shape},
'axis_d': {'value': axis, 'shape': axis.shape},
- 'reduce_l2': {'keep_dims': keepdims}},
+ 'reduce_lp': {'keep_dims': keepdims}},
nodes_with_edges_only=True)
- reduce_node = Node(graph, 'reduce_l2')
+ reduce_node = Node(graph, 'reduce_lp')
+ reduce_node.op = reduce_node.type = 'ReduceL' + str(p)
reduce_infer(reduce_node)
self.assertTrue(np.array_equal(reduce_node.out_port(0).data.get_value(), reduced))
diff --git a/ngraph/core/include/ngraph/op/reduce_l1.hpp b/ngraph/core/include/ngraph/op/reduce_l1.hpp
new file mode 100644
index 00000000000..83191bf3bcb
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/reduce_l1.hpp
@@ -0,0 +1,60 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v4
+ {
+ /// \brief Reduction operation using L1 norm: L1(x) = sum(abs(x)) if all dimensions are
+ /// specified for the normalisation.
+ ///
+ /// Reduces the tensor, eliminating the specified reduction axes by taking the L1-norm.
+ class NGRAPH_API ReduceL1 : public util::ArithmeticReductionKeepDims
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"ReduceL1", 4};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ /// \brief Constructs a reduce L1-norm operation.
+ ReduceL1() = default;
+ /// \brief Constructs a reduce L1-norm operation.
+ ///
+ /// \param arg The tensor to be reduced.
+ /// \param reduction_axes The axis positions (0-based) to be eliminated.
+ /// \param keep_dims If set to true, the reduced axes are retained in the output with length 1.
+ ReduceL1(const Output<Node>& arg,
+ const Output<Node>& reduction_axes,
+ bool keep_dims = false);
+
+ size_t get_version() const override { return 4; }
+ /// \return The default value for Reduce.
+ virtual std::shared_ptr<Node> get_default_value() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+ };
+ }
+ }
+}
diff --git a/ngraph/core/include/ngraph/op/reduce_l2.hpp b/ngraph/core/include/ngraph/op/reduce_l2.hpp
new file mode 100644
index 00000000000..3e23300efad
--- /dev/null
+++ b/ngraph/core/include/ngraph/op/reduce_l2.hpp
@@ -0,0 +1,58 @@
+//*****************************************************************************
+// Copyright 2017-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//*****************************************************************************
+
+#pragma once
+
+#include "ngraph/op/util/arithmetic_reductions_keep_dims.hpp"
+
+namespace ngraph
+{
+ namespace op
+ {
+ namespace v4
+ {
+ /// \brief Reduction operation using L2 norm: L2(x) = sqrt(sum(x^2))
+ ///
+ /// Reduces the tensor, eliminating the specified reduction axes by taking the L2-norm.
+ class NGRAPH_API ReduceL2 : public util::ArithmeticReductionKeepDims
+ {
+ public:
+ static constexpr NodeTypeInfo type_info{"ReduceL2", 4};
+ const NodeTypeInfo& get_type_info() const override { return type_info; }
+ /// \brief Constructs a reduce L2-norm operation.
+ ReduceL2() = default;
+ /// \brief Constructs a reduce L2-norm operation.
+ ///
+ /// \param arg The tensor to be reduced.
+ /// \param reduction_axes The axis positions (0-based) to be eliminated.
+ /// \param keep_dims If set to true, the reduced axes are retained in the output with length 1.
+ ReduceL2(const Output<Node>& arg,
+ const Output<Node>& reduction_axes,
+ bool keep_dims = false);
+
+ size_t get_version() const override { return 4; }
+ /// \return The default value for Reduce.
+ virtual std::shared_ptr<Node> get_default_value() const override;
+
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+
+ bool evaluate(const HostTensorVector& outputs,
+ const HostTensorVector& inputs) const override;
+ };
+ }
+ }
+}
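The two headers above only declare the ops; for intuition, here is a hedged NumPy sketch of the documented semantics (L1(x) = sum(abs(x)), L2(x) = sqrt(sum(x^2))) and of how `keep_dims` affects output rank. The function names are illustrative, not part of the nGraph API.

```python
import numpy as np

def reduce_l1(x, axes, keep_dims=False):
    return np.sum(np.abs(x), axis=tuple(axes), keepdims=keep_dims)

def reduce_l2(x, axes, keep_dims=False):
    return np.sqrt(np.sum(x * x, axis=tuple(axes), keepdims=keep_dims))

x = np.array([[3.0, -4.0], [1.0, 2.0]])
print(reduce_l1(x, [1]))              # [7. 3.]
print(reduce_l2(x, [1]))              # [5.        2.23606798]
print(reduce_l2(x, [1], True).shape)  # (2, 1): reduced axis kept with length 1
```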
diff --git a/ngraph/core/include/ngraph/ops.hpp b/ngraph/core/include/ngraph/ops.hpp
index 40bc28555ca..af0be2a0f41 100644
--- a/ngraph/core/include/ngraph/ops.hpp
+++ b/ngraph/core/include/ngraph/ops.hpp
@@ -115,6 +115,8 @@
#include "ngraph/op/quantized_dot.hpp"
#include "ngraph/op/range.hpp"
#include "ngraph/op/read_value.hpp"
+#include "ngraph/op/reduce_l1.hpp"
+#include "ngraph/op/reduce_l2.hpp"
#include "ngraph/op/reduce_logical_and.hpp"
#include "ngraph/op/reduce_logical_or.hpp"
#include "ngraph/op/reduce_mean.hpp"
diff --git a/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp
index c7ece5b73ef..dbd5ccd8252 100644
--- a/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp
+++ b/ngraph/core/include/ngraph/opsets/opset4_tbl.hpp
@@ -158,4 +158,6 @@ NGRAPH_OP(Atanh, ngraph::op::v3)
NGRAPH_OP(CTCLoss, ngraph::op::v4)
NGRAPH_OP(NonMaxSuppression, ngraph::op::v4)
NGRAPH_OP(Mish, ngraph::op::v4)
+NGRAPH_OP(ReduceL1, ngraph::op::v4)
+NGRAPH_OP(ReduceL2, ngraph::op::v4)
NGRAPH_OP(Swish, ngraph::op::v4)
diff --git a/ngraph/core/include/ngraph/runtime/reference/any.hpp b/ngraph/core/include/ngraph/runtime/reference/any.hpp
index 7eb4f88a621..89b05b0ca54 100644
--- a/ngraph/core/include/ngraph/runtime/reference/any.hpp
+++ b/ngraph/core/include/ngraph/runtime/reference/any.hpp
@@ -30,10 +30,10 @@ namespace ngraph
static inline void any(const char* arg,
char* out,
const Shape& in_shape,
- const Shape& out_shape,
- const AxisSet& reduction_axes)
+ const AxisSet& reduction_axes,
+ bool keep_dims)
{
- CoordinateTransform output_transform(out_shape);
+ CoordinateTransform output_transform(reduce(in_shape, reduction_axes, keep_dims));
for (const Coordinate& output_coord : output_transform)
{
@@ -44,7 +44,7 @@ namespace ngraph
for (const Coordinate& input_coord : input_transform)
{
- Coordinate output_coord = reduce(input_coord, reduction_axes);
+ Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
out[output_transform.index(output_coord)] =
out[output_transform.index(output_coord)] ||
arg[input_transform.index(input_coord)];
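Note on the change above: the explicit `out_shape` parameter is gone, and the output shape is now derived inside the kernel from the input shape, the reduction axes, and `keep_dims`. A Python analogue of that shape computation, written as an assumption for illustration (the real helper is an nGraph `reduce()` overload):

```python
def reduce_shape(in_shape, reduction_axes, keep_dims):
    # Reduced axes collapse to length 1 when keep_dims is set; otherwise they disappear.
    out = []
    for i, dim in enumerate(in_shape):
        if i in reduction_axes:
            if keep_dims:
                out.append(1)
        else:
            out.append(dim)
    return tuple(out)

assert reduce_shape((2, 3, 4), {1}, keep_dims=False) == (2, 4)
assert reduce_shape((2, 3, 4), {1}, keep_dims=True) == (2, 1, 4)
```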
diff --git a/ngraph/core/include/ngraph/runtime/reference/autobroadcast_binop.hpp b/ngraph/core/include/ngraph/runtime/reference/autobroadcast_binop.hpp
index 8a1b0cfd368..70410784226 100644
--- a/ngraph/core/include/ngraph/runtime/reference/autobroadcast_binop.hpp
+++ b/ngraph/core/include/ngraph/runtime/reference/autobroadcast_binop.hpp
@@ -333,7 +333,7 @@ namespace ngraph
for (const Coordinate& output_coord : output_transform)
{
- Coordinate arg1_coord = reduce(output_coord, arg1_squeezed_axes);
+ Coordinate arg1_coord = reduce(output_coord, arg1_squeezed_axes, false);
out[output_transform.index(output_coord)] =
elementwise_functor(arg0[arg0_transform.index(output_coord)],
arg1[arg1_transform.index(arg1_coord)]);
@@ -452,9 +452,9 @@ namespace ngraph
for (const Coordinate& output_coord : output_transform)
{
- Coordinate arg0_coord = reduce(output_coord, arg0_squeezed_axes);
- Coordinate arg1_coord = reduce(output_coord, arg1_squeezed_axes);
- Coordinate arg2_coord = reduce(output_coord, arg2_squeezed_axes);
+ Coordinate arg0_coord = reduce(output_coord, arg0_squeezed_axes, false);
+ Coordinate arg1_coord = reduce(output_coord, arg1_squeezed_axes, false);
+ Coordinate arg2_coord = reduce(output_coord, arg2_squeezed_axes, false);
out[output_transform.index(output_coord)] =
elementwise_functor(arg0[arg0_transform.index(arg0_coord)],
arg1[arg1_transform.index(arg1_coord)],
@@ -536,8 +536,8 @@ namespace ngraph
for (const Coordinate& output_coord : output_transform)
{
- Coordinate arg0_coord = reduce(output_coord, arg0_squeezed_axes);
- Coordinate arg2_coord = reduce(output_coord, arg2_squeezed_axes);
+ Coordinate arg0_coord = reduce(output_coord, arg0_squeezed_axes, false);
+ Coordinate arg2_coord = reduce(output_coord, arg2_squeezed_axes, false);
out[output_transform.index(output_coord)] =
elementwise_functor(arg0[arg0_transform.index(arg0_coord)],
arg1[arg1_transform.index(output_coord)],
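These autobroadcast call sites now pass `keep_dims = false` explicitly: when an output coordinate is mapped back to a squeezed input, the squeezed axes are dropped from the coordinate rather than retained. A hedged sketch of that coordinate mapping (illustrative names, not the nGraph `Coordinate` overload itself):

```python
def reduce_coord(coord, axes, keep_dims):
    if keep_dims:
        # A retained reduced axis has length 1, so its index is always 0.
        return tuple(0 if i in axes else c for i, c in enumerate(coord))
    return tuple(c for i, c in enumerate(coord) if i not in axes)

# Output coordinate (1, 2, 3) with axis 1 squeezed maps to input coordinate (1, 3).
assert reduce_coord((1, 2, 3), {1}, keep_dims=False) == (1, 3)
```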
diff --git a/ngraph/core/include/ngraph/runtime/reference/broadcast.hpp b/ngraph/core/include/ngraph/runtime/reference/broadcast.hpp
index 726166e0b7e..8c324fb845d 100644
--- a/ngraph/core/include/ngraph/runtime/reference/broadcast.hpp
+++ b/ngraph/core/include/ngraph/runtime/reference/broadcast.hpp
@@ -58,7 +58,7 @@ namespace ngraph
for (const Coordinate& output_coord : output_transform)
{
- Coordinate input_coord = reduce(output_coord, adjusted_axes);
+ Coordinate input_coord = reduce(output_coord, adjusted_axes, false);
out[output_transform.index(output_coord)] =
arg[input_transform.index(input_coord)];
}
diff --git a/ngraph/core/include/ngraph/runtime/reference/logical_reduction.hpp b/ngraph/core/include/ngraph/runtime/reference/logical_reduction.hpp
index 9a13a792956..2c063297802 100644
--- a/ngraph/core/include/ngraph/runtime/reference/logical_reduction.hpp
+++ b/ngraph/core/include/ngraph/runtime/reference/logical_reduction.hpp
@@ -26,33 +26,16 @@ namespace ngraph
{
namespace runtime
{
- namespace
- {
- Shape get_shape_no_keep_dims(const AxisSet& reduction_axes, const Shape& input_shape)
- {
- Shape shape_no_keep_dims;
-
- for (size_t i = 0; i < input_shape.size(); i++)
- {
- if (reduction_axes.count(i) == 0)
- {
- shape_no_keep_dims.push_back(input_shape[i]);
- }
- }
-
- return shape_no_keep_dims;
- }
- }
-
namespace reference
{
static inline void reduce_logical_and(const char* arg,
char* out,
const Shape& input_shape,
- const AxisSet& reduction_axes)
+ const AxisSet& reduction_axes,
+ bool keep_dims)
{
CoordinateTransform output_transform(
- get_shape_no_keep_dims(reduction_axes, input_shape));
+ reduce(input_shape, reduction_axes, keep_dims));
for (const Coordinate& output_coord : output_transform)
{
@@ -63,7 +46,7 @@ namespace ngraph
for (const Coordinate& input_coord : input_transform)
{
- Coordinate output_coord = reduce(input_coord, reduction_axes);
+ Coordinate output_coord = reduce(input_coord, reduction_axes, keep_dims);
out[output_transform.index(output_coord)] =
out[output_transform.index(output_coord)] &&
arg[input_transform.index(input_coord)];
@@ -73,13 +56,10 @@ namespace ngraph
static inline void reduce_logical_or(const char* arg,
char* out,
const Shape& input_shape,
- const AxisSet& reduction_axes)
+ const AxisSet& reduction_axes,
+ bool keep_dims)
{
- runtime::reference::any(arg,
- out,
- input_shape,
- get_shape_no_keep_dims(reduction_axes, input_shape),
- reduction_axes);
+ runtime::reference::any(arg, out, input_shape, reduction_axes, keep_dims);
}
}
}
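With `get_shape_no_keep_dims` removed, both logical reductions share the same shape logic as the arithmetic ones. In NumPy terms the semantics are as follows (a sketch, assuming boolean input):

```python
import numpy as np

x = np.array([[True, False], [True, True]])
print(np.logical_and.reduce(x, axis=1))                # [False  True]
print(np.logical_or.reduce(x, axis=1, keepdims=True))  # [[ True] [ True]]
```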
diff --git a/ngraph/core/include/ngraph/runtime/reference/max.hpp b/ngraph/core/include/ngraph/runtime/reference/max.hpp
index 01ea2d67406..cd62a09fea8 100644
--- a/ngraph/core/include/ngraph/runtime/reference/max.hpp
+++ b/ngraph/core/include/ngraph/runtime/reference/max.hpp
@@ -29,13 +29,17 @@ namespace ngraph
namespace reference
{
template <typename T>
- void max(const T* arg, T* out, const Shape& in_shape, const AxisSet& reduction_axes)
+ void max(const T* arg,
+ T* out,
+ const Shape& in_shape,
+ const AxisSet& reduction_axes,
+ bool keep_dims)
{
T minval = std::numeric_limits<T>::has_infinity
? T(-std::numeric_limits<T>::infinity())
: std::numeric_limits<T>