From 86347bd9094d13c92f1dcdecf00be4e11858ea2c Mon Sep 17 00:00:00 2001
From: Bartosz Lesniewski
Date: Tue, 8 Dec 2020 04:42:47 +0100
Subject: [PATCH] Remove ops from Layer Creator/ Node Converter - part 3
 (#3356)

* remove convert op from layer creator
* remove depthtospace op from layer creator
* remove mvn op from layer creator
* remove normalizel2 op from layer creator
* remove notequal op from layer creator
* remove subtract op from layer creator
* correct mvn op behavior when copied with new input
* fix trying to get precision from empty output of normalize layer
* fix normalize layer not setting output type
* remove trailing whitespace
* add fp64 to possible convert op precision types
* use a function to translate bool string representation
* merge emergency opset changes for mvn and roipooling ops
---
 .../legacy/ngraph_ops/normalize_ie.hpp        |   4 +-
 .../src/convert_function_to_cnn_network.cpp   | 100 +++++++++++++++++-
 .../src/ie_cnn_layer_builder_ngraph.cpp       |  81 --------------
 .../src/ngraph_ops/normalize_ie.cpp           |   7 ++
 .../src/readers/ir_reader/ie_ir_parser.cpp    |  90 ++--------------
 ngraph/core/include/ngraph/op/not_equal.hpp   |   1 +
 ngraph/core/src/op/mvn.cpp                    |   2 +
 ngraph/core/src/op/normalize_l2.cpp           |   1 +
 ngraph/core/src/op/not_equal.cpp              |   5 +
 9 files changed, 121 insertions(+), 170 deletions(-)

diff --git a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/normalize_ie.hpp b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/normalize_ie.hpp
index df800ce564b..5bb17795117 100644
--- a/inference-engine/src/legacy_api/include/legacy/ngraph_ops/normalize_ie.hpp
+++ b/inference-engine/src/legacy_api/include/legacy/ngraph_ops/normalize_ie.hpp
@@ -33,8 +33,8 @@ public:
     bool get_across_spatial() const { return m_across_spatial;}
 
     void validate_and_infer_types() override;
-
-    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
+    bool visit_attributes(AttributeVisitor &visitor) override;
+    std::shared_ptr<Node> clone_with_new_inputs(const OutputVector &new_args) const override;
 
 protected:
     float m_eps;
diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
index c728b732c4b..40f8b03bf6e 100644
--- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
+++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
@@ -1029,6 +1029,93 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr
         return res;
     });
 
+    addSpecificCreator({"Convert"}, [](const std::shared_ptr<::ngraph::Node>& node,
+                                       const std::map<std::string, std::string>& params) -> CNNLayerPtr {
+        LayerParams attrs = {node->get_friendly_name(), "Convert",
+                             details::convertPrecision(node->get_output_element_type(0))};
+        auto res = std::make_shared<InferenceEngine::CNNLayer>(attrs);
+
+        auto p = details::convertPrecision(node->get_output_element_type(0));
+        std::string precision_str;
+        switch (p) {
+        case Precision::FP16:
+            precision_str = "FP16";
+            break;
+        case Precision::BF16:
+            precision_str = "BF16";
+            break;
+        case Precision::FP32:
+            precision_str = "FP32";
+            break;
+        case Precision::FP64:
+            precision_str = "FP64";
+            break;
+        case Precision::I8:
+            precision_str = "I8";
+            break;
+        case Precision::I16:
+            precision_str = "I16";
+            break;
+        case Precision::I32:
+            precision_str = "I32";
+            break;
+        case Precision::I64:
+            precision_str = "I64";
+            break;
+        case Precision::U8:
+            precision_str = "U8";
+            break;
+        case Precision::U16:
+            precision_str = "U16";
+            break;
+        case Precision::U32:
precision_str = "U32"; + break; + case Precision::U64: + precision_str = "U64"; + break; + case Precision::BOOL: + precision_str = "BOOL"; + break; + default: + THROW_IE_EXCEPTION << "Unsupported type"; + } + + res->params["precision"] = precision_str; + return res; + }); + + addSpecificCreator({"MVN"}, [](const std::shared_ptr<::ngraph::Node>& node, + const std::map ¶ms) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "MVN", + details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + + res->params["normalize_variance"] = params.at("normalize_variance"); + res->params["normalize_variance"] = res->getBoolStrParamAsIntStr("normalize_variance"); + res->params["eps"] = params.at("eps"); + res->params["across_channels"] = params.at("across_channels"); + res->params["across_channels"] = res->getBoolStrParamAsIntStr("across_channels"); + return res; + }); + + addSpecificCreator({"NormalizeIE"}, [](const std::shared_ptr<::ngraph::Node> &node, + const std::map ¶ms) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Normalize", + details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + + res->params = params; + res->params["channel_shared"] = res->getBoolStrParamAsIntStr("channel_shared"); + res->params["across_spatial"] = res->getBoolStrParamAsIntStr("across_spatial"); + + const auto weightsNode = node->input_value(1).get_node_shared_ptr(); + if (auto castedLayer = ngraph::as_type_ptr(weightsNode)) { + res->blobs["weights"] = InferenceEngine::details::shareWeights(castedLayer); + } + return res; + }); + addSpecificCreator({"Clamp"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map& params) -> CNNLayerPtr { LayerParams attrs = {node->get_friendly_name(), "Clamp", details::convertPrecision(node->get_output_element_type(0))}; @@ -1138,6 +1225,15 @@ InferenceEngine::details::CNNLayerCreator::CNNLayerCreator(const std::shared_ptr return res; }); + addSpecificCreator({"Subtract"}, [](const std::shared_ptr<::ngraph::Node> &node, + const std::map ¶ms) -> CNNLayerPtr { + LayerParams attrs = {node->get_friendly_name(), "Eltwise", + details::convertPrecision(node->get_output_element_type(0))}; + auto res = std::make_shared(attrs); + res->params["operation"] = "sub"; + return res; + }); + addSpecificCreator({"FakeQuantize"}, [](const std::shared_ptr<::ngraph::Node>& node, const std::map& params) -> CNNLayerPtr { LayerParams attrs = {node->get_friendly_name(), "FakeQuantize", details::convertPrecision(node->get_output_element_type(0))}; @@ -1208,19 +1304,16 @@ void convertFunctionToICNNNetwork(const std::shared_ptr> convertors = { std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), @@ -1230,7 +1323,6 @@ void convertFunctionToICNNNetwork(const std::shared_ptr>(), std::make_shared>(), std::make_shared>(), - std::make_shared>(), std::make_shared>(), std::make_shared>(), std::make_shared>(), diff --git a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp b/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp index faedfc71f9f..6b3317daf26 100644 --- a/inference-engine/src/legacy_api/src/ie_cnn_layer_builder_ngraph.cpp +++ 
@@ -337,61 +337,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared
     return res;
 }
 
-template <>
-CNNLayer::Ptr NodeConverter<ngraph::op::Convert>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
-    LayerParams params = {layer->get_friendly_name(), "Convert",
-                          details::convertPrecision(layer->get_output_element_type(0))};
-    auto res = std::make_shared<InferenceEngine::CNNLayer>(params);
-
-    auto p = details::convertPrecision(layer->get_output_element_type(0));
-    std::string precision_str;
-    switch (p) {
-    case Precision::FP16:
-        precision_str = "FP16";
-        break;
-    case Precision::BF16:
-        precision_str = "BF16";
-        break;
-    case Precision::FP32:
-        precision_str = "FP32";
-        break;
-    case Precision::FP64:
-        precision_str = "FP64";
-        break;
-    case Precision::I8:
-        precision_str = "I8";
-        break;
-    case Precision::I16:
-        precision_str = "I16";
-        break;
-    case Precision::I32:
-        precision_str = "I32";
-        break;
-    case Precision::I64:
-        precision_str = "I64";
-        break;
-    case Precision::U8:
-        precision_str = "U8";
-        break;
-    case Precision::U16:
-        precision_str = "U16";
-        break;
-    case Precision::U32:
-        precision_str = "U32";
-        break;
-    case Precision::U64:
-        precision_str = "U64";
-        break;
-    case Precision::BOOL:
-        precision_str = "BOOL";
-        break;
-    default:
-        THROW_IE_EXCEPTION << "Unsupported type";
-    }
-
-    res->params["precision"] = precision_str;
-    return res;
-}
-
 template <>
 CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
     LayerParams params = {layer->get_friendly_name(), "Ceiling",
@@ -453,23 +398,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr<
     return res;
 }
 
-template <>
-CNNLayer::Ptr NodeConverter<ngraph::op::MVN>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
-    LayerParams params = {layer->get_friendly_name(), "MVN", details::convertPrecision(layer->get_output_element_type(0))};
-    auto res = std::make_shared<InferenceEngine::MVNLayer>(params);
-    auto castedLayer = ngraph::as_type_ptr<ngraph::op::MVN>(layer);
-    if (castedLayer == nullptr) THROW_IE_EXCEPTION << "Cannot get " << params.type << " layer " << params.name;
-
-    res->params["eps"] = asString(castedLayer->get_eps());
-
-    const size_t chanelAxis = 1;
-    ngraph::AxisSet reductionAxes = castedLayer->get_reduction_axes();
-    res->params["across_channels"] = asString(reductionAxes.count(chanelAxis) > 0);
-
-    res->params["normalize_variance"] = asString(castedLayer->get_normalize_variance());
-    return res;
-}
-
 template <>
 CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
     LayerParams params = {layer->get_friendly_name(), "Crop",
@@ -502,15 +430,6 @@ CNNLayer::Ptr NodeConverter::createLayer(const std::shared_p
     return res;
 }
 
-template <>
-CNNLayer::Ptr NodeConverter<ngraph::op::v1::Subtract>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
-    LayerParams params = {layer->get_friendly_name(), "Eltwise",
-                          details::convertPrecision(layer->get_output_element_type(0))};
-    auto res = std::make_shared<InferenceEngine::EltwiseLayer>(params);
-    res->params["operation"] = "sub";
-    return res;
-}
-
 template <>
 CNNLayer::Ptr NodeConverter::createLayer(const std::shared_ptr& layer) const {
     LayerParams params = {layer->get_friendly_name(), "Eltwise",
diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/normalize_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/normalize_ie.cpp
index 7d672d4d685..6fb38938767 100644
--- a/inference-engine/src/legacy_api/src/ngraph_ops/normalize_ie.cpp
+++ b/inference-engine/src/legacy_api/src/ngraph_ops/normalize_ie.cpp
@@ -35,3 +35,10 @@ shared_ptr op::NormalizeIE::clone_with_new_inputs(const OutputVector& new_
     check_new_args_count(this, new_args);
     return make_shared(new_args.at(0), new_args.at(1), m_eps, m_across_spatial, m_channel_shared, m_output_type);
 }
+
+bool op::NormalizeIE::visit_attributes(AttributeVisitor& visitor) {
+    visitor.on_attribute("eps", m_eps);
+    visitor.on_attribute("channel_shared", m_channel_shared);
+    visitor.on_attribute("across_spatial", m_across_spatial);
+    return true;
+}
diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
index 7bc61a5494c..1dadcbb46f2 100644
--- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
+++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp
@@ -17,7 +17,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -397,13 +396,10 @@ std::shared_ptr V10Parser::createNode(const std::vector
 > creators = {
         std::make_shared>("AvgPool"),
-        std::make_shared>("Convert"),
         std::make_shared>("CTCGreedyDecoder"),
         std::make_shared>("DeformableConvolution"),
         std::make_shared>("DeformablePSROIPooling"),
         std::make_shared>("SpaceToDepth"),
-        std::make_shared>("DepthToSpace"),
-        std::make_shared>("Subtract"),
         std::make_shared>("Broadcast"),
         std::make_shared>("StridedSlice"),
         std::make_shared>("Gather"),
@@ -415,14 +411,11 @@ std::shared_ptr V10Parser::createNode(const std::vector
 >("SquaredDifference"),
         std::make_shared>("LessEqual"),
         std::make_shared>("Equal"),
-        std::make_shared>("NotEqual"),
         std::make_shared>("FloorMod"),
-        std::make_shared>("MVN"),
         std::make_shared>("LSTMCell"),
         std::make_shared>("MaxPool"),
         std::make_shared>("Minimum"),
         std::make_shared>("NonMaxSuppression"),
-        std::make_shared>("NormalizeL2"),
         std::make_shared>("ReorgYolo"),
         std::make_shared>("RegionYolo"),
         std::make_shared>("Result"),
@@ -480,12 +473,16 @@ std::shared_ptr V10Parser::createNode(const std::vector V10Parser::LayerCreator::cre
     return fillSubGraphLayer(inputs, node, weights, layerParsePrms, loop);
 }
 
-// Covnert layer
-template <>
-std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::Convert>::createLayer(
-    const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights,
-    const GenericLayerParams& layerParsePrms) {
-    checkParameters(inputs, layerParsePrms, 1);
-    pugi::xml_node dn = node.child("data");
-    if (dn.empty())
-        THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
-    return std::make_shared<ngraph::op::Convert>(inputs[0],
-                                                 details::convertPrecision(GetStrAttr(dn, "destination_type")));
-}
-
 // LSTMCell layer
 template <>
 std::shared_ptr V10Parser::LayerCreator::createLayer(
@@ -844,15 +827,6 @@ std::shared_ptr V10Parser::LayerCreator::cr
     return std::make_shared(inputs[0], inputs[1]);
 }
 
-// NotEqual layer
-template <>
-std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::v1::NotEqual>::createLayer(
-    const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights,
-    const GenericLayerParams& layerParsePrms) {
-    checkParameters(inputs, layerParsePrms, 2);
-    return std::make_shared<ngraph::op::v1::NotEqual>(inputs[0], inputs[1]);
-}
-
 // FloorMod layer
 template <>
 std::shared_ptr V10Parser::LayerCreator::createLayer(
@@ -862,23 +836,6 @@ std::shared_ptr V10Parser::LayerCreator:
     return std::make_shared(inputs[0], inputs[1]);
 }
 
-// MVN layer
-template <>
-std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::MVN>::createLayer(
-    const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights,
-    const GenericLayerParams& layerParsePrms) {
-    checkParameters(inputs, layerParsePrms, 1);
-    pugi::xml_node dn = node.child("data");
-
-    if (dn.empty())
THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - double eps = GetFloatAttr(dn, "eps"); - bool across = GetUIntAttr(dn, "across_channels", 0) == 1; - bool normalize_variance = GetUIntAttr(dn, "normalize_variance", 0) == 1; - return std::make_shared(inputs[0], across, normalize_variance, eps); -} - // VariadicSplit layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( @@ -959,14 +916,6 @@ std::shared_ptr V10Parser::LayerCreator:: return std::make_shared(inputs[0], inputs[1]); } -// Subtract layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 2); - return std::make_shared(inputs[0], inputs[1]); -} // Broadcast layer template <> @@ -1344,31 +1293,6 @@ std::shared_ptr V10Parser::LayerCreator::c return std::make_shared(inputs[0], inputs[1], inputs[2]); } -// NormalizeL2 layer -template <> -std::shared_ptr V10Parser::LayerCreator::createLayer( - const ngraph::OutputVector& inputs, const pugi::xml_node& node, const Blob::CPtr& weights, - const GenericLayerParams& layerParsePrms) { - checkParameters(inputs, layerParsePrms, 2); - pugi::xml_node dn = node.child("data"); - - if (dn.empty()) - THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name; - - float eps = GetFloatAttr(dn, "eps"); - std::string eps_mode = GetStrAttr(dn, "eps_mode"); - ngraph::op::EpsMode em; - if (eps_mode == "add") { - em = ngraph::op::EpsMode::ADD; - } else if (eps_mode == "max") { - em = ngraph::op::EpsMode::MAX; - } else { - THROW_IE_EXCEPTION << "NormalizeL2 unsupported eps_mode: " << eps_mode; - } - - return std::make_shared(inputs[0], inputs[1], eps, em); -} - // LogicalAnd layer template <> std::shared_ptr V10Parser::LayerCreator::createLayer( diff --git a/ngraph/core/include/ngraph/op/not_equal.hpp b/ngraph/core/include/ngraph/op/not_equal.hpp index dfd551ddbef..ca511dc2fe1 100644 --- a/ngraph/core/include/ngraph/op/not_equal.hpp +++ b/ngraph/core/include/ngraph/op/not_equal.hpp @@ -49,6 +49,7 @@ namespace ngraph bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; + virtual bool visit_attributes(AttributeVisitor& visitor) override; }; } // namespace v1 } diff --git a/ngraph/core/src/op/mvn.cpp b/ngraph/core/src/op/mvn.cpp index 8408e09939b..4247482fb4d 100644 --- a/ngraph/core/src/op/mvn.cpp +++ b/ngraph/core/src/op/mvn.cpp @@ -49,6 +49,8 @@ op::MVN::MVN(const Output& data, AxisSet reduction_axes, bool normalize_va , m_reduction_axes{reduction_axes} { constructor_validate_and_infer_types(); + const size_t chanelAxis = 1; + m_across_channels = (m_reduction_axes.count(chanelAxis) > 0); } // decompose_op() relies on knowing the data type of input data which might diff --git a/ngraph/core/src/op/normalize_l2.cpp b/ngraph/core/src/op/normalize_l2.cpp index 68981048936..bf0d6abf850 100644 --- a/ngraph/core/src/op/normalize_l2.cpp +++ b/ngraph/core/src/op/normalize_l2.cpp @@ -84,6 +84,7 @@ void op::NormalizeL2::pre_validate_and_infer_types() } } } + set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } AxisSet op::NormalizeL2::get_reduction_axes() const diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 9990bd3126c..6dd5d2dcb09 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ 
@@ -94,3 +94,8 @@ bool op::v1::NotEqual::evaluate(const HostTensorVector& outputs,
     OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::NotEqual::evaluate");
     return not_equalop::evaluate_not_equal(inputs[0], inputs[1], outputs[0], get_autob());
 }
+
+bool op::v1::NotEqual::visit_attributes(AttributeVisitor& visitor)
+{
+    return true;
+}
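
For context, the pattern this patch series converges on is string-keyed creator lambdas registered via addSpecificCreator, replacing the per-op NodeConverter and LayerCreator template specializations removed above. The self-contained C++ sketch below illustrates only the shape of that pattern; the Registry, Layer, Params, and Creator names are hypothetical stand-ins invented for this example, not Inference Engine types.

// Standalone sketch of a string-keyed creator registry (hypothetical types, not IE APIs).
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Layer {
    std::string name;
    std::string type;
    std::map<std::string, std::string> params;
};
using LayerPtr = std::shared_ptr<Layer>;
using Params = std::map<std::string, std::string>;
using Creator = std::function<LayerPtr(const std::string& nodeName, const Params&)>;

class Registry {
public:
    // Register one lambda for one or more op type names, mirroring addSpecificCreator({"..."}).
    void addSpecificCreator(const std::vector<std::string>& types, Creator creator) {
        for (const auto& t : types)
            creators[t] = creator;
    }
    // Dispatch on the op's type name; the real code falls back to generic handling when unknown.
    LayerPtr create(const std::string& type, const std::string& name, const Params& params) const {
        auto it = creators.find(type);
        if (it == creators.end())
            return nullptr;
        return it->second(name, params);
    }
private:
    std::map<std::string, Creator> creators;
};

int main() {
    Registry registry;
    // "Subtract" maps onto an Eltwise layer with operation=sub, as in the patch.
    registry.addSpecificCreator({"Subtract"}, [](const std::string& name, const Params&) -> LayerPtr {
        auto res = std::make_shared<Layer>();
        res->name = name;
        res->type = "Eltwise";
        res->params["operation"] = "sub";
        return res;
    });

    auto layer = registry.create("Subtract", "sub_node", {});
    std::cout << layer->type << " " << layer->params["operation"] << "\n";  // prints: Eltwise sub
    return 0;
}

Registering creators by type string keeps the op-to-CNNLayer mapping in one place and avoids growing a long template-specialization list every time an op is migrated, which is the motivation behind the removals in this patch.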