diff --git a/inference-engine/src/inference_engine/src/compilation_context.cpp b/inference-engine/src/inference_engine/src/compilation_context.cpp
index de956bdcd92..82cd4ac8a42 100644
--- a/inference-engine/src/inference_engine/src/compilation_context.cpp
+++ b/inference-engine/src/inference_engine/src/compilation_context.cpp
@@ -123,7 +123,7 @@ std::string NetworkCompilationContext::computeHash(const CNNNetwork& network,
         } else if (auto fNames = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(rtMapData.second)) {
             seed = hash_combine(seed, fNames->get().getNames());
-        } else if (auto prim = std::dynamic_pointer_cast<VariantWrapper<ngraph::PrimitivesPriority>>(
+        } else if (auto prim = std::dynamic_pointer_cast<VariantWrapper<ov::PrimitivesPriority>>(
                        rtMapData.second)) {
             seed = hash_combine(seed, prim->get().getPrimitivesPriority());
         }
diff --git a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
index 4b61ca5de01..86860efef82 100644
--- a/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
+++ b/inference-engine/src/legacy_api/src/convert_function_to_cnn_network.cpp
@@ -1981,12 +1981,12 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
             cnnLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = originalNames;
         }
 
-        std::string primitivesPriority = ::ngraph::getPrimitivesPriority(layer);
+        std::string primitivesPriority = ov::getPrimitivesPriority(layer);
         if (!primitivesPriority.empty()) {
             cnnLayer->params["PrimitivesPriority"] = primitivesPriority;
         }
diff --git a/inference-engine/src/low_precision_transformations/src/convert_subtract_constant.cpp b/inference-engine/src/low_precision_transformations/src/convert_subtract_constant.cpp
index 8bf38985309..fde8f5494ab 100644
--- a/inference-engine/src/low_precision_transformations/src/convert_subtract_constant.cpp
+++ b/inference-engine/src/low_precision_transformations/src/convert_subtract_constant.cpp
@@ -9,6 +9,7 @@
 #include
 #include
+#include <transformations/rt_info/disable_constant_folding.hpp>
 #include "low_precision/network_helper.hpp"
 
 using namespace ngraph;
@@ -82,8 +83,7 @@ ngraph::pass::low_precision::ConvertSubtractConstant::ConvertSubtractConstant(co
         NetworkHelper::copyInfo(subtractConstant, resultConvert);
         resultConvert->set_friendly_name(subtractConstant->get_friendly_name() + "/Convert");
 
-        auto& rtInfo = resultConvert->get_rt_info();
-        rtInfo["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
+        ov::disable_constant_folding(resultConvert);
 
         const auto newSubtract = std::make_shared<opset1::Subtract>(opsMap.at(weightsConvertWrapper).get_node_shared_ptr(), resultConvert);
         NetworkHelper::copyInfo(subtract, newSubtract);
diff --git a/inference-engine/src/low_precision_transformations/src/convolution.cpp b/inference-engine/src/low_precision_transformations/src/convolution.cpp
index 97abcb2b9ab..16aac6c2362 100644
--- a/inference-engine/src/low_precision_transformations/src/convolution.cpp
+++ b/inference-engine/src/low_precision_transformations/src/convolution.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include "low_precision/network_helper.hpp"
+#include <transformations/rt_info/disable_constant_folding.hpp>
 
 namespace ngraph {
 namespace pass {
@@ -321,8 +322,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
         }
 
         if (ov::is_type(onWeights)) {
-            auto& rt = onWeights->get_rt_info();
-            rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
+            ov::disable_constant_folding(onWeights);
         }
         return true;
     }
diff --git a/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp b/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp
index cec1b73b9a0..c64c69cf12a 100644
--- a/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp
+++ b/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include "low_precision/network_helper.hpp"
+#include <transformations/rt_info/disable_constant_folding.hpp>
 
 namespace ngraph {
 namespace pass {
@@ -212,8 +213,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
         }
 
         if (ov::is_type(onWeights)) {
-            auto& rt = onWeights->get_rt_info();
-            rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
+            ov::disable_constant_folding(onWeights);
         }
 
         return true;
diff --git a/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp b/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp
index 044b12e9dd7..3232097c416 100644
--- a/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp
+++ b/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp
@@ -22,12 +22,12 @@ inline std::string getRTInfoValue(const std::map<std::string, std::shared_ptr<ngraph::Variant>>& rtInfo,
 inline std::string getPrimitivesPriorityValue(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();
-    using PrimitivesPriorityWraper = ngraph::VariantWrapper<ngraph::PrimitivesPriority>;
+    using PrimitivesPriorityWraper = ngraph::VariantWrapper<ov::PrimitivesPriority>;
 
-    if (!rtInfo.count(PrimitivesPriorityWraper::get_type_info_static().name)) return "";
+    if (!rtInfo.count(PrimitivesPriorityWraper::get_type_info_static())) return "";
 
-    const auto &attr = rtInfo.at(PrimitivesPriorityWraper::get_type_info_static().name);
-    ngraph::PrimitivesPriority pp = ngraph::as_type_ptr<PrimitivesPriorityWraper>(attr)->get();
+    const auto &attr = rtInfo.at(PrimitivesPriorityWraper::get_type_info_static());
+    ov::PrimitivesPriority pp = ngraph::as_type_ptr<PrimitivesPriorityWraper>(attr)->get();
     return pp.getPrimitivesPriority();
 }
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/attributes.hpp b/inference-engine/src/transformations/include/transformations/rt_info/attributes.hpp
new file mode 100644
index 00000000000..a68f6975f19
--- /dev/null
+++ b/inference-engine/src/transformations/include/transformations/rt_info/attributes.hpp
@@ -0,0 +1,49 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+
+#include <transformations/rt_info/disable_constant_folding.hpp>
+#include <transformations/rt_info/fused_names_attribute.hpp>
+#include <transformations/rt_info/nms_selected_indices.hpp>
+#include <transformations/rt_info/primitives_priority_attribute.hpp>
+#include <transformations/rt_info/strides_property.hpp>
+
+namespace ov {
+namespace pass {
+class TRANSFORMATIONS_API Attributes {
+public:
+    Attributes() {
+        register_factory<VariantWrapper<ngraph::FusedNames>>();
+        register_factory<VariantWrapper<PrimitivesPriority>>();
+        register_factory<VariantWrapper<DisableConstantFolding>>();
+        register_factory<VariantWrapper<NmsSelectedIndices>>();
+        register_factory<VariantWrapper<ngraph::Strides>>();
+    }
+
+    Variant * create_by_type_info(const ov::DiscreteTypeInfo & type_info) {
+        return m_factory_registry.create(type_info);
+    }
+private:
+    template <typename T>
+    void register_factory() {
+        m_factory_registry.register_factory<T>(ngraph::FactoryRegistry<Variant>::template get_default_factory<T>());
+    }
+
+    ngraph::FactoryRegistry<Variant> m_factory_registry;
+};
+} // namespace pass
+} // namespace ov
\ No newline at end of file
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/disable_constant_folding.hpp b/inference-engine/src/transformations/include/transformations/rt_info/disable_constant_folding.hpp
index eb7a55cf8ec..73e0b73ea8d 100644
--- a/inference-engine/src/transformations/include/transformations/rt_info/disable_constant_folding.hpp
+++ b/inference-engine/src/transformations/include/transformations/rt_info/disable_constant_folding.hpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#pragma once
+
 #include
 #include
 #include
@@ -13,7 +15,7 @@
 
 #include
 
-namespace ngraph {
+namespace ov {
 
 /**
  * @ingroup ie_runtime_attr_api
  * @brief DisableConstantFolding disable ConstantFolding for given operation
  */
@@ -25,15 +27,19 @@
 public:
 };
 
 TRANSFORMATIONS_API void disable_constant_folding(const std::shared_ptr<Node>& node);
-} // namespace ngraph
-
-namespace ov {
-extern template class TRANSFORMATIONS_API VariantImpl<ngraph::DisableConstantFolding>;
+TRANSFORMATIONS_API void enable_constant_folding(const std::shared_ptr<Node>& node);
+
+TRANSFORMATIONS_API bool constant_folding_is_disabled(const std::shared_ptr<Node>& node);
+
+extern template class TRANSFORMATIONS_API VariantImpl<DisableConstantFolding>;
 
 template<>
-class TRANSFORMATIONS_API VariantWrapper<ngraph::DisableConstantFolding> : public VariantImpl<ngraph::DisableConstantFolding> {
+class TRANSFORMATIONS_API VariantWrapper<DisableConstantFolding> : public VariantImpl<DisableConstantFolding> {
 public:
-    OPENVINO_RTTI("DISABLED_CONSTANT_FOLDING");
+    OPENVINO_RTTI("disabled_constant_folding", "0");
+
+    VariantWrapper() = default;
 
     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp b/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp
index 6e70824918b..720ce4f53b1 100644
--- a/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp
+++ b/inference-engine/src/transformations/include/transformations/rt_info/fused_names_attribute.hpp
@@ -7,6 +7,8 @@
  * @file fused_names_attribute.hpp
  */
 
+#pragma once
+
 #include
 #include
 #include
@@ -15,7 +17,8 @@
 #include
 #include
 
-#include "openvino/core/rtti.hpp"
+#include
+#include
 
 #include
 
@@ -31,6 +34,8 @@ private:
     std::set<std::string> fused_names;
 
 public:
+    friend class VariantWrapper<FusedNames>;
+
     /**
      * A default constructor
      */
@@ -86,13 +91,23 @@ extern template class TRANSFORMATIONS_API VariantImpl<ngraph::FusedNames>;
 template<>
 class TRANSFORMATIONS_API VariantWrapper<ngraph::FusedNames> : public VariantImpl<ngraph::FusedNames> {
 public:
-    OPENVINO_RTTI("Variant::RuntimeAttribute::FusedNames");
-    BWDCMP_RTTI_DECLARATION;
+    OPENVINO_RTTI("fused_names", "0");
+
+    VariantWrapper() = default;
 
     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
 
     std::shared_ptr<ngraph::Variant> merge(const ngraph::NodeVector & nodes) override;
 
     std::shared_ptr<ngraph::Variant> init(const std::shared_ptr<ngraph::Node> & node) override;
+
+    bool visit_attributes(AttributeVisitor & visitor) override;
+};
+
+template <>
+class TRANSFORMATIONS_API AttributeAdapter<std::set<std::string>> : public DirectValueAccessor<std::set<std::string>> {
+public:
+    OPENVINO_RTTI("AttributeAdapter<std::set<std::string>>");
+    AttributeAdapter(std::set<std::string>& value) : DirectValueAccessor<std::set<std::string>>(value) {}
 };
 
 } // namespace ov
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/nms_selected_indices.hpp b/inference-engine/src/transformations/include/transformations/rt_info/nms_selected_indices.hpp
index affdc026f0a..21dcce905a3 100644
--- a/inference-engine/src/transformations/include/transformations/rt_info/nms_selected_indices.hpp
+++ b/inference-engine/src/transformations/include/transformations/rt_info/nms_selected_indices.hpp
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#pragma once
+
 #include
 #include
 #include
@@ -28,11 +30,9 @@ extern template class TRANSFORMATIONS_API VariantImpl<NmsSelectedIndices>;
 template<>
 class TRANSFORMATIONS_API VariantWrapper<NmsSelectedIndices> : public VariantImpl<NmsSelectedIndices> {
 public:
-    static constexpr VariantTypeInfo type_info{"NMS_SELECTED_INDICES", 0};
+    OPENVINO_RTTI("nms_selected_indices", "0");
 
-    const VariantTypeInfo &get_type_info() const override {
-        return type_info;
-    }
+    VariantWrapper() = default;
 
     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp b/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp
index bc1a352fdb4..0d48a6d15e9 100644
--- a/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp
+++ b/inference-engine/src/transformations/include/transformations/rt_info/primitives_priority_attribute.hpp
@@ -19,7 +19,7 @@
 #include
 #include
 
-namespace ngraph {
+namespace ov {
 
 /**
  * @ingroup ie_runtime_attr_api
 */
@@ -31,6 +31,7 @@ private:
     std::string primitives_priority;
 
 public:
+    friend class VariantWrapper<PrimitivesPriority>;
     /**
      * A default constructor
      */
@@ -54,22 +55,22 @@ public:
  */
 TRANSFORMATIONS_API std::string getPrimitivesPriority(const std::shared_ptr<ngraph::Node> & node);
 
-} // namespace ngraph
-
-namespace ov {
-
-extern template class TRANSFORMATIONS_API VariantImpl<ngraph::PrimitivesPriority>;
+extern template class TRANSFORMATIONS_API VariantImpl<PrimitivesPriority>;
 
 template<>
-class TRANSFORMATIONS_API VariantWrapper<ngraph::PrimitivesPriority> : public VariantImpl<ngraph::PrimitivesPriority> {
+class TRANSFORMATIONS_API VariantWrapper<PrimitivesPriority> : public VariantImpl<PrimitivesPriority> {
 public:
-    OPENVINO_RTTI("VariantWrapper<ngraph::PrimitivesPriority>");
+    OPENVINO_RTTI("primitives_priority", "0");
+
+    VariantWrapper() = default;
 
     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
 
     std::shared_ptr<ngraph::Variant> merge(const ngraph::NodeVector & nodes) override;
 
     std::shared_ptr<ngraph::Variant> init(const std::shared_ptr<ngraph::Node> & node) override;
+
+    bool visit_attributes(AttributeVisitor & visitor) override;
 };
 
 } // namespace ov
diff --git a/inference-engine/src/transformations/include/transformations/rt_info/strides_property.hpp b/inference-engine/src/transformations/include/transformations/rt_info/strides_property.hpp
index f252356bada..d2e32fd91c9 100644
--- a/inference-engine/src/transformations/include/transformations/rt_info/strides_property.hpp
+++ b/inference-engine/src/transformations/include/transformations/rt_info/strides_property.hpp
@@ -9,19 +9,18 @@
 #include
 #include
 
-
 namespace ov {
 template <>
 class TRANSFORMATIONS_API VariantWrapper<ngraph::Strides> : public VariantImpl<ngraph::Strides> {
 public:
-    OPENVINO_RTTI("VariantWrapper<ngraph::Strides>");
-    VariantWrapper(const value_type& value)
-        : VariantImpl(value) {
-    }
-};
+    OPENVINO_RTTI("strides", "0");
 
-} // namespace ov
+    VariantWrapper() = default;
+
+    VariantWrapper(const value_type& value) : VariantImpl(value) {}
+};
 
 TRANSFORMATIONS_API bool has_strides_prop(const ngraph::Input<ngraph::Node>& node);
 TRANSFORMATIONS_API ngraph::Strides get_strides_prop(const ngraph::Input<ngraph::Node>& node);
 TRANSFORMATIONS_API void insert_strides_prop(ngraph::Input<ngraph::Node>& node, const ngraph::Strides& strides);
+} // namespace ov
diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp
index d0a6175f29e..c1716fa2497 100644
--- a/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp
+++ b/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <transformations/rt_info/disable_constant_folding.hpp>
 #include "itt.hpp"
 
 NGRAPH_RTTI_DEFINITION(ngraph::pass::WeightsDequantizeToFakeQuantize, "WeightsDequantizeToFakeQuantize", 0);
 
@@ -62,8 +63,8 @@ ngraph::pass::WeightsDequantizeToFakeQuantize::WeightsDequantizeToFakeQuantize()
         ngraph::copy_runtime_info(nodes_to_copy_RT_info_from, fq);
         multiply_node->output(0).replace(fq->output(0));
 
-        if (convert_node->get_rt_info().count("DISABLED_CONSTANT_FOLDING"))
-            convert_node->get_rt_info().erase("DISABLED_CONSTANT_FOLDING");
+        if (ov::constant_folding_is_disabled(convert_node))
+            ov::enable_constant_folding(convert_node);
 
         return true;
     };
diff --git a/inference-engine/src/transformations/src/transformations/init_node_info.cpp b/inference-engine/src/transformations/src/transformations/init_node_info.cpp
index 7cabe55d831..3069146c587 100644
--- a/inference-engine/src/transformations/src/transformations/init_node_info.cpp
+++ b/inference-engine/src/transformations/src/transformations/init_node_info.cpp
@@ -19,14 +19,14 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::InitNodeInfo, "InitNodeInfo", 0);
 bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Function> f) {
     RUN_ON_FUNCTION_SCOPE(InitNodeInfo);
     std::vector<std::shared_ptr<Variant> > attributes {
-        std::make_shared<VariantWrapper<FusedNames> >(FusedNames())
+        std::make_shared<VariantWrapper<ngraph::FusedNames> >(ngraph::FusedNames())
     };
 
     using VariantCreator = std::function<std::shared_ptr<Variant>(const std::string&)>;
     std::map<std::string, VariantCreator> update_attributes {
         {"PrimitivesPriority",
          [](const std::string & value) -> std::shared_ptr<Variant> {
-             return std::make_shared<VariantWrapper<PrimitivesPriority> >(PrimitivesPriority(value));
+             return std::make_shared<VariantWrapper<ov::PrimitivesPriority> >(ov::PrimitivesPriority(value));
          }
         }
     };
@@ -42,9 +42,9 @@ bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Functio
         auto& rtInfo = node->get_rt_info();
         for (auto& attr : attributes) {
-            if (rtInfo.count(attr->get_type_info().name)) continue;
+            if (rtInfo.count(attr->get_type_info())) continue;
             if (auto init_attr = attr->init(node)) {
-                rtInfo[attr->get_type_info().name] = init_attr;
+                rtInfo[attr->get_type_info()] = init_attr;
             }
         }
         // Convert manually set attributes to appropriate VariantWrapper class instances
@@ -54,7 +54,7 @@ bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Functio
             if (auto variant_string = std::dynamic_pointer_cast<VariantWrapper<std::string> >(rtInfo[attr.first])) {
                 rtInfo.erase(attr.first);
                 auto res = attr.second(variant_string->get());
-                rtInfo[res->get_type_info().name] = res;
+                rtInfo[res->get_type_info()] = res;
             }
         }
     }
diff --git a/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp b/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp
index 8096a9ac6bc..f5f4475f2e3 100644
--- a/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp
+++ b/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <transformations/rt_info/disable_constant_folding.hpp>
 
 using namespace ngraph;
 
@@ -53,8 +54,7 @@ ngraph::pass::DisableConvertConstantFoldingOnConstPath::DisableConvertConstantFo
             auto child = target_inputs.begin()->get_node();
             if (ov::is_type(parent) &&
                 (ov::is_type(child) || ov::is_type(child))) {
-                auto& rtInfo = convert->get_rt_info();
-                rtInfo["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
+                ov::disable_constant_folding(convert);
                 return true;
             }
diff --git a/inference-engine/src/transformations/src/transformations/rt_info/disable_constant_folding.cpp b/inference-engine/src/transformations/src/transformations/rt_info/disable_constant_folding.cpp
index ad2d4613fa4..a5c6b4a1a16 100644
--- a/inference-engine/src/transformations/src/transformations/rt_info/disable_constant_folding.cpp
+++ b/inference-engine/src/transformations/src/transformations/rt_info/disable_constant_folding.cpp
@@ -4,9 +4,19 @@
 
 #include "transformations/rt_info/disable_constant_folding.hpp"
 
-template class ov::VariantImpl<ngraph::DisableConstantFolding>;
+template class ov::VariantImpl<ov::DisableConstantFolding>;
 
-void ngraph::disable_constant_folding(const std::shared_ptr<Node>& node) {
+void ov::disable_constant_folding(const std::shared_ptr<Node>& node) {
     auto & rt_info = node->get_rt_info();
-    rt_info[VariantWrapper<DisableConstantFolding>::get_type_info_static().name] = make_variant<DisableConstantFolding>({});
+    rt_info[VariantWrapper<DisableConstantFolding>::get_type_info_static()] = make_variant<DisableConstantFolding>({});
+}
+
+void ov::enable_constant_folding(const std::shared_ptr<Node>& node) {
+    auto & rt_info = node->get_rt_info();
+    rt_info.erase(VariantWrapper<DisableConstantFolding>::get_type_info_static());
+}
+
+bool ov::constant_folding_is_disabled(const std::shared_ptr<Node> &node) {
+    const auto & rt_info = node->get_rt_info();
+    return rt_info.count(VariantWrapper<DisableConstantFolding>::get_type_info_static());
 }
diff --git a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp b/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp
index 1b192b82d51..7f69f5c531c 100644
--- a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp
+++ b/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp
@@ -38,9 +38,9 @@ std::string ngraph::getFusedNames(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();
     using FusedNamesWrapper = VariantWrapper<FusedNames>;
 
-    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static().name)) return {};
+    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) return {};
 
-    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static().name);
+    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static());
     FusedNames fusedNames = ov::as_type_ptr<FusedNamesWrapper>(attr)->get();
     return fusedNames.getNames();
 }
@@ -51,24 +51,23 @@ std::vector<std::string> ngraph::getFusedNamesVector(const std::shared_ptr<ngrap
     const auto &rtInfo = node->get_rt_info();
     using FusedNamesWrapper = VariantWrapper<FusedNames>;
 
-    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static().name)) return {};
+    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) return {};
 
-    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static().name);
+    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static());
     FusedNames fusedNames = ov::as_type_ptr<FusedNamesWrapper>(attr)->get();
     return fusedNames.getVectorNames();
 }
 
 template class ov::VariantImpl<ngraph::FusedNames>;
 
-BWDCMP_RTTI_DEFINITION(VariantWrapper<FusedNames>);
-
 std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::merge(const ngraph::NodeVector & nodes) {
     FusedNames mergedNames;
     for (auto &node : nodes) {
         const auto &rtInfo = node->get_rt_info();
-        if (!rtInfo.count(VariantWrapper<FusedNames>::get_type_info_static().name)) continue;
-        const auto attr = rtInfo.at(VariantWrapper<FusedNames>::get_type_info_static().name);
+        if (!rtInfo.count(VariantWrapper<FusedNames>::get_type_info_static())) continue;
+
+        const auto attr = rtInfo.at(VariantWrapper<FusedNames>::get_type_info_static());
         if (auto fusedNames = std::dynamic_pointer_cast<VariantWrapper<FusedNames> >(attr)) {
             mergedNames.fuseWith(fusedNames->get());
         }
@@ -79,3 +78,8 @@ std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::merge(const ngraph:
 std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::init(const std::shared_ptr<ngraph::Node> & node) {
     return std::make_shared<VariantWrapper<FusedNames> > (FusedNames(node->get_friendly_name()));
 }
+
+bool VariantWrapper<FusedNames>::visit_attributes(AttributeVisitor &visitor) {
+    visitor.on_attribute("value", m_value.fused_names);
+    return true;
+}
\ No newline at end of file
diff --git a/inference-engine/src/transformations/src/transformations/rt_info/nms_selected_indices.cpp b/inference-engine/src/transformations/src/transformations/rt_info/nms_selected_indices.cpp
index 93de84cd24e..b5623619521 100644
--- a/inference-engine/src/transformations/src/transformations/rt_info/nms_selected_indices.cpp
+++ b/inference-engine/src/transformations/src/transformations/rt_info/nms_selected_indices.cpp
@@ -6,14 +6,12 @@
 
 template class ov::VariantImpl<ov::NmsSelectedIndices>;
 
-constexpr ov::VariantTypeInfo ov::VariantWrapper<ov::NmsSelectedIndices>::type_info;
-
 void ov::set_nms_selected_indices(Node * node) {
     auto & rt_info = node->get_rt_info();
-    rt_info[VariantWrapper<NmsSelectedIndices>::type_info.name] = make_variant<NmsSelectedIndices>({});
+    rt_info[VariantWrapper<NmsSelectedIndices>::get_type_info_static()] = make_variant<NmsSelectedIndices>({});
 }
 
 bool ov::has_nms_selected_indices(const Node * node) {
     const auto & rt_info = node->get_rt_info();
-    return rt_info.count(VariantWrapper<NmsSelectedIndices>::type_info.name);
+    return rt_info.count(VariantWrapper<NmsSelectedIndices>::get_type_info_static());
 }
diff --git a/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp b/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp
index 6386d463848..e3809945e37 100644
--- a/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp
+++ b/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp
@@ -23,13 +23,13 @@ std::string PrimitivesPriority::getPrimitivesPriority() const {
     return primitives_priority;
 }
 
-std::string ngraph::getPrimitivesPriority(const std::shared_ptr<ngraph::Node> &node) {
+std::string ov::getPrimitivesPriority(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();
     using PrimitivesPriorityWrapper = VariantWrapper<PrimitivesPriority>;
 
-    if (!rtInfo.count(PrimitivesPriorityWrapper::get_type_info_static().name)) return "";
+    if (!rtInfo.count(PrimitivesPriorityWrapper::get_type_info_static())) return "";
 
-    const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::get_type_info_static().name);
+    const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::get_type_info_static());
     PrimitivesPriority pp = ov::as_type_ptr<PrimitivesPriorityWrapper>(attr)->get();
     return pp.getPrimitivesPriority();
 }
@@ -59,7 +59,7 @@ std::shared_ptr<ngraph::Variant> VariantWrapper<PrimitivesPriority>::merge(const
     }
 
     if (unique_pp.size() > 1) {
-        throw ngraph_error(std::string(get_type_info().name) + " no rule defined for multiple values.");
+        throw ngraph_error(std::string(get_type_info()) + " no rule defined for multiple values.");
     }
 
     std::string final_primitives_priority;
@@ -70,5 +70,10 @@ std::shared_ptr<ngraph::Variant> VariantWrapper<PrimitivesPriority>::merge(const
 }
 
 std::shared_ptr<ngraph::Variant> VariantWrapper<PrimitivesPriority>::init(const std::shared_ptr<ngraph::Node> & node) {
-    throw ngraph_error(std::string(get_type_info().name) + " has no default initialization.");
+    throw ngraph_error(std::string(get_type_info()) + " has no default initialization.");
+}
+
+bool VariantWrapper<PrimitivesPriority>::visit_attributes(AttributeVisitor &visitor) {
+    visitor.on_attribute("value", m_value.primitives_priority);
+    return true;
 }
diff --git a/inference-engine/src/transformations/src/transformations/rt_info/strides_property.cpp b/inference-engine/src/transformations/src/transformations/rt_info/strides_property.cpp
index 25bf68bbda4..36521512235 100644
--- a/inference-engine/src/transformations/src/transformations/rt_info/strides_property.cpp
+++ b/inference-engine/src/transformations/src/transformations/rt_info/strides_property.cpp
@@ -4,19 +4,19 @@
 
 #include "transformations/rt_info/strides_property.hpp"
 
-bool has_strides_prop(const ngraph::Input<ngraph::Node>& node) {
+bool ov::has_strides_prop(const ngraph::Input<ngraph::Node>& node) {
     const auto& rt_map = node.get_rt_info();
-    auto it = rt_map.find(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static().name);
+    auto it = rt_map.find(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static());
     return it != rt_map.end();
 }
 
-ngraph::Strides get_strides_prop(const ngraph::Input<ngraph::Node>& node) {
+ngraph::Strides ov::get_strides_prop(const ngraph::Input<ngraph::Node>& node) {
     const auto& rt_map = node.get_rt_info();
-    const auto& var = rt_map.at(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static().name);
+    const auto& var = rt_map.at(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static());
     return ngraph::as_type_ptr<ngraph::VariantWrapper<ngraph::Strides>>(var)->get();
 }
 
-void insert_strides_prop(ngraph::Input<ngraph::Node>& node, const ngraph::Strides& strides) {
+void ov::insert_strides_prop(ngraph::Input<ngraph::Node>& node, const ngraph::Strides& strides) {
     auto& rt_map = node.get_rt_info();
-    rt_map[ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static().name] = std::make_shared<ngraph::VariantWrapper<ngraph::Strides>>(strides);
+    rt_map[ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static()] = std::make_shared<ngraph::VariantWrapper<ngraph::Strides>>(strides);
 }
diff --git a/inference-engine/src/transformations/src/transformations/serialize.cpp b/inference-engine/src/transformations/src/transformations/serialize.cpp
index 6a6eb461e85..a14e9aa1a96 100644
--- a/inference-engine/src/transformations/src/transformations/serialize.cpp
+++ b/inference-engine/src/transformations/src/transformations/serialize.cpp
@@ -18,6 +18,7 @@
 #include "ngraph_ops/type_relaxed.hpp"
 #include "pugixml.hpp"
 #include "transformations/serialize.hpp"
+#include "transformations/rt_info/attributes.hpp"
 
 using namespace ngraph;
 
@@ -175,6 +176,82 @@ private:
     pugi::xml_node& m_xml_node;
 };
 
+class RTInfoSerializer : public ngraph::AttributeVisitor {
+    pugi::xml_node m_node;
+
+public:
+    RTInfoSerializer(const pugi::xml_node node) : m_node(node) {}
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<void> &adapter) override {
+        check_attribute_name(name);
+        if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::set<std::string>>>(&adapter)) {
+            const auto & value = join(a->get());
+            m_node.append_attribute(name.c_str()).set_value(value.c_str());
+        } else {
+            throw ngraph_error("Unsupported attribute type for serialization: " + name);
+        }
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<bool> &adapter) override {
+        check_attribute_name(name);
+        m_node.append_attribute(name.c_str()).set_value(adapter.get());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::string> &adapter) override {
+        check_attribute_name(name);
+        m_node.append_attribute(name.c_str()).set_value(adapter.get().c_str());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<int64_t> &adapter) override {
+        check_attribute_name(name);
+        m_node.append_attribute(name.c_str()).set_value(adapter.get());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<double> &adapter) override {
+        check_attribute_name(name);
+        m_node.append_attribute(name.c_str()).set_value(adapter.get());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<int>> &adapter) override {
+        check_attribute_name(name);
+        const auto & value = join(adapter.get());
+        m_node.append_attribute(name.c_str()).set_value(value.c_str());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<int64_t>> &adapter) override {
+        check_attribute_name(name);
+        const auto & value = join(adapter.get());
+        m_node.append_attribute(name.c_str()).set_value(value.c_str());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<uint64_t>> &adapter) override {
+        check_attribute_name(name);
+        const auto & value = join(adapter.get());
+        m_node.append_attribute(name.c_str()).set_value(value.c_str());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<float>> &adapter) override {
+        check_attribute_name(name);
+        const auto & value = join(adapter.get());
+        m_node.append_attribute(name.c_str()).set_value(value.c_str());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<std::string>> &adapter) override {
+        check_attribute_name(name);
+        const auto & value = join(adapter.get());
+        m_node.append_attribute(name.c_str()).set_value(value.c_str());
+    }
+
+    void on_adapter(const std::string &name, ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>> &adapter) override {
+        throw ngraph_error("Function type is unsupported for rt info serialization");
+    }
+
+    void check_attribute_name(const std::string & name) const {
+        if (name == "name" || name == "version") {
+            throw ngraph_error("Attribute key with name: " + name + " is not allowed. Please use another name");
+        }
+    }
+};
 } // namespace rt_info
 
 class XmlSerializer : public ngraph::AttributeVisitor {
@@ -724,6 +801,27 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
         // general attributes
         pugi::xml_node data = layer.append_child("data");
 
+        auto append_runtime_info = [](pugi::xml_node & node, const RTMap& attributes) {
+            pugi::xml_node rt_node = node.append_child("rt_info");
+            bool has_attrs = false;
+            for (const auto &item : attributes) {
+                auto attribute_node = rt_node.append_child("attribute");
+                attribute_node.append_attribute("name").set_value(item.second->get_type_info().name);
+                attribute_node.append_attribute("version").set_value(item.second->get_type_info().get_version().c_str());
+                rt_info::RTInfoSerializer serializer(attribute_node);
+                if (!item.second->visit_attributes(serializer)) {
+                    rt_node.remove_child(attribute_node);
+                } else {
+                    has_attrs = true;
+                }
+            }
+            if (!has_attrs) {
+                node.remove_child(rt_node);
+            }
+        };
+
+        append_runtime_info(layer, node->get_rt_info());
+
         int port_id = 0;
 
         //
         if (node->get_input_size() > 0) {
@@ -748,6 +846,7 @@
                         .set_value(std::to_string(d.get_length()).c_str());
                 }
             }
+            append_runtime_info(port, i.get_rt_info());
         }
 
         if (node_type_name == "TensorIterator" || node_type_name == "Loop") {
@@ -798,6 +897,7 @@
                         .set_value(std::to_string(d.get_length()).c_str());
                 }
             }
+            append_runtime_info(port, o.get_rt_info());
         }
 
         if (node_type_name == "TensorIterator" || node_type_name == "Loop") {
            layer.insert_move_after(output, layer.first_child());
        }
@@ -868,9 +968,6 @@ std::string provide_bin_path(const std::string &xmlPath, const std::string &binP
 } // namespace
 
 namespace ngraph {
-
-// !
[function_pass:serialize_cpp] -// serialize.cpp bool pass::Serialize::run_on_function(std::shared_ptr f) { RUN_ON_FUNCTION_SCOPE(Serialize); @@ -1014,5 +1111,4 @@ bool ngraph::pass::StreamSerialize::run_on_function(std::shared_ptr + +#include +#include + +#include +#include + +using namespace ngraph; + +TEST(RTInfoDeserialization, Node) { + std::string model = R"V0G0N( + + + + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + + + 1 + 3 + 22 + 22 + + + + + 1 + 3 + 22 + 22 + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + +)V0G0N"; + auto core = InferenceEngine::Core(); + auto net = core.ReadNetwork(model, InferenceEngine::Blob::Ptr()); + auto f = net.getFunction(); + + auto check_fused_names = [](const RTMap & info, const std::string & names) { + const std::string & key = VariantWrapper::get_type_info_static(); + ASSERT_TRUE(info.count(key)); + auto fused_names_attr = std::dynamic_pointer_cast>(info.at(key)); + ASSERT_TRUE(fused_names_attr); + ASSERT_EQ(fused_names_attr->get().getNames(), names); + }; + + auto param = f->get_parameters()[0]; + check_fused_names(param->get_rt_info(), "in1"); + + auto result = f->get_results()[0]; + auto round = result->get_input_node_ptr(0); + check_fused_names(round->get_rt_info(), "Round1,Round2"); +} + +TEST(RTInfoDeserialization, InputAndOutput) { + std::string model = R"V0G0N( + + + + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + + 1 + 3 + 22 + 22 + + + + + + 1 + 3 + 22 + 22 + + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + + 1 + 3 + 22 + 22 + + + + + + + + + + +)V0G0N"; + auto core = InferenceEngine::Core(); + auto net = core.ReadNetwork(model, InferenceEngine::Blob::Ptr()); + auto f = net.getFunction(); + + auto check_fused_names = [](const RTMap & info, const std::string & names) { + const std::string & key = VariantWrapper::get_type_info_static(); + ASSERT_TRUE(info.count(key)); + auto fused_names_attr = std::dynamic_pointer_cast>(info.at(key)); + ASSERT_TRUE(fused_names_attr); + ASSERT_EQ(fused_names_attr->get().getNames(), names); + }; + + auto param = f->get_parameters()[0]; + check_fused_names(param->output(0).get_rt_info(), "test1,test2"); + + auto result = f->get_results()[0]; + check_fused_names(result->input(0).get_rt_info(), "test5,test6"); + + auto add = result->get_input_node_ptr(0); + check_fused_names(add->input(0).get_rt_info(), "test2,test3"); + check_fused_names(add->input(1).get_rt_info(), "test3,test4"); + check_fused_names(add->output(0).get_rt_info(), "test4,test5"); +} diff --git a/inference-engine/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp b/inference-engine/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp new file mode 100644 index 00000000000..d7f6165735d --- /dev/null +++ b/inference-engine/tests/functional/inference_engine/ir_serialization/rt_info_serialization.cpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include "common_test_utils/ngraph_test_utils.hpp" +#include "ie_core.hpp" +#include "ngraph/ngraph.hpp" +#include "transformations/serialize.hpp" +#include +#include + +using namespace ngraph; + +class RTInfoSerializationTest : public CommonTestUtils::TestsCommon { +protected: + std::string test_name = GetTestName() + "_" + GetTimestamp(); + std::string m_out_xml_path = test_name + ".xml"; + std::string m_out_bin_path = test_name + ".bin"; + + void TearDown() override { + std::remove(m_out_xml_path.c_str()); + std::remove(m_out_bin_path.c_str()); + } +}; + 
+TEST_F(RTInfoSerializationTest, all_attributes) { + auto init_info = [](RTMap & info) { + info[VariantWrapper::get_type_info_static()] = + std::make_shared>(ngraph::FusedNames("add")); + info[VariantWrapper::get_type_info_static()] = + std::make_shared>(ov::PrimitivesPriority("priority")); + }; + + std::shared_ptr function; + { + auto data = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); + auto add = std::make_shared(data, data); + init_info(add->get_rt_info()); + init_info(add->input(0).get_rt_info()); + init_info(add->input(1).get_rt_info()); + init_info(add->output(0).get_rt_info()); + function = std::make_shared(OutputVector{add}, ParameterVector{data}); + } + + pass::Manager m; + m.register_pass(m_out_xml_path, m_out_bin_path); + m.run_passes(function); + + auto core = InferenceEngine::Core(); + auto net = core.ReadNetwork(m_out_xml_path, m_out_bin_path); + auto f = net.getFunction(); + + auto check_info = [](const RTMap & info) { + const std::string & key = VariantWrapper::get_type_info_static(); + ASSERT_TRUE(info.count(key)); + auto fused_names_attr = std::dynamic_pointer_cast>(info.at(key)); + ASSERT_TRUE(fused_names_attr); + ASSERT_EQ(fused_names_attr->get().getNames(), "add"); + + const std::string & pkey = VariantWrapper::get_type_info_static(); + ASSERT_TRUE(info.count(pkey)); + auto primitives_priority_attr = std::dynamic_pointer_cast>(info.at(pkey)); + ASSERT_TRUE(primitives_priority_attr); + ASSERT_EQ(primitives_priority_attr->get().getPrimitivesPriority(), "priority"); + }; + + auto add = f->get_results()[0]->get_input_node_ptr(0); + check_info(add->get_rt_info()); + check_info(add->input(0).get_rt_info()); + check_info(add->input(1).get_rt_info()); + check_info(add->output(0).get_rt_info()); +} diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convert_subtract_constant_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convert_subtract_constant_transformation.cpp index 6e65c76c0d0..e910cbf6b8e 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convert_subtract_constant_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convert_subtract_constant_transformation.cpp @@ -169,7 +169,7 @@ const std::vector testValues = }, { { ngraph::element::f32, false }, - { {127.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "DISABLED_CONSTANT_FOLDING" } }, + { {127.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "disabled_constant_folding_0" } }, { {0.03f}, element::f32, {}, false } }, { std::vector{ 2.f }, ngraph::element::i8}, diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp index d24a501dc0e..a2fc719a092 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp @@ -284,7 +284,7 @@ const std::vector testValues = { ngraph::element::u8, {{}, { { 128.f }, ngraph::element::f32, {}, false }, {}}, - {{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } }, {}}, + {{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false, { 
"disabled_constant_folding_0" } }, {}}, {{}, {}, {{ 0.0002f }, ngraph::element::f32, {}}}, op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector{ 2.f }), true diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_qdq_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_qdq_transformation.cpp index 40619545562..fb54eb596cb 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_qdq_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_qdq_transformation.cpp @@ -174,7 +174,7 @@ const std::vector testValues = { }, { {}, - { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } }, + { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } }, {} }, { std::vector{ 100.f }, ngraph::element::i8}, @@ -349,7 +349,7 @@ const std::vector testValues = { }, { {}, - { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } }, + { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } }, {} }, { std::vector{ 2.f }, ngraph::element::i8}, @@ -417,7 +417,7 @@ const std::vector testValues = { }, { {}, - { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } }, + { { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } }, {} }, { std::vector{ 2.f }, ngraph::element::i8}, diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/disable_convert_on_const_path_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/disable_convert_on_const_path_transformation.cpp index 5a2fbe0f39a..cafdd3a76b8 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/disable_convert_on_const_path_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/disable_convert_on_const_path_transformation.cpp @@ -145,12 +145,12 @@ const std::vector testValues = { ngraph::element::u8, { {ngraph::element::f32}, - { {128.f}, element::f32, {}, false, 1ul, element::u8, true, {}, { "DISABLED_CONSTANT_FOLDING" } }, + { {128.f}, element::f32, {}, false, 1ul, element::u8, true, {}, { "disabled_constant_folding_0" } }, { {0.02f}, element::f32, {}, false } }, { { ngraph::element::f32, false }, - { {128.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "DISABLED_CONSTANT_FOLDING" } }, + { {128.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "disabled_constant_folding_0" } }, { {0.03f}, element::f32, {}, false } }, { std::vector{ 1.f }, ngraph::element::f32}, diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp index 34d55ee701d..2dddf5e7dcc 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/fake_quantize_with_dq_not_optimal_transformation.cpp @@ -198,7 +198,7 @@ const std::vector fakeQuanti {}, { {}, - 
{ std::vector(64, 127.f), ngraph::element::f32, {64, 1, 1, 1}, false, 1ul, ngraph::element::i8, false, {"DISABLED_CONSTANT_FOLDING"}}, + { std::vector(64, 127.f), ngraph::element::f32, {64, 1, 1, 1}, false, 1ul, ngraph::element::i8, false, {"disabled_constant_folding_0"}}, {} }, { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp index c66748b7a74..6ce4e41ef28 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/group_convolution_transformation.cpp @@ -473,7 +473,7 @@ const std::vector testValuesGroupConv = { 1, ngraph::element::i8, false, - {"DISABLED_CONSTANT_FOLDING"} + {"disabled_constant_folding_0"} }, {} }, diff --git a/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp b/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp index 40b660bca03..1bab10d9b4a 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/nop_elimination.cpp @@ -140,7 +140,7 @@ TEST(nop_elimination, squeeze_reshape_elimination_check_info) { if (node->get_friendly_name() == "reshape") { reshape_is_missing = false; ASSERT_TRUE(std::dynamic_pointer_cast(node)); - auto original_names = getFusedNamesVector(node); + auto original_names = ngraph::getFusedNamesVector(node); sort(original_names.begin(), original_names.end()); ASSERT_EQ(original_names, std::vector({"reshape", "squeeze"})); } diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp index 19b55d80bb0..281013b887b 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/ngraph_test_utils.cpp @@ -786,7 +786,7 @@ FunctionsComparator::Result FunctionsComparator::compare( } void check_rt_info(const std::shared_ptr& f) { - static const std::vector attrs_to_check{"Variant::RuntimeAttribute::FusedNames"}; + static const std::vector attrs_to_check{"fused_names_0"}; std::ostringstream err_log; for (auto& op : f->get_ops()) { diff --git a/inference-engine/tests/unit/inference_engine/ie_compilation_context_test.cpp b/inference-engine/tests/unit/inference_engine/ie_compilation_context_test.cpp index a8313062b5d..0ada2d8c9dd 100644 --- a/inference-engine/tests/unit/inference_engine/ie_compilation_context_test.cpp +++ b/inference-engine/tests/unit/inference_engine/ie_compilation_context_test.cpp @@ -225,24 +225,24 @@ TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriority) { TEST(NetworkContext_CNNNetwork, HashWithFusedNames) { auto setFusedEmpty = [&](Node::RTMap& rtInfo) { - rtInfo[VariantWrapper::get_type_info_static().name] = - std::make_shared>(FusedNames()); + rtInfo[VariantWrapper::get_type_info_static()] = + std::make_shared>(ngraph::FusedNames()); }; auto setFused = [&](Node::RTMap& rtInfo, const std::string& name) { - rtInfo[VariantWrapper::get_type_info_static().name] = - std::make_shared>(FusedNames(name)); + rtInfo[VariantWrapper::get_type_info_static()] = + std::make_shared>(ngraph::FusedNames(name)); }; checkCustomRt(setFusedEmpty, setFused); } 
TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriorityType) { auto setPrimEmpty = [&](Node::RTMap& rtInfo) { - rtInfo[VariantWrapper::get_type_info_static().name] = - std::make_shared>(PrimitivesPriority()); + rtInfo[VariantWrapper::get_type_info_static()] = + std::make_shared>(ov::PrimitivesPriority()); }; auto setPrim = [&](Node::RTMap& rtInfo, const std::string& name) { - rtInfo[VariantWrapper::get_type_info_static().name] = - std::make_shared>(PrimitivesPriority(name)); + rtInfo[VariantWrapper::get_type_info_static()] = + std::make_shared>(ov::PrimitivesPriority(name)); }; checkCustomRt(setPrimEmpty, setPrim); } diff --git a/model-optimizer/mo/utils/ir_engine/ir_engine.py b/model-optimizer/mo/utils/ir_engine/ir_engine.py index ec3c1cd4015..e1d1bc89707 100644 --- a/model-optimizer/mo/utils/ir_engine/ir_engine.py +++ b/model-optimizer/mo/utils/ir_engine/ir_engine.py @@ -213,7 +213,12 @@ class IREngine(object): if layer.attrib['type'] == 'Const': assert 'offset' in new_attrs and 'size' in new_attrs, \ 'Incorrect attributes for Const layer, {} instead of {}!'.format(new_attrs.keys(), ['offset', 'size']) - new_attrs.update(self.__prepare_bin_attrs(layer, 0, 'custom', new_attrs['offset'], new_attrs['size'], layer[1][0].attrib['precision'])) + precision = "" + for item in layer: + if item.tag == "output": + precision = item[0].attrib["precision"] + break + new_attrs.update(self.__prepare_bin_attrs(layer, 0, 'custom', new_attrs['offset'], new_attrs['size'], precision)) layer_attrs.update(new_attrs) elif attr.tag == 'input': inputs_counter = len(attr) @@ -223,7 +228,8 @@ class IREngine(object): port_id = int(port.attrib['id']) output_shape = [] for dim in port: - output_shape.append(int(dim.text)) + if dim.tag == "dim": + output_shape.append(int(dim.text)) output_shape = shape_array([d if d != -1 else dynamic_dimension_value for d in output_shape]) diff --git a/ngraph/core/include/openvino/core/type.hpp b/ngraph/core/include/openvino/core/type.hpp index 3a742d390ce..63f850e4199 100644 --- a/ngraph/core/include/openvino/core/type.hpp +++ b/ngraph/core/include/openvino/core/type.hpp @@ -54,6 +54,17 @@ struct OPENVINO_API DiscreteTypeInfo { return *this == target_type || (parent && parent->is_castable(target_type)); } + std::string get_version() const { + if (version_id) { + return std::string(version_id); + } + return std::to_string(version); + } + + operator std::string() const { + return std::string(name) + "_" + get_version(); + } + // For use as a key bool operator<(const DiscreteTypeInfo& b) const; bool operator<=(const DiscreteTypeInfo& b) const; diff --git a/ngraph/core/include/openvino/core/variant.hpp b/ngraph/core/include/openvino/core/variant.hpp index ca43c911b4d..450a517d456 100644 --- a/ngraph/core/include/openvino/core/variant.hpp +++ b/ngraph/core/include/openvino/core/variant.hpp @@ -14,6 +14,7 @@ namespace ov { class Node; +class AttributeVisitor; using VariantTypeInfo = DiscreteTypeInfo; class OPENVINO_API Variant { @@ -27,6 +28,11 @@ public: virtual std::string to_string() { return ""; } + virtual bool visit_attributes(AttributeVisitor&) { + return false; + } + + using type_info_t = DiscreteTypeInfo; }; template @@ -34,6 +40,8 @@ class VariantImpl : public Variant { public: using value_type = VT; + VariantImpl() = default; + VariantImpl(const value_type& value) : m_value(value) {} const value_type& get() const { diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index d87c5949b17..3e0dce90980 100644 --- a/ngraph/core/src/node.cpp +++ 
b/ngraph/core/src/node.cpp @@ -805,7 +805,7 @@ bool ov::Node::evaluate_upper(const HostTensorVector& output_values) const { bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& input_values) { OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "Node::constant_fold"); - if (m_rt_info.count("DISABLED_CONSTANT_FOLDING")) { + if (m_rt_info.count("disabled_constant_folding_0")) { return false; } diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index 25e5189c5b1..8d6c27369a9 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -174,7 +174,7 @@ bool op::v3::ShapeOf::evaluate_upper(const HostTensorVector& output_values) cons bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) { OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "op::v3::ShapeOf::constant_fold"); - if (get_rt_info().count("DISABLED_CONSTANT_FOLDING")) + if (get_rt_info().count("disabled_constant_folding_0")) return false; return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0]); } @@ -233,7 +233,7 @@ bool op::v0::ShapeOf::has_evaluate() const { bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) { OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "op::v0::ShapeOf::constant_fold"); - if (get_rt_info().count("DISABLED_CONSTANT_FOLDING")) + if (get_rt_info().count("disabled_constant_folding_0")) return false; return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0]); } diff --git a/ngraph/core/src/pass/constant_folding.cpp b/ngraph/core/src/pass/constant_folding.cpp index 232619bb5e4..9ef0c28d327 100644 --- a/ngraph/core/src/pass/constant_folding.cpp +++ b/ngraph/core/src/pass/constant_folding.cpp @@ -87,7 +87,7 @@ bool ngraph::pass::ConstantFolding::pre_calculated_values_folding(const std::sha if (status) { for (const auto& node : order) { const auto& rt_info = node->get_rt_info(); - if (rt_info.count("DISABLED_CONSTANT_FOLDING")) { + if (rt_info.count("disabled_constant_folding_0")) { status = false; break; } diff --git a/ngraph/frontend/ir/src/ir_deserializer.cpp b/ngraph/frontend/ir/src/ir_deserializer.cpp new file mode 100644 index 00000000000..6cbc7689602 --- /dev/null +++ b/ngraph/frontend/ir/src/ir_deserializer.cpp @@ -0,0 +1,739 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ir_frontend/model.hpp" + +using namespace ov; + +XmlDeserializer::IoMap XmlDeserializer::updated_io_map(const pugi::xml_node& node) { + auto body_node = node.child("body"); + + if (body_node.empty()) { + IE_THROW() << "Missing body part."; + } + // Fill map: parameter/result id to parameter/result number in Function + + auto extend_io_map = io_map; + + FOREACH_CHILD (layer, body_node.child("layers"), "layer") { + auto type = XMLParseUtils::GetStrAttr(layer, "type"); + + if (type == "Parameter") { + auto id = XMLParseUtils::GetUIntAttr(layer, "id"); + extend_io_map.inputs.insert({id, -1}); // try add as unconnected + } else if (type == "Result") { + auto id = XMLParseUtils::GetUIntAttr(layer, "id"); + extend_io_map.outputs.insert({id, -1}); // try add as unconnected + } + } + return extend_io_map; +} + +std::vector> XmlDeserializer::parseInputDescription( + const pugi::xml_node& node) { + std::vector> inputs; + const auto up_io_map = updated_io_map(node); + + // Parse PortMap: external_port_id for 
inputs does not always appear in consecutive order + std::map input_map; + FOREACH_CHILD (input, node.child("port_map"), "input") { + int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id"); + input_map.emplace(ext_port_id, input); + } + + for (const auto& input : input_map) { + auto& xml_input = input.second; + auto axis_attr = xml_input.attribute("axis"); + int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id"); + size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id"); + + // if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput. + if (!axis_attr.empty()) { + size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis"); + int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0); + int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1); + int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1); + int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1); + + const auto input_index = up_io_map.inputs.at(body_parameter_index); + + inputs.push_back(std::make_shared(ti_input_index, + input_index, + start, + stride, + part_size, + end, + axis)); + } else { + // otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput + bool is_back_edge_exist = false; + FOREACH_CHILD (xml_edge, node.child("back_edges"), "edge") { + size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer"); + + if (to_layer == body_parameter_index) { + size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer"); + + const auto input_index = up_io_map.inputs.at(body_parameter_index); + const auto output_index = up_io_map.outputs.at(from_layer); + + inputs.push_back( + std::make_shared(ti_input_index, + input_index, + output_index)); + + is_back_edge_exist = true; + break; + } + } + + // ti_input_index = -1 means that Parameter of the body is not connected to inputs of + // TensorIterator and is used only for internal needs. + if (!is_back_edge_exist && ti_input_index >= 0) { + const auto input_index = up_io_map.inputs.at(body_parameter_index); + + inputs.push_back( + std::make_shared(ti_input_index, + input_index)); + } + } + } + return inputs; +} + +std::vector> XmlDeserializer::parseOutputDescription( + const pugi::xml_node& node) { + std::vector> outputs; + const auto up_io_map = updated_io_map(node); + + // Parse PortMap: outputs + std::map output_map; + FOREACH_CHILD (output, node.child("port_map"), "output") { + int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id"); + output_map.emplace(ext_port_id, output); + } + + uint64_t output_number = 0; + for (const auto& output : output_map) { + auto& xml_output = output.second; + auto axis_attr = xml_output.attribute("axis"); + size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id"); + + // if external_port_id < 0 it means that this body result isn't connected to the Loop output + // and is used only for internal needs. For TensorIterator external_port_id is always > 0. + if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) { + // if axis is set, then concatenation is enabled. Create + // ngraph::TensorIterator::ConcatOutput. 
+ if (!axis_attr.empty()) { + int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis"); + int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0); + int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1); + int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1); + int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1); + + const auto output_index = up_io_map.outputs.at(body_result_index); + + outputs.push_back(std::make_shared(output_index, + output_number, + start, + stride, + part_size, + end, + axis)); + } else { + // otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration. + const auto output_index = up_io_map.outputs.at(body_result_index); + + outputs.push_back(std::make_shared(output_index, + output_number, + -1)); + } + output_number++; + } + } + return outputs; +} + +ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) { + ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1}; + const auto up_io_map = updated_io_map(node); + + NGRAPH_CHECK(!up_io_map.inputs.empty() || !up_io_map.outputs.empty(), + "No parameters or results found in body Function."); + + // Parse PortMap: external_port_id for inputs/outputs does not always appear in consecutive + // order + std::map input_map; + FOREACH_CHILD (input, node.child("port_map"), "input") { + int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id"); + input_map.emplace(ext_port_id, input); + } + std::map output_map; + FOREACH_CHILD (output, node.child("port_map"), "output") { + int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id"); + output_map.emplace(ext_port_id, output); + } + + for (const auto& input : input_map) { + auto& xml_input = input.second; + auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", ""); + size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id"); + if (purpose == "current_iteration") { + result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index); + } + } + + for (const auto& output : output_map) { + auto& xml_output = output.second; + auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", ""); + size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id"); + if (purpose == "execution_condition") { + result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index); + } + } + + return result; +} + +void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { + static const std::unordered_set skip_names = {"input_descriptions", + "output_descriptions", + "special_body_ports"}; + std::string val; + + // for TensorIterator look for 'port_map' as 'data' does not exist + if (m_node.child("port_map")) { + if (auto a = ngraph::as_type< + ngraph::AttributeAdapter>>>( + &adapter)) { + a->set(parseInputDescription(m_node)); + } else if (auto a = ngraph::as_type>>>(&adapter)) { + a->set(parseOutputDescription(m_node)); + } else if (auto a = + ngraph::as_type>(&adapter)) { + a->set(parsePurposeAttribute(m_node)); + } + } + + if (skip_names.count(name) && !getStrAttribute(m_node.child("data"), name, val)) + return; + if (auto a = ngraph::as_type>(&adapter)) { + static_cast(*a) = InferenceEngine::details::convertPrecision(val); + } else if (auto a = ngraph::as_type>(&adapter)) { + std::vector shape; + std::vector dims; + if (!getParameters(m_node.child("data"), name, shape)) + return; + for (const auto& dim 
: shape) + dims.emplace_back(dim); + static_cast(*a) = ngraph::PartialShape(dims); + } else if (auto a = ngraph::as_type>(&adapter)) { + std::vector shape; + if (!getParameters(m_node.child("data"), name, shape)) + return; + static_cast(*a) = ngraph::Shape(shape); + } else if (auto a = ngraph::as_type>(&adapter)) { + std::vector shape; + if (!getParameters(m_node.child("data"), name, shape)) + return; + static_cast(*a) = ngraph::Strides(shape); +#ifdef __APPLE__ + } else if (auto a = ngraph::as_type>>(&adapter)) { + std::vector result; + if (!getParameters(m_node.child("data"), name, result)) + return; + static_cast&>(*a) = result; +#else + } else if (auto a = ngraph::as_type>>(&adapter)) { + std::vector result; + if (!getParameters(m_node.child("data"), name, result)) + return; + a->set(result); +#endif + } else if (auto a = ngraph::as_type>(&adapter)) { + std::vector axes; + if (!getParameters(m_node.child("data"), name, axes)) + return; + static_cast(*a) = ngraph::AxisSet(axes); + } else if (auto a = ngraph::as_type>(&adapter)) { + if (!getStrAttribute(m_node.child("data"), name, val)) + return; + static_cast(*a) = ngraph::as_enum(val); + } else if (auto a = ngraph::as_type>(&adapter)) { + if (!getStrAttribute(m_node.child("data"), name, val)) + return; + static_cast(*a) = ngraph::as_enum(val); + } else if (auto a = ngraph::as_type>(&adapter)) { + std::vector shape; + if (!getParameters(m_node.child("data"), name, shape)) + return; + std::vector coord_diff(shape.begin(), shape.end()); + static_cast(*a) = ngraph::CoordinateDiff(coord_diff); + } else if (auto a = ngraph::as_type>>(&adapter)) { + std::string variable_id; + if (!getStrAttribute(m_node.child("data"), name, variable_id)) + return; + if (!m_variables.count(variable_id)) { + m_variables[variable_id] = std::make_shared( + ngraph::VariableInfo{ngraph::PartialShape::dynamic(), ngraph::element::dynamic, variable_id}); + } + a->set(m_variables[variable_id]); + } else if (auto a = ngraph::as_type>>( + &adapter)) { + std::string value; + pugi::xml_node dn = m_node.child("data"); + auto type = XMLParseUtils::GetStrAttr(m_node, "type"); + + if (dn.empty()) + IE_THROW() << "No attributes defined for " << type << " op!"; + + if (getStrAttribute(dn, name, value)) { + auto buffer = std::make_shared(value.size()); + auto data = static_cast(buffer->get_ptr()); + value.copy(data, value.size()); + a->set(buffer); + } else if (name == "value" && type == "Const") { + std::vector shape; + std::string el_type_str; + + size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset"); + size_t size = XMLParseUtils::GetUInt64Attr(dn, "size"); + if (!getStrAttribute(dn, "element_type", el_type_str)) + return; + if (!getParameters(dn, "shape", shape)) + return; + + ngraph::element::Type el_type = InferenceEngine::details::convertPrecision(el_type_str); + + if (!m_weights) + IE_THROW() << "Empty weights data in bin file or bin file cannot be found!"; + if (m_weights->size() < offset + size) + IE_THROW() << "Incorrect weights in bin file!"; + if (size < std::ceil(ngraph::shape_size(shape) * el_type.bitwidth() / 8.f)) + IE_THROW() << "Attribute and shape size are inconsistent for " << type << " op!"; + + char* data = m_weights->get_ptr() + offset; + auto buffer = + std::make_shared>>( + data, + size, + m_weights); + a->set(buffer); + } + } else if (auto a = ngraph::as_type>(&adapter)) { + const auto& type = XMLParseUtils::GetStrAttr(m_node, "type"); + const auto& version = XMLParseUtils::GetStrAttr(m_node, "version"); + + ngraph::op::FrameworkNodeAttrs 
node_attrs; + node_attrs.set_opset_name(version); + node_attrs.set_type_name(type); + + pugi::xml_node dn = m_node.child("data"); + + if (!dn.empty()) { + for (const auto& data_attr : dn.attributes()) { + node_attrs[data_attr.name()] = data_attr.as_string(); + } + } + + a->set(node_attrs); + } else if (const auto& a = ngraph::as_type>(&adapter)) { + ngraph::element::TypeVector types; + if (!getParameters(m_node.child("data"), name, types)) + return; + a->set(types); + } else { + IE_THROW() << "Error reading IR: attribute adapter cannot be found for " << name << " parameter"; + } +} + +void XmlDeserializer::on_adapter(const std::string& name, + ngraph::ValueAccessor>& adapter) { + std::shared_ptr ngraph_function; + if (!name.compare("body")) { + auto body_node = m_node.child(name.c_str()); + if (body_node.empty()) { + IE_THROW() << "TensorIterator has no body."; + } + ngraph_function = parse_function(m_node.child(name.c_str()), m_weights); + } else if (!name.compare("net")) { + ngraph_function = parse_function(m_node, m_weights); + } else { + IE_THROW() << "Error: unrecognized adapter name: " << name << "."; + } + adapter.set(ngraph_function); +} + +std::shared_ptr XmlDeserializer::parse_function(const pugi::xml_node& root, + const ov::Weights& weights) { + // OV_ITT_SCOPE_CHAIN(FIRST_INFERENCE, taskChain, itt::domains::V10Reader_RT, "V10Parser", "Parse"); + + struct FunctionNodes { + ngraph::ParameterVector parameters; + ngraph::ResultVector results; + ngraph::NodeVector all; + ngraph::SinkVector sinks; + }; + + struct edge { + size_t fromLayerId, fromPortId, toPortId; + }; + struct node_params { + pugi::xml_node xml; + GenericLayerParams params; + }; + + std::map params; + + std::vector outputs; + std::unordered_set opName; + + // Read all layers and store their parameters in the params map + FOREACH_CHILD (node, root.child("layers"), "layer") { + auto node_param = parseGenericParams(node); + if (opName.find(node_param.name) != opName.end() && node_param.type != "Result") + IE_THROW() << "Invalid IR! " << node_param.name << " name is not unique!"; + opName.insert(node_param.name); + params[node_param.layerId] = {node, node_param}; + if (node_param.type == "Result" || node_param.type == "Assign") { + outputs.push_back(node_param.layerId); + } + } + + std::map> edges; + std::map> id_to_node; + + // Read all edges and store them for further usage + FOREACH_CHILD (_ec, root.child("edges"), "edge") { + size_t fromLayer = XMLParseUtils::GetUIntAttr(_ec, "from-layer"); + size_t fromPort = XMLParseUtils::GetUIntAttr(_ec, "from-port"); + size_t toLayer = XMLParseUtils::GetUIntAttr(_ec, "to-layer"); + size_t toPort = XMLParseUtils::GetUIntAttr(_ec, "to-port"); + edges[toLayer].push_back({fromLayer, fromPort, toPort}); + } + + // Run DFS starting from outputs to get the nodes in topological order + std::set used; + std::vector order; + std::function dfs = [&edges, &order, &used, &dfs](const size_t id) { + if (used.count(id)) + return; + used.insert(id); + for (auto& edge : edges[id]) { + dfs(edge.fromLayerId); + } + order.push_back(id); + }; + std::for_each(outputs.begin(), outputs.end(), dfs); + + // OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphNodes"); + + FunctionNodes func_nodes; + + std::map> variable_id_to_read_value; + + // Following the topological order, create nGraph operations + for (auto& layer_id : order) { + auto& p = params[layer_id]; + const auto& edgeIt = edges.find(layer_id); + if (edgeIt == edges.end()) + continue; + ngraph::OutputVector inputs(edgeIt->second.size()); + for (auto& e : edgeIt->second) { + auto input_node = id_to_node[e.fromLayerId]; + if (!input_node) { + IE_THROW() << "Attempt to access node " << e.fromLayerId << " that is not in the graph."; + } + auto& p_output = params[e.fromLayerId].params; + size_t const realInputPortId = p.params.getRealInputPortId(e.toPortId); + if (realInputPortId >= inputs.size()) + IE_THROW() << p.params.type << " layer " << p.params.name << " with id: " << p.params.layerId + << " is inconsistent!"; + inputs[realInputPortId] = input_node->output(p_output.getRealOutputPortId(e.fromPortId)); + } + + auto node = createNode(inputs, p.xml, weights, p.params); + id_to_node[layer_id] = node;
+ + // Check that the output shape after nGraph node validation is the same as in the IR, + // because the IR is always right! + // Temporarily disabled! + // for (size_t i = 0; i < p.params.outputPorts.size(); ++i) { + // if (p.params.outputPorts[i].dims != node->output(i).get_shape()) { + // IE_THROW() << "Shape after nGraph infer " << + // details::dumpVec(node->output(i).get_shape()) + // << " differ from IR shapes: " << + // details::dumpVec(p.params.outputPorts[i].dims); + // } + // } + + if (const auto& parameter_node = std::dynamic_pointer_cast(node)) { + io_map.inputs.insert({layer_id, func_nodes.parameters.size()}); + func_nodes.parameters.emplace_back(parameter_node); + } + + if (const auto& result_node = std::dynamic_pointer_cast(node)) { + io_map.outputs.insert({layer_id, func_nodes.results.size()}); + func_nodes.results.emplace_back(result_node); + } + + if (const auto& sink = std::dynamic_pointer_cast(node)) { + func_nodes.sinks.emplace_back(sink); + } + + if (const auto& read_value = std::dynamic_pointer_cast(node)) { + variable_id_to_read_value[read_value->get_variable_id()] = read_value; + } + + func_nodes.all.emplace_back(node); + } + + // OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphFunction"); + + auto function = std::make_shared(func_nodes.results, + func_nodes.sinks, + func_nodes.parameters, + XMLParseUtils::GetStrAttr(root, "name", "")); + for (const auto& sink : func_nodes.sinks) { + if (const auto& assign = std::dynamic_pointer_cast(sink)) { + assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id())); + } + } + + return function; +} + +GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) { + const auto parsePort = [this](const pugi::xml_node& parentNode, + const GenericLayerParams& params, + bool input) -> GenericLayerParams::LayerPortData { + GenericLayerParams::LayerPortData port; + + port.portId = XMLParseUtils::GetIntAttr(parentNode, "id"); + + FOREACH_CHILD (node, parentNode, "dim") { + int64_t dim = 0; + const pugi::char_t* dimVal = node.child_value(); + std::stringstream ss(dimVal); + if (!(ss >> dim) || dim < -1) { + IE_THROW() << "dimension (" << dimVal << ") in node " << node.name() + << " must be greater than or equal to -1: at offset " << node.offset_debug(); + } + port.dims.push_back(dim); + } + + ngraph::element::Type type(ngraph::element::Type_t::undefined); + // Input ports have no precision + if (!input) { + const std::string& preStr = XMLParseUtils::GetStrAttr(parentNode, "precision"); + type = InferenceEngine::details::convertPrecision(preStr); + } + port.precision = type; + std::vector names; + if (getParameters(parentNode, "names", names)) { + for (size_t i = 0; i < names.size(); i++) { + std::string name = names[i]; + // Restore the original name if it contains the delimiter: + // getParameters(...) returns the vector of names that were split by the delimiter ',', + // but some names can contain ',' as part of the name; in that case '\' is used to + // escape the delimiter. The loop below finds names that contained the + // delimiter and restores the original name. + while (i < names.size() && names[i].at(names[i].length() - 1) == '\\') { + name.replace(names[i].length() - 1, 1, ","); + name += names[++i]; + } + port.names.emplace(name); + } + } + return port; + }; + GenericLayerParams params; + + params.layerId = XMLParseUtils::GetIntAttr(node, "id"); + params.version = XMLParseUtils::GetStrAttr(node, "version"); + + params.type = XMLParseUtils::GetStrAttr(node, "type"); + + params.name = XMLParseUtils::GetStrAttr(node, "name"); + + auto outNode = node.child("output"); + if (!outNode.empty()) { + FOREACH_CHILD (_cn, outNode, "port") { params.outputPorts.emplace_back(parsePort(_cn, params, false)); } + } + auto inpNode = node.child("input"); + if (!inpNode.empty()) { + FOREACH_CHILD (_cn, inpNode, "port") { params.inputPorts.emplace_back(parsePort(_cn, params, true)); } + } + return params; +} + +std::shared_ptr XmlDeserializer::createNode(const std::vector>& inputs, + const pugi::xml_node& node, + const ov::Weights& weights, + const GenericLayerParams& params) { + // Check that inputs are correctly defined + for (size_t i = 0; i < inputs.size(); i++) { + if (!inputs[i].get_node()) + IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId + << " has incorrect input with index " << i << "!"; + if (ngraph::element::Type_t::undefined == inputs[i].get_element_type()) + IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId + << " has undefined element type for input with index " << i << "!"; + } + + std::shared_ptr ngraphNode; + + // Find registered opset + auto opsetIt = m_opsets.find(params.version); + + // Try to create operation from loaded opsets + static const std::unordered_set experimental_ops_added_to_opset = { + "ExperimentalDetectronDetectionOutput", + "ExperimentalDetectronGenerateProposalsSingleImage", + "ExperimentalDetectronPriorGridGenerator", + "ExperimentalDetectronROIFeatureExtractor", + "ExperimentalDetectronTopKROIs", + "GRUCell", + "RNNCell", + "Proposal"}; + + if (experimental_ops_added_to_opset.count(params.type) && + (params.version == "experimental" || params.version == "extension")) { + opsetIt = m_opsets.find("opset6"); + } + + if (!ngraphNode && opsetIt != m_opsets.end()) { + auto const& type = params.type == "Const" ? 
"Constant" : params.type; + + if (params.version == "opset1") { + // MVN, ROIPooling and ReorgYolo were missing in opset1 + if (type == "MVN" || type == "ROIPooling" || type == "ReorgYolo") { + opsetIt = m_opsets.find("opset2"); + if (opsetIt == m_opsets.end()) { + IE_THROW() << "Cannot create " << params.type << " layer " << params.name + << " id:" << params.layerId << " from unsupported opset: " << params.version; + } + } + } + + auto const& opset = opsetIt->second; + + ngraphNode = std::shared_ptr(opset.create_insensitive(type)); + if (!ngraphNode) { + IE_THROW() << "Opset " << params.version << " doesn't contain the operation with type: " << type; + } + // Share Weights form constant blob + if (auto constant = std::dynamic_pointer_cast(ngraphNode)) { + constant->alloc_buffer_on_visit_attributes(false); + } + ngraphNode->set_arguments(inputs); + XmlDeserializer visitor(node, weights, m_opsets, m_variables); + + if (ngraphNode->visit_attributes(visitor)) { + ngraphNode->constructor_validate_and_infer_types(); + } + + // To be sure that all default values will be initialized: + ngraphNode = ngraphNode->clone_with_new_inputs(ngraphNode->input_values()); + } + + if (!ngraphNode && m_use_framework_node) { + ngraphNode = std::make_shared(inputs); + XmlDeserializer visitor(node, weights, m_opsets, m_variables); + ngraphNode->visit_attributes(visitor); + + size_t index{0}; + for (const auto& output_params : params.outputPorts) { + ngraphNode->set_output_type(index, output_params.precision, ngraph::PartialShape(output_params.dims)); + ++index; + } + } + + if (!ngraphNode) { + IE_THROW() << "Cannot create " << params.type << " layer " << params.name << " id:" << params.layerId + << " from unsupported opset: " << params.version; + } + + // Save run time info + auto& rtInfo = ngraphNode->get_rt_info(); + pugi::xml_node dn = node.child("data"); + if (dn) { + const auto pr_data = dn.attribute("PrimitivesPriority"); + if (pr_data) { + rtInfo["PrimitivesPriority"] = std::make_shared<::ngraph::VariantWrapper>(pr_data.value()); + } + const auto aw_data = dn.attribute("alt_width"); + if (aw_data) { + rtInfo["alt_width"] = std::make_shared<::ngraph::VariantWrapper>(aw_data.value()); + } + } + + ngraphNode->set_friendly_name(params.name); + for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) { + if (!params.outputPorts[i].names.empty()) + ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names); + } + + ov::pass::Attributes attrs_factory; + auto set_runtime_info = [&attrs_factory](RTMap& rt_info, const pugi::xml_node& rt_attrs) { + if (!rt_attrs) + return; + for (const auto& item : rt_attrs) { + std::string attribute_name, attribute_version; + if (!getStrAttribute(item, "name", attribute_name)) { + IE_THROW() << "rt_info attribute has no \"name\" field"; + } + if (!getStrAttribute(item, "version", attribute_version)) { + IE_THROW() << "rt_info attribute: " << attribute_name << " has no \"version\" field"; + } + const auto& type_info = ov::DiscreteTypeInfo(attribute_name.c_str(), 0, attribute_version.c_str()); + if (auto attr = attrs_factory.create_by_type_info(type_info)) { + RTInfoDeserializer attribute_visitor(item); + if (attr->visit_attributes(attribute_visitor)) { + rt_info[type_info] = std::shared_ptr(attr); + } else { + IE_THROW() << "VisitAttributes is not supported for: " << item.name() << " attribute"; + } + } else { + IE_THROW() << "Attribute: " << item.name() << " is not recognized"; + } + } + }; + + // set node runtime info attributes + 
set_runtime_info(ngraphNode->get_rt_info(), node.child("rt_info")); + + // set output ports runtime info attributes + auto out_node = node.child("output"); + if (!out_node.empty()) { + size_t index{0}; + FOREACH_CHILD (rt_node, out_node, "port") { + set_runtime_info(ngraphNode->output(index).get_rt_info(), rt_node.child("rt_info")); + ++index; + } + } + + // set input ports runtime info attributes + auto in_node = node.child("input"); + if (!in_node.empty()) { + size_t index{0}; + FOREACH_CHILD (rt_node, in_node, "port") { + set_runtime_info(ngraphNode->input(index).get_rt_info(), rt_node.child("rt_info")); + ++index; + } + } + + return ngraphNode; +} diff --git a/ngraph/frontend/ir/src/ir_deserializer.hpp b/ngraph/frontend/ir/src/ir_deserializer.hpp new file mode 100644 index 00000000000..9ebef0b9062 --- /dev/null +++ b/ngraph/frontend/ir/src/ir_deserializer.hpp @@ -0,0 +1,191 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace ov { +struct GenericLayerParams { + struct LayerPortData { + size_t portId; + std::vector dims; + ngraph::element::Type_t precision; + std::unordered_set names; + }; + size_t layerId; + std::string version; + std::string name; + std::string type; + std::vector inputPorts; + std::vector outputPorts; + + size_t getRealInputPortId(size_t id) const { + size_t real_id = 0; + for (auto& it : inputPorts) { + if (it.portId == id) { + return real_id; + } + ++real_id; + } + IE_THROW() << "Cannot find input port with id " << id << " in layer " << name; + } + + size_t getRealOutputPortId(size_t id) const { + size_t real_id = 0; + for (auto& it : outputPorts) { + if (it.portId == id) { + return real_id; + } + ++real_id; + } + IE_THROW() << "Cannot find output port with id " << id << " in layer " << name; + } +}; + +class XmlDeserializer : public ngraph::AttributeVisitor {
public: + explicit XmlDeserializer(const pugi::xml_node& node, + const ov::Weights& weights, + const std::unordered_map& opsets, + std::unordered_map>& variables) + : m_node(node), + m_weights(weights), + m_opsets(opsets), + m_variables(variables) {} + + void on_adapter(const std::string& name, ngraph::ValueAccessor& value) override { + std::string val; + if (!getStrAttribute(m_node.child("data"), name, val)) + return; + value.set(val); + } + void on_adapter(const std::string& name, ngraph::ValueAccessor& value) override { + std::string val; + if (!getStrAttribute(m_node.child("data"), name, val)) + return; + std::transform(val.begin(), val.end(), val.begin(), [](char ch) { + return std::tolower(static_cast(ch)); + }); + std::set true_names{"true", "1"}; + std::set false_names{"false", "0"}; + + bool is_true = true_names.find(val) != true_names.end(); + bool is_false = false_names.find(val) != false_names.end(); + + if (!is_true && !is_false) + return; + value.set(is_true); + } + void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; + + void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override { + std::string val; + if (!getStrAttribute(m_node.child("data"), name, val)) + return; + adapter.set(stringToType(val)); + } + void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override { + std::string val; + if (!getStrAttribute(m_node.child("data"), name, val)) + return; + adapter.set(stringToType(val)); + } + + void on_adapter(const std::string& name, + ngraph::ValueAccessor>& adapter) 
override; + + void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { + std::vector value; + if (!getParameters(m_node.child("data"), name, value)) + return; + adapter.set(value); + } + + void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { + std::vector value; + if (!getParameters(m_node.child("data"), name, value)) + return; + adapter.set(value); + } + + void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { + std::vector value; + if (!getParameters(m_node.child("data"), name, value)) + return; + adapter.set(value); + } + + void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { + std::vector value; + if (!getParameters(m_node.child("data"), name, value)) + return; + adapter.set(value); + } + + void use_framework_node(bool flag) { + m_use_framework_node = flag; + } + +private: + struct IoMap { + using NodeIdToIoIndex = std::unordered_map; + NodeIdToIoIndex inputs; + NodeIdToIoIndex outputs; + }; + + /// \brief Traverses port_map in order to create vector of InputDescription shared_ptrs. + /// Shall be used only for ops which have port_map attribute. + /// \param node xml op representation + std::vector> parseInputDescription( + const pugi::xml_node& node); + /// \brief Traverses port_map in order to create vector of OutputDescription shared_ptrs. + /// Shall be used only for ops which have port_map attribute. + /// \param node xml op representation + std::vector> parseOutputDescription( + const pugi::xml_node& node); + + // TODO: consider calling only once per layer/TI-Loop node + IoMap updated_io_map(const pugi::xml_node& node); + + /// \brief Traverses xml node representation in order to create nGraph function for it. + /// \param node xml node representation + /// \param weights weights attached to current node + /// \return shared pointer to function representing input node + std::shared_ptr parse_function(const pugi::xml_node& root, const ov::Weights& weights); + /// \brief Traverses xml node representation in order to get the purpose attribute of + /// inputs/outputs in the body of Loop op. + /// \param node xml node representation + /// \return struct with the value of the purpose attribute + ngraph::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node); + + GenericLayerParams parseGenericParams(const pugi::xml_node& node); + + std::shared_ptr createNode(const ngraph::OutputVector& inputs, + const pugi::xml_node& node, + const ov::Weights& weights, + const GenericLayerParams& params); + + // -- DATA -- + const pugi::xml_node m_node; + const ov::Weights& m_weights; + const std::unordered_map& m_opsets; + std::unordered_map>& m_variables; + + /// + /// Stores information about the parameter/result order during function creation; + /// it is used during Inputs/Outputs Description creation in SubGraph processing. + /// + IoMap io_map; + + bool m_use_framework_node{false}; +}; +} // namespace ov \ No newline at end of file diff --git a/ngraph/frontend/ir/src/model.cpp b/ngraph/frontend/ir/src/model.cpp index 940d695fcf1..a3106a97667 100644 --- a/ngraph/frontend/ir/src/model.cpp +++ b/ngraph/frontend/ir/src/model.cpp @@ -6,7 +6,7 @@ #include -#include +#include #include #include #include @@ -15,893 +15,6 @@ using namespace ngraph; using namespace InferenceEngine; namespace { - -struct GenericLayerParams { - struct LayerPortData { - size_t portId; - std::vector dims; - ngraph::element::Type_t precision; - std::unordered_set names; - }; - size_t layerId; - std::string version; - std::string name; - std::string type; - std::vector inputPorts; - std::vector outputPorts; - - size_t getRealInputPortId(size_t id) const { - size_t real_id = 0; - for (auto& it : inputPorts) { - if (it.portId == id) { - return real_id; - } - ++real_id; - } - IE_THROW() << "Can not find input port with id " << id << " in layer " << name; - } - - size_t getRealOutputPortId(size_t id) const { - size_t real_id = 0; - for (auto& it : outputPorts) { - if (it.portId == id) { - return real_id; - } - ++real_id; - } - IE_THROW() << "Can not find output port with id " << id << " in layer " << name; - } -}; - -void operator>>(const std::stringstream& in, ngraph::element::Type& type) { - type = details::convertPrecision(ngraph::trim(in.str())); -} - -bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value) { - if (!node) - return false; - - auto attr = node.attribute(name.c_str()); - if (attr.empty()) - return false; - value = std::string(attr.value()); - return true; -} - -template -bool getParameters(const pugi::xml_node& node, const std::string& name, std::vector& value) { - std::string param; - if (!getStrAttribute(node, name, param)) - return false; - std::stringstream ss(param); - std::string field; - while (getline(ss, field, ',')) { - if (field.empty()) - IE_THROW() << "Cannot get vector of parameters! 
\"" << param << "\" is incorrect"; - std::stringstream fs(field); - T val; - fs >> val; - value.emplace_back(val); - } - return true; -} - -template -T stringToType(const std::string& valStr) { - T ret{0}; - std::istringstream ss(valStr); - if (!ss.eof()) { - ss >> ret; - } - return ret; -} - -class XmlDeserializer : public ngraph::AttributeVisitor { -public: - explicit XmlDeserializer(const pugi::xml_node& node, - const ov::Weights& weights, - const std::unordered_map& opsets, - std::unordered_map>& variables) - : m_node(node), - m_weights(weights), - m_opsets(opsets), - m_variables(variables) {} - - void on_adapter(const std::string& name, ngraph::ValueAccessor& value) override { - std::string val; - if (!getStrAttribute(m_node.child("data"), name, val)) - return; - value.set(val); - } - void on_adapter(const std::string& name, ngraph::ValueAccessor& value) override { - std::string val; - if (!getStrAttribute(m_node.child("data"), name, val)) - return; - std::transform(val.begin(), val.end(), val.begin(), [](char ch) { - return std::tolower(static_cast(ch)); - }); - std::set true_names{"true", "1"}; - std::set false_names{"false", "0"}; - - bool is_true = true_names.find(val) != true_names.end(); - bool is_false = false_names.find(val) != false_names.end(); - - if (!is_true && !is_false) - return; - value.set(is_true); - } - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; - - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override { - std::string val; - if (!getStrAttribute(m_node.child("data"), name, val)) - return; - adapter.set(stringToType(val)); - } - void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override { - std::string val; - if (!getStrAttribute(m_node.child("data"), name, val)) - return; - adapter.set(stringToType(val)); - } - - void on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) override; - - void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { - std::vector value; - if (!getParameters(m_node.child("data"), name, value)) - return; - adapter.set(value); - } - - void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { - std::vector value; - if (!getParameters(m_node.child("data"), name, value)) - return; - adapter.set(value); - } - - void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { - std::vector value; - if (!getParameters(m_node.child("data"), name, value)) - return; - adapter.set(value); - } - - void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { - std::vector value; - if (!getParameters(m_node.child("data"), name, value)) - return; - adapter.set(value); - } - - void use_framework_node(bool flag) { - m_use_framework_node = flag; - } - -private: - struct IoMap { - using NodeIdToIoIndex = std::unordered_map; - NodeIdToIoIndex inputs; - NodeIdToIoIndex outputs; - }; - - /// \brief Traverses port_map in order to create vector of InputDescription shared_ptrs. - /// Shall be used only for ops which have port_map attribute. - /// \param node xml op representation - std::vector> parseInputDescription( - const pugi::xml_node& node); - /// \brief Traverses port_map in order to create vector of OutputDescription shared_ptrs. - /// Shall be used only for ops which have port_map attribute. 
- /// \param node xml op representation - std::vector> parseOutputDescription( - const pugi::xml_node& node); - - // TODO consider to call only once per layer/TI-Loop node - IoMap updated_io_map(const pugi::xml_node& node); - - /// \brief Traverses xml node representation in order to create nGraph function for it. - /// \param node xml node representation - /// \param weights weights attached to current node - /// \return shared pointer to function representing input node - std::shared_ptr parse_function(const pugi::xml_node& root, const ov::Weights& weights); - /// \brief Traverses xml node representation in order to get the purpose attribute of - /// inputs/outputs in the body of Loop op. \param node xml node representation \return struct - /// with value of purpuse attribute - ngraph::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node); - - GenericLayerParams parseGenericParams(const pugi::xml_node& node); - - std::shared_ptr createNode(const ngraph::OutputVector& inputs, - const pugi::xml_node& node, - const ov::Weights& weights, - const GenericLayerParams& params); - - // -- DATA -- - const pugi::xml_node m_node; - const ov::Weights& m_weights; - const std::unordered_map& m_opsets; - std::unordered_map>& m_variables; - - /// - /// store information about parameters/results order during function creation - /// it will be used during Inputs/Outputs Description creation in SubGraph processing - /// - IoMap io_map; - - bool m_use_framework_node{false}; -}; - -XmlDeserializer::IoMap XmlDeserializer::updated_io_map(const pugi::xml_node& node) { - auto body_node = node.child("body"); - - if (body_node.empty()) { - IE_THROW() << "Missing body part."; - } - // Fill map: parameter/result id to parameter/result number in Function - - auto extend_io_map = io_map; - - FOREACH_CHILD (layer, body_node.child("layers"), "layer") { - auto type = XMLParseUtils::GetStrAttr(layer, "type"); - - if (type == "Parameter") { - auto id = XMLParseUtils::GetUIntAttr(layer, "id"); - extend_io_map.inputs.insert({id, -1}); // try add as unconnected - } else if (type == "Result") { - auto id = XMLParseUtils::GetUIntAttr(layer, "id"); - extend_io_map.outputs.insert({id, -1}); // try add as unconnected - } - } - return extend_io_map; -} - -std::vector> XmlDeserializer::parseInputDescription( - const pugi::xml_node& node) { - std::vector> inputs; - const auto up_io_map = updated_io_map(node); - - // Parse PortMap: external_port_id for inputs does not always appear in consecutive order - std::map input_map; - FOREACH_CHILD (input, node.child("port_map"), "input") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id"); - input_map.emplace(ext_port_id, input); - } - - for (const auto& input : input_map) { - auto& xml_input = input.second; - auto axis_attr = xml_input.attribute("axis"); - int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id"); - size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id"); - - // if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput. 
- if (!axis_attr.empty()) { - size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis"); - int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0); - int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1); - int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1); - int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1); - - const auto input_index = up_io_map.inputs.at(body_parameter_index); - - inputs.push_back(std::make_shared(ti_input_index, - input_index, - start, - stride, - part_size, - end, - axis)); - } else { - // otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput - bool is_back_edge_exist = false; - FOREACH_CHILD (xml_edge, node.child("back_edges"), "edge") { - size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer"); - - if (to_layer == body_parameter_index) { - size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer"); - - const auto input_index = up_io_map.inputs.at(body_parameter_index); - const auto output_index = up_io_map.outputs.at(from_layer); - - inputs.push_back( - std::make_shared(ti_input_index, - input_index, - output_index)); - - is_back_edge_exist = true; - break; - } - } - - // ti_input_index = -1 means that Parameter of the body is not connected to inputs of - // TensorIterator and is used only for internal needs. - if (!is_back_edge_exist && ti_input_index >= 0) { - const auto input_index = up_io_map.inputs.at(body_parameter_index); - - inputs.push_back( - std::make_shared(ti_input_index, - input_index)); - } - } - } - return inputs; -} - -std::vector> XmlDeserializer::parseOutputDescription( - const pugi::xml_node& node) { - std::vector> outputs; - const auto up_io_map = updated_io_map(node); - - // Parse PortMap: outputs - std::map output_map; - FOREACH_CHILD (output, node.child("port_map"), "output") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id"); - output_map.emplace(ext_port_id, output); - } - - uint64_t output_number = 0; - for (const auto& output : output_map) { - auto& xml_output = output.second; - auto axis_attr = xml_output.attribute("axis"); - size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id"); - - // if external_port_id < 0 it means that this body result isn't connected to the Loop output - // and is used only for internal needs. For TensorIterator external_port_id is always > 0. - if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) { - // if axis is set, then concatenation is enabled. Create - // ngraph::TensorIterator::ConcatOutput. - if (!axis_attr.empty()) { - int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis"); - int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0); - int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1); - int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1); - int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1); - - const auto output_index = up_io_map.outputs.at(body_result_index); - - outputs.push_back(std::make_shared(output_index, - output_number, - start, - stride, - part_size, - end, - axis)); - } else { - // otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration. 
- const auto output_index = up_io_map.outputs.at(body_result_index); - - outputs.push_back(std::make_shared(output_index, - output_number, - -1)); - } - output_number++; - } - } - return outputs; -} - -ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) { - ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1}; - const auto up_io_map = updated_io_map(node); - - NGRAPH_CHECK(!up_io_map.inputs.empty() || !up_io_map.outputs.empty(), - "No parameters or results found in body Function."); - - // Parse PortMap: external_port_id for inputs/outputs does not always appear in consecutive - // order - std::map input_map; - FOREACH_CHILD (input, node.child("port_map"), "input") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id"); - input_map.emplace(ext_port_id, input); - } - std::map output_map; - FOREACH_CHILD (output, node.child("port_map"), "output") { - int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id"); - output_map.emplace(ext_port_id, output); - } - - for (const auto& input : input_map) { - auto& xml_input = input.second; - auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", ""); - size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id"); - if (purpose == "current_iteration") { - result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index); - } - } - - for (const auto& output : output_map) { - auto& xml_output = output.second; - auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", ""); - size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id"); - if (purpose == "execution_condition") { - result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index); - } - } - - return result; -} - -void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { - static const std::unordered_set skip_names = {"input_descriptions", - "output_descriptions", - "special_body_ports"}; - std::string val; - - // for TensorIterator look for 'port_map' as 'data' does not exist - if (m_node.child("port_map")) { - if (auto a = ngraph::as_type< - ngraph::AttributeAdapter>>>( - &adapter)) { - a->set(parseInputDescription(m_node)); - } else if (auto a = ngraph::as_type>>>(&adapter)) { - a->set(parseOutputDescription(m_node)); - } else if (auto a = - ngraph::as_type>(&adapter)) { - a->set(parsePurposeAttribute(m_node)); - } - } - - if (skip_names.count(name) && !getStrAttribute(m_node.child("data"), name, val)) - return; - if (auto a = ngraph::as_type>(&adapter)) { - static_cast(*a) = details::convertPrecision(val); - } else if (auto a = ngraph::as_type>(&adapter)) { - std::vector shape; - std::vector dims; - if (!getParameters(m_node.child("data"), name, shape)) - return; - for (const auto& dim : shape) - dims.emplace_back(dim); - static_cast(*a) = ngraph::PartialShape(dims); - } else if (auto a = ngraph::as_type>(&adapter)) { - std::vector shape; - if (!getParameters(m_node.child("data"), name, shape)) - return; - static_cast(*a) = ngraph::Shape(shape); - } else if (auto a = ngraph::as_type>(&adapter)) { - std::vector shape; - if (!getParameters(m_node.child("data"), name, shape)) - return; - static_cast(*a) = ngraph::Strides(shape); -#ifdef __APPLE__ - } else if (auto a = ngraph::as_type>>(&adapter)) { - std::vector result; - if (!getParameters(m_node.child("data"), name, result)) - return; - static_cast&>(*a) = result; -#else - } else if (auto a = 
ngraph::as_type>>(&adapter)) { - std::vector result; - if (!getParameters(m_node.child("data"), name, result)) - return; - a->set(result); -#endif - } else if (auto a = ngraph::as_type>(&adapter)) { - std::vector axes; - if (!getParameters(m_node.child("data"), name, axes)) - return; - static_cast(*a) = ngraph::AxisSet(axes); - } else if (auto a = ngraph::as_type>(&adapter)) { - if (!getStrAttribute(m_node.child("data"), name, val)) - return; - static_cast(*a) = ngraph::as_enum(val); - } else if (auto a = ngraph::as_type>(&adapter)) { - if (!getStrAttribute(m_node.child("data"), name, val)) - return; - static_cast(*a) = ngraph::as_enum(val); - } else if (auto a = ngraph::as_type>(&adapter)) { - std::vector shape; - if (!getParameters(m_node.child("data"), name, shape)) - return; - std::vector coord_diff(shape.begin(), shape.end()); - static_cast(*a) = ngraph::CoordinateDiff(coord_diff); - } else if (auto a = ngraph::as_type>>(&adapter)) { - std::string variable_id; - if (!getStrAttribute(m_node.child("data"), name, variable_id)) - return; - if (!m_variables.count(variable_id)) { - m_variables[variable_id] = std::make_shared( - ngraph::VariableInfo{ngraph::PartialShape::dynamic(), ngraph::element::dynamic, variable_id}); - } - a->set(m_variables[variable_id]); - } else if (auto a = ngraph::as_type>>( - &adapter)) { - std::string value; - pugi::xml_node dn = m_node.child("data"); - auto type = XMLParseUtils::GetStrAttr(m_node, "type"); - - if (dn.empty()) - IE_THROW() << "No attrtibutes defined for " << type << " op!"; - - if (getStrAttribute(dn, name, value)) { - auto buffer = std::make_shared(value.size()); - auto data = static_cast(buffer->get_ptr()); - value.copy(data, value.size()); - a->set(buffer); - } else if (name == "value" && type == "Const") { - std::vector shape; - std::string el_type_str; - - size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset"); - size_t size = XMLParseUtils::GetUInt64Attr(dn, "size"); - if (!getStrAttribute(dn, "element_type", el_type_str)) - return; - if (!getParameters(dn, "shape", shape)) - return; - - ngraph::element::Type el_type = details::convertPrecision(el_type_str); - - if (!m_weights) - IE_THROW() << "Empty weights data in bin file or bin file cannot be found!"; - if (m_weights->size() < offset + size) - IE_THROW() << "Incorrect weights in bin file!"; - if (size < std::ceil(ngraph::shape_size(shape) * el_type.bitwidth() / 8.f)) - IE_THROW() << "Attribute and shape size are inconsistent for " << type << " op!"; - - char* data = m_weights->get_ptr() + offset; - auto buffer = - std::make_shared>>(data, size, m_weights); - a->set(buffer); - } - } else if (auto a = ngraph::as_type>(&adapter)) { - const auto& type = XMLParseUtils::GetStrAttr(m_node, "type"); - const auto& version = XMLParseUtils::GetStrAttr(m_node, "version"); - - ngraph::op::FrameworkNodeAttrs node_attrs; - node_attrs.set_opset_name(version); - node_attrs.set_type_name(type); - - pugi::xml_node dn = m_node.child("data"); - - if (!dn.empty()) { - for (const auto& data_attr : dn.attributes()) { - node_attrs[data_attr.name()] = data_attr.as_string(); - } - } - - a->set(node_attrs); - } else if (const auto& a = ngraph::as_type>(&adapter)) { - ngraph::element::TypeVector types; - if (!getParameters(m_node.child("data"), name, types)) - return; - a->set(types); - } else { - IE_THROW() << "Error IR reading. 
Attribute adapter can not be found for " << name << " parameter"; - } -} - -void XmlDeserializer::on_adapter(const std::string& name, - ngraph::ValueAccessor>& adapter) { - std::shared_ptr ngraph_function; - if (!name.compare("body")) { - auto body_node = m_node.child(name.c_str()); - if (body_node.empty()) { - IE_THROW() << "TensorIterator has no body."; - } - ngraph_function = parse_function(m_node.child(name.c_str()), m_weights); - } else if (!name.compare("net")) { - ngraph_function = parse_function(m_node, m_weights); - } else { - IE_THROW() << "Error: not recognized adapter name: " << name << "."; - } - adapter.set(ngraph_function); -} - -std::shared_ptr XmlDeserializer::parse_function(const pugi::xml_node& root, - const ov::Weights& weights) { - // OV_ITT_SCOPE_CHAIN(FIRST_INFERENCE, taskChain, itt::domains::V10Reader_RT, "V10Parser", "Parse"); - - struct FunctionNodes { - ngraph::ParameterVector parameters; - ngraph::ResultVector results; - ngraph::NodeVector all; - ngraph::SinkVector sinks; - }; - - struct edge { - size_t fromLayerId, fromPortId, toPortId; - }; - struct node_params { - pugi::xml_node xml; - GenericLayerParams params; - }; - - std::map params; - - std::vector outputs; - std::unordered_set opName; - - // Read all layers and store their parameters in params map - FOREACH_CHILD (node, root.child("layers"), "layer") { - auto node_param = parseGenericParams(node); - if (opName.find(node_param.name) != opName.end() && node_param.type != "Result") - IE_THROW() << "Invalid IR! " << node_param.name << " name is not unique!"; - opName.insert(node_param.name); - params[node_param.layerId] = {node, node_param}; - if (node_param.type == "Result" || node_param.type == "Assign") { - outputs.push_back(node_param.layerId); - } - } - - std::map> edges; - std::map> id_to_node; - - // Read all edges and store them for further usage - FOREACH_CHILD (_ec, root.child("edges"), "edge") { - size_t fromLayer = XMLParseUtils::GetUIntAttr(_ec, "from-layer"); - size_t fromPort = XMLParseUtils::GetUIntAttr(_ec, "from-port"); - size_t toLayer = XMLParseUtils::GetUIntAttr(_ec, "to-layer"); - size_t toPort = XMLParseUtils::GetUIntAttr(_ec, "to-port"); - edges[toLayer].push_back({fromLayer, fromPort, toPort}); - } - - // Run DFS starting from outputs to get nodes topological order - std::set used; - std::vector order; - std::function dfs = [&edges, &order, &used, &dfs](const size_t id) { - if (used.count(id)) - return; - used.insert(id); - for (auto& edge : edges[id]) { - dfs(edge.fromLayerId); - } - order.push_back(id); - }; - std::for_each(outputs.begin(), outputs.end(), dfs); - - // OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphNodes"); - - FunctionNodes func_nodes; - - std::map> variable_id_to_read_value; - - // Following topological order create nGraph operations - for (auto& layer_id : order) { - auto& p = params[layer_id]; - const auto& edgeIt = edges.find(layer_id); - if (edgeIt == edges.end()) - continue; - ngraph::OutputVector inputs(edgeIt->second.size()); - for (auto& e : edgeIt->second) { - auto input_node = id_to_node[e.fromLayerId]; - if (!input_node) { - IE_THROW() << "Attempt to access node " << e.fromLayerId << " that not in graph."; - } - auto& p_output = params[e.fromLayerId].params; - size_t const realInputPortId = p.params.getRealInputPortId(e.toPortId); - if (realInputPortId >= inputs.size()) - IE_THROW() << p.params.type << " layer " << p.params.name << " with id: " << p.params.layerId - << " is inconsistent!"; - inputs[realInputPortId] = 
input_node->output(p_output.getRealOutputPortId(e.fromPortId)); - } - - auto node = createNode(inputs, p.xml, weights, p.params); - id_to_node[layer_id] = node; - - // Check that output shape after nGraph node validation the same as in IR - // because IR always right! - // Temporary disabled! - // for (size_t i = 0; i < p.params.outputPorts.size(); ++i) { - // if (p.params.outputPorts[i].dims != node->output(i).get_shape()) { - // IE_THROW() << "Shape after nGraph infer " << - // details::dumpVec(node->output(i).get_shape()) - // << " differ from IR shapes: " << - // details::dumpVec(p.params.outputPorts[i].dims); - // } - // } - - if (const auto& parameter_node = std::dynamic_pointer_cast(node)) { - io_map.inputs.insert({layer_id, func_nodes.parameters.size()}); - func_nodes.parameters.emplace_back(parameter_node); - } - - if (const auto& result_node = std::dynamic_pointer_cast(node)) { - io_map.outputs.insert({layer_id, func_nodes.results.size()}); - func_nodes.results.emplace_back(result_node); - } - - if (const auto& sink = std::dynamic_pointer_cast(node)) { - func_nodes.sinks.emplace_back(sink); - } - - if (const auto& read_value = std::dynamic_pointer_cast(node)) { - variable_id_to_read_value[read_value->get_variable_id()] = read_value; - } - - func_nodes.all.emplace_back(node); - } - - // OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphFunction"); - - auto function = std::make_shared(func_nodes.results, - func_nodes.sinks, - func_nodes.parameters, - XMLParseUtils::GetStrAttr(root, "name", "")); - for (const auto& sink : func_nodes.sinks) { - if (const auto& assign = std::dynamic_pointer_cast(sink)) { - assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id())); - } - } - - return function; -} - -GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) { - const auto parsePort = [this](const pugi::xml_node& parentNode, - const GenericLayerParams& params, - bool input) -> GenericLayerParams::LayerPortData { - GenericLayerParams::LayerPortData port; - - port.portId = XMLParseUtils::GetIntAttr(parentNode, "id"); - - FOREACH_CHILD (node, parentNode, "dim") { - int64_t dim = 0; - const pugi::char_t* dimVal = node.child_value(); - std::stringstream ss(dimVal); - if (!(ss >> dim) || dim < -1) { - IE_THROW() << "dimension (" << dimVal << ") in node " << node.name() - << " must be greater or equal to -1: at offset " << node.offset_debug(); - } - port.dims.push_back(dim); - } - - ngraph::element::Type type(ngraph::element::Type_t::undefined); - // Input port hasn't precision - if (!input) { - const std::string& preStr = XMLParseUtils::GetStrAttr(parentNode, "precision"); - type = InferenceEngine::details::convertPrecision(preStr); - } - port.precision = type; - std::vector names; - if (getParameters(parentNode, "names", names)) { - for (size_t i = 0; i < names.size(); i++) { - std::string name = names[i]; - // Restore original name if it contains delimiter - // getParameters(...) 
returns the vector of names which were split by delimiter ',' - // but some names can contain ',' as a part of name, in this case we use '\' to - // escape delimiter the cycle below is needed in order to find names which contained - // delimiter and restore the original name - while (i < names.size() && names[i].at(names[i].length() - 1) == '\\') { - name.replace(names[i].length() - 1, 1, ","); - name += names[++i]; - } - port.names.emplace(name); - } - } - return port; - }; - GenericLayerParams params; - - params.layerId = XMLParseUtils::GetIntAttr(node, "id"); - params.version = XMLParseUtils::GetStrAttr(node, "version"); - - params.type = XMLParseUtils::GetStrAttr(node, "type"); - - params.name = XMLParseUtils::GetStrAttr(node, "name"); - - auto outNode = node.child("output"); - if (!outNode.empty()) { - FOREACH_CHILD (_cn, outNode, "port") { params.outputPorts.emplace_back(parsePort(_cn, params, false)); } - } - auto inpNode = node.child("input"); - if (!inpNode.empty()) { - FOREACH_CHILD (_cn, inpNode, "port") { params.inputPorts.emplace_back(parsePort(_cn, params, true)); } - } - return params; -} - -std::shared_ptr XmlDeserializer::createNode(const std::vector>& inputs, - const pugi::xml_node& node, - const ov::Weights& weights, - const GenericLayerParams& params) { - // Check that inputs are correctly defined - for (size_t i = 0; i < inputs.size(); i++) { - if (!inputs[i].get_node()) - IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId - << " has incorrect input with index " << i << "!"; - if (ngraph::element::Type_t::undefined == inputs[i].get_element_type()) - IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId - << " has undefined element type for input with index " << i << "!"; - } - - std::shared_ptr ngraphNode; - - // Find registered opset - auto opsetIt = m_opsets.find(params.version); - - // Try to create operation from loaded opsets - static const std::unordered_set experimental_ops_added_to_opset = { - "ExperimentalDetectronDetectionOutput", - "ExperimentalDetectronGenerateProposalsSingleImage", - "ExperimentalDetectronPriorGridGenerator", - "ExperimentalDetectronROIFeatureExtractor", - "ExperimentalDetectronTopKROIs", - "GRUCell", - "RNNCell", - "Proposal"}; - - if (experimental_ops_added_to_opset.count(params.type) && - (params.version == "experimental" || params.version == "extension")) { - opsetIt = m_opsets.find("opset6"); - } - - if (!ngraphNode && opsetIt != m_opsets.end()) { - auto const& type = params.type == "Const" ? 
"Constant" : params.type; - - if (params.version == "opset1") { - // MVN, ROIPooling and ReorgYolo were missing in opset1 - if (type == "MVN" || type == "ROIPooling" || type == "ReorgYolo") { - opsetIt = m_opsets.find("opset2"); - if (opsetIt == m_opsets.end()) { - IE_THROW() << "Cannot create " << params.type << " layer " << params.name - << " id:" << params.layerId << " from unsupported opset: " << params.version; - } - } - } - - auto const& opset = opsetIt->second; - - ngraphNode = std::shared_ptr(opset.create_insensitive(type)); - if (!ngraphNode) { - IE_THROW() << "Opset " << params.version << " doesn't contain the operation with type: " << type; - } - // Share Weights form constant blob - if (auto constant = std::dynamic_pointer_cast(ngraphNode)) { - constant->alloc_buffer_on_visit_attributes(false); - } - ngraphNode->set_arguments(inputs); - XmlDeserializer visitor(node, weights, m_opsets, m_variables); - - if (ngraphNode->visit_attributes(visitor)) { - ngraphNode->constructor_validate_and_infer_types(); - } - - // To be sure that all default values will be initialized: - ngraphNode = ngraphNode->clone_with_new_inputs(ngraphNode->input_values()); - } - - if (!ngraphNode && m_use_framework_node) { - ngraphNode = std::make_shared(inputs); - XmlDeserializer visitor(node, weights, m_opsets, m_variables); - ngraphNode->visit_attributes(visitor); - - size_t index{0}; - for (const auto& output_params : params.outputPorts) { - ngraphNode->set_output_type(index, output_params.precision, ngraph::PartialShape(output_params.dims)); - ++index; - } - } - - if (!ngraphNode) { - IE_THROW() << "Cannot create " << params.type << " layer " << params.name << " id:" << params.layerId - << " from unsupported opset: " << params.version; - } - - // Save run time info - auto& rtInfo = ngraphNode->get_rt_info(); - pugi::xml_node dn = node.child("data"); - if (dn) { - const auto pr_data = dn.attribute("PrimitivesPriority"); - if (pr_data) { - rtInfo["PrimitivesPriority"] = std::make_shared<::ngraph::VariantWrapper>(pr_data.value()); - } - const auto aw_data = dn.attribute("alt_width"); - if (aw_data) { - rtInfo["alt_width"] = std::make_shared<::ngraph::VariantWrapper>(aw_data.value()); - } - } - - ngraphNode->set_friendly_name(params.name); - for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) { - if (!params.outputPorts[i].names.empty()) - ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names); - } - - return ngraphNode; -} - void ParsePreProcess(pugi::xml_node& root, ov::Weights weights, std::shared_ptr f) { /* Preprocessing block can have two preprocessing types: * @@ -1065,22 +178,10 @@ void ParsePreProcess(pugi::xml_node& root, ov::Weights weights, std::shared_ptr< } } } - } // namespace namespace ngraph { namespace frontend { - -namespace { -void loadXml(pugi::xml_document& xmlDoc, std::istream& model) { - // OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::V10Reader_RT, "loadXml"); - pugi::xml_parse_result res = xmlDoc.load(model); - if (res.status != pugi::status_ok) { - IE_THROW() << res.description() << "at offset " << res.offset; - } -} -} // namespace - class InputModelIR::InputModelIRImpl { ov::Weights m_weights; ov::Extensions m_extensions; @@ -1091,7 +192,10 @@ public: InputModelIRImpl(std::istream& stream, const ov::Weights& weights, const ov::Extensions& extensions) : m_weights(weights), m_extensions(extensions) { - loadXml(m_xml_doc, stream); + pugi::xml_parse_result res = m_xml_doc.load(stream); + if (res.status != pugi::status_ok) { + 
IE_THROW() << res.description() << " at offset " << res.offset; + } m_root = m_xml_doc.document_element(); } @@ -1127,7 +231,7 @@ std::shared_ptr InputModelIR::InputModelIRImpl::convert() { opsets[it.first] = it.second; } - XmlDeserializer visitor(m_root, m_weights, opsets, variables); + ov::XmlDeserializer visitor(m_root, m_weights, opsets, variables); visitor.use_framework_node(opsets.count("framework_node_ext")); std::shared_ptr function; visitor.on_attribute("net", function); diff --git a/ngraph/frontend/ir/src/rt_info_deserializer.cpp b/ngraph/frontend/ir/src/rt_info_deserializer.cpp new file mode 100644 index 00000000000..739432e594f --- /dev/null +++ b/ngraph/frontend/ir/src/rt_info_deserializer.cpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include +#include + +using namespace ov; + +void RTInfoDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) { + check_attribute_name(name); + std::string val; + if (!getStrAttribute(m_node, name, val)) + return; + if (auto a = ngraph::as_type>>(&adapter)) { + std::set ss; + str_to_container(val, ss); + a->set(ss); + } else { + IR_THROW("Not implemented"); + } +} \ No newline at end of file diff --git a/ngraph/frontend/ir/src/rt_info_deserializer.hpp b/ngraph/frontend/ir/src/rt_info_deserializer.hpp new file mode 100644 index 00000000000..fbe181c9cc2 --- /dev/null +++ b/ngraph/frontend/ir/src/rt_info_deserializer.hpp @@ -0,0 +1,116 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include + +namespace ov { +class RTInfoDeserializer : public ngraph::AttributeVisitor { +public: + explicit RTInfoDeserializer(const pugi::xml_node& node) : m_node(node) {} + + void on_adapter(const std::string& name, ngraph::ValueAccessor& value) override { + check_attribute_name(name); + std::string val; + if (!getStrAttribute(m_node, name, val)) + return; + value.set(val); + } + + void on_adapter(const std::string& name, ngraph::ValueAccessor& value) override { + check_attribute_name(name); + std::string val; + if (!getStrAttribute(m_node, name, val)) + return; + std::transform(val.begin(), val.end(), val.begin(), [](char ch) { + return std::tolower(static_cast(ch)); + }); + std::set true_names{"true", "1"}; + std::set false_names{"false", "0"}; + + bool is_true = true_names.find(val) != true_names.end(); + bool is_false = false_names.find(val) != false_names.end(); + + if (!is_true && !is_false) + return; + value.set(is_true); + } + + void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override; + + void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override { + check_attribute_name(name); + std::string val; + if (!getStrAttribute(m_node, name, val)) + return; + adapter.set(stringToType(val)); + } + void on_adapter(const std::string& name, ngraph::ValueAccessor& adapter) override { + check_attribute_name(name); + std::string val; + if (!getStrAttribute(m_node, name, val)) + return; + adapter.set(stringToType(val)); + } + + void on_adapter(const std::string& name, + ngraph::ValueAccessor>& adapter) override { + throw ngraph::ngraph_error("Function type is unsupported for rt info deserialization"); + } + + void on_adapter(const std::string& name, ngraph::ValueAccessor>& adapter) override { + check_attribute_name(name); + std::string val; + if (!getStrAttribute(m_node, name, val)) + return; + std::vector value; 
+        str_to_container(val, value);
+        adapter.set(value);
+    }
+
+    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override {
+        check_attribute_name(name);
+        std::string val;
+        if (!getStrAttribute(m_node, name, val))
+            return;
+        std::vector<int64_t> value;
+        str_to_container(val, value);
+        adapter.set(value);
+    }
+
+    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<float>>& adapter) override {
+        check_attribute_name(name);
+        std::string val;
+        if (!getStrAttribute(m_node, name, val))
+            return;
+        std::vector<float> value;
+        str_to_container(val, value);
+        adapter.set(value);
+    }
+
+    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<std::string>>& adapter) override {
+        check_attribute_name(name);
+        std::string val;
+        if (!getStrAttribute(m_node, name, val))
+            return;
+        std::vector<std::string> value;
+        str_to_container(val, value);
+        adapter.set(value);
+    }
+
+    void check_attribute_name(const std::string& name) const {
+        if (name == "name" || name == "version") {
+            throw ngraph::ngraph_error("Attribute key with name: " + name + " is not allowed. Please use another name");
+        }
+    }
+
+private:
+    pugi::xml_node m_node;
+};
+}  // namespace ov
\ No newline at end of file
diff --git a/ngraph/frontend/ir/src/utils.cpp b/ngraph/frontend/ir/src/utils.cpp
new file mode 100644
index 00000000000..37bd3ff1315
--- /dev/null
+++ b/ngraph/frontend/ir/src/utils.cpp
@@ -0,0 +1,22 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "utils.hpp"
+
+namespace ov {
+void operator>>(const std::stringstream& in, ngraph::element::Type& type) {
+    type = InferenceEngine::details::convertPrecision(ngraph::trim(in.str()));
+}
+
+bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value) {
+    if (!node)
+        return false;
+
+    auto attr = node.attribute(name.c_str());
+    if (attr.empty())
+        return false;
+    value = std::string(attr.value());
+    return true;
+}
+}  // namespace ov
\ No newline at end of file
diff --git a/ngraph/frontend/ir/src/utils.hpp b/ngraph/frontend/ir/src/utils.hpp
new file mode 100644
index 00000000000..31486ead3a6
--- /dev/null
+++ b/ngraph/frontend/ir/src/utils.hpp
@@ -0,0 +1,52 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <pugixml.hpp>
+
+#include <ie_common.h>
+#include <ie_ngraph_utils.hpp>
+#include <ngraph/type/element_type.hpp>
+#include <sstream>
+#include <vector>
+
+namespace ov {
+void operator>>(const std::stringstream& in, ngraph::element::Type& type);
+
+bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value);
+
+template <class T>
+void str_to_container(const std::string& value, T& res) {
+    std::stringstream ss(value);
+    std::string field;
+    while (getline(ss, field, ',')) {
+        if (field.empty())
+            IE_THROW() << "Cannot get vector of parameters! \"" << value << "\" is incorrect";
\"" << value << "\" is incorrect"; + std::stringstream fs(field); + typename T::value_type val; + fs >> val; + res.insert(res.end(), val); + } +} + +template +bool getParameters(const pugi::xml_node& node, const std::string& name, std::vector& value) { + std::string param; + if (!getStrAttribute(node, name, param)) + return false; + str_to_container(param, value); + return true; +} + +template +T stringToType(const std::string& valStr) { + T ret{0}; + std::istringstream ss(valStr); + if (!ss.eof()) { + ss >> ret; + } + return ret; +} +} // namespace ov \ No newline at end of file diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp index 2881549eaff..8c1fbc4ed00 100644 --- a/ngraph/test/constant_folding.cpp +++ b/ngraph/test/constant_folding.cpp @@ -3157,7 +3157,7 @@ TEST(constant_folding, disable_constant_folding) { auto constant_shape = op::Constant::create(element::i64, Shape{1}, {3}); auto dyn_reshape = make_shared(input, constant_shape, true); auto& rt_info = dyn_reshape->get_rt_info(); - rt_info["DISABLED_CONSTANT_FOLDING"]; + rt_info["disabled_constant_folding_0"]; auto f = make_shared(dyn_reshape, ParameterVector{input}); pass::Manager pass_manager;