RT Info Attributes Serialization/Deserialization (#7603)
* Init rt info deserialization
* Add RT Info attributes deserialization
* Add RT Info attributes serialization
* Add Serialization and Deserialization tests
* Code style
* Fix AttributeAdapter place
* Extended rt_info section; added support for multi-field attrs; use version for attr serialization/deserialization
* Align attributes and their usage
* Fix LPT
* Add missing #pragma once
* Fix build
* Change version delimiter to have valid xml
* Fix PrimitivesPriority
* Fix tests
* Fix test
* Fix IR Reader; remove empty rt_info
* More explicit attribute name and version representation
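For reference, the rt_info section introduced by this change is stored per layer and per port in the IR XML; each attribute is written with its registered name and version (snippet taken from the new deserialization tests in this commit):

    <rt_info>
        <attribute name="fused_names" version="0" value="in1"/>
    </rt_info>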
This commit is contained in: parent 04158afc21, commit 62ed238f54
@@ -123,7 +123,7 @@ std::string NetworkCompilationContext::computeHash(const CNNNetwork& network,
         } else if (auto fNames =
                        std::dynamic_pointer_cast<ngraph::VariantWrapper<ngraph::FusedNames>>(rtMapData.second)) {
             seed = hash_combine(seed, fNames->get().getNames());
-        } else if (auto prim = std::dynamic_pointer_cast<ngraph::VariantWrapper<ngraph::PrimitivesPriority>>(
+        } else if (auto prim = std::dynamic_pointer_cast<ngraph::VariantWrapper<ov::PrimitivesPriority>>(
                        rtMapData.second)) {
             seed = hash_combine(seed, prim->get().getPrimitivesPriority());
         }
@@ -1981,12 +1981,12 @@ void convertFunctionToICNNNetwork(const std::shared_ptr<const ::ngraph::Function
        CNNLayerPtr cnnLayer = createCNNLayer(layer);

        // Set originalLayersNames from FusedNames
-       std::string originalNames = ::ngraph::getFusedNames(layer);
+       std::string originalNames = ngraph::getFusedNames(layer);
        if (!originalNames.empty()) {
            cnnLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES] = originalNames;
        }

-       std::string primitivesPriority = ::ngraph::getPrimitivesPriority(layer);
+       std::string primitivesPriority = ov::getPrimitivesPriority(layer);
        if (!primitivesPriority.empty()) {
            cnnLayer->params["PrimitivesPriority"] = primitivesPriority;
        }
@@ -9,6 +9,7 @@

 #include <ngraph/opsets/opset1.hpp>
 #include <ngraph/pattern/op/wrap_type.hpp>
+#include <transformations/rt_info/disable_constant_folding.hpp>
 #include "low_precision/network_helper.hpp"

 using namespace ngraph;
@@ -82,8 +83,7 @@ ngraph::pass::low_precision::ConvertSubtractConstant::ConvertSubtractConstant(co
         NetworkHelper::copyInfo(subtractConstant, resultConvert);
         resultConvert->set_friendly_name(subtractConstant->get_friendly_name() + "/Convert");

-        auto& rtInfo = resultConvert->get_rt_info();
-        rtInfo["DISABLED_CONSTANT_FOLDING"] = std::make_shared<VariantWrapper<std::string>>("");
+        ov::disable_constant_folding(resultConvert);

        const auto newSubtract = std::make_shared<opset1::Subtract>(opsMap.at(weightsConvertWrapper).get_node_shared_ptr(), resultConvert);
        NetworkHelper::copyInfo(subtract, newSubtract);
@@ -13,6 +13,7 @@
 #include <ngraph/pattern/op/wrap_type.hpp>
 #include <ngraph/pattern/op/or.hpp>
 #include "low_precision/network_helper.hpp"
+#include <transformations/rt_info/disable_constant_folding.hpp>

 namespace ngraph {
 namespace pass {
@@ -321,8 +322,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph
        }

        if (ov::is_type<opset1::Subtract>(onWeights)) {
-            auto& rt = onWeights->get_rt_info();
-            rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
+            ov::disable_constant_folding(onWeights);
        }
        return true;
    }
@@ -13,6 +13,7 @@
 #include <ngraph/pattern/op/wrap_type.hpp>
 #include <ngraph/pattern/op/or.hpp>
 #include "low_precision/network_helper.hpp"
+#include <transformations/rt_info/disable_constant_folding.hpp>

 namespace ngraph {
 namespace pass {
@@ -212,8 +213,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con
        }

        if (ov::is_type<opset1::Subtract>(onWeights)) {
-            auto& rt = onWeights->get_rt_info();
-            rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared<ngraph::VariantWrapper<std::string>>("");
+            ov::disable_constant_folding(onWeights);
        }

        return true;
@@ -22,12 +22,12 @@ inline std::string getRTInfoValue(const std::map<std::string, std::shared_ptr<ng

 inline std::string getPrimitivesPriorityValue(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();
-    using PrimitivesPriorityWraper = ngraph::VariantWrapper<ngraph::PrimitivesPriority>;
+    using PrimitivesPriorityWraper = ngraph::VariantWrapper<ov::PrimitivesPriority>;

-    if (!rtInfo.count(PrimitivesPriorityWraper::get_type_info_static().name)) return "";
+    if (!rtInfo.count(PrimitivesPriorityWraper::get_type_info_static())) return "";

-    const auto &attr = rtInfo.at(PrimitivesPriorityWraper::get_type_info_static().name);
-    ngraph::PrimitivesPriority pp = ngraph::as_type_ptr<PrimitivesPriorityWraper>(attr)->get();
+    const auto &attr = rtInfo.at(PrimitivesPriorityWraper::get_type_info_static());
+    ov::PrimitivesPriority pp = ngraph::as_type_ptr<PrimitivesPriorityWraper>(attr)->get();
     return pp.getPrimitivesPriority();
 }
@@ -0,0 +1,49 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <locale>
#include <map>
#include <mutex>
#include <set>
#include <utility>

#include <transformations_visibility.hpp>

#include <openvino/core/variant.hpp>
#include <ngraph/node.hpp>
#include <ngraph/factory.hpp>

#include <transformations/rt_info/disable_constant_folding.hpp>
#include <transformations/rt_info/fused_names_attribute.hpp>
#include <transformations/rt_info/nms_selected_indices.hpp>
#include <transformations/rt_info/primitives_priority_attribute.hpp>
#include <transformations/rt_info/strides_property.hpp>

namespace ov {
namespace pass {
class TRANSFORMATIONS_API Attributes {
public:
    Attributes() {
        register_factory<VariantWrapper<ngraph::FusedNames>>();
        register_factory<VariantWrapper<PrimitivesPriority>>();
        register_factory<VariantWrapper<DisableConstantFolding>>();
        register_factory<VariantWrapper<NmsSelectedIndices>>();
        register_factory<VariantWrapper<Strides>>();
    }

    Variant * create_by_type_info(const ov::DiscreteTypeInfo & type_info) {
        return m_factory_registry.create(type_info);
    }

private:
    template <class T>
    void register_factory() {
        m_factory_registry.register_factory<T>(ngraph::FactoryRegistry<T>::template get_default_factory<T>());
    }

    ngraph::FactoryRegistry<Variant> m_factory_registry;
};
} // namespace pass
} // namespace ov
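A minimal sketch of how this factory could be used on the reading side (illustrative only; the actual wiring lives in the new IR frontend deserializer, and the helper function name below is hypothetical):

    #include <memory>
    #include <transformations/rt_info/attributes.hpp>

    // Illustrative sketch: create an empty rt_info attribute from its type info,
    // ready to be filled by an AttributeVisitor-based reader.
    std::shared_ptr<ov::Variant> make_empty_fused_names_attribute() {
        ov::pass::Attributes factory;
        const auto& type_info = ov::VariantWrapper<ngraph::FusedNames>::get_type_info_static();
        return std::shared_ptr<ov::Variant>(factory.create_by_type_info(type_info));
    }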
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#pragma once
+
 #include <assert.h>
 #include <functional>
 #include <memory>
@@ -13,7 +15,7 @@
 #include <transformations_visibility.hpp>


-namespace ngraph {
+namespace ov {

 /**
  * @ingroup ie_runtime_attr_api
@@ -25,15 +27,19 @@ public:
 };

 TRANSFORMATIONS_API void disable_constant_folding(const std::shared_ptr<Node>& node);
-} // namespace ngraph
-
-namespace ov {
-extern template class TRANSFORMATIONS_API VariantImpl<ngraph::DisableConstantFolding>;
+
+TRANSFORMATIONS_API void enable_constant_folding(const std::shared_ptr<Node>& node);
+
+TRANSFORMATIONS_API bool constant_folding_is_disabled(const std::shared_ptr<Node>& node);
+
+extern template class TRANSFORMATIONS_API VariantImpl<DisableConstantFolding>;

 template<>
-class TRANSFORMATIONS_API VariantWrapper<ngraph::DisableConstantFolding> : public VariantImpl<ngraph::DisableConstantFolding> {
+class TRANSFORMATIONS_API VariantWrapper<DisableConstantFolding> : public VariantImpl<DisableConstantFolding> {
 public:
-    OPENVINO_RTTI("DISABLED_CONSTANT_FOLDING");
+    OPENVINO_RTTI("disabled_constant_folding", "0");

     VariantWrapper() = default;

     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
@@ -7,6 +7,8 @@
  * @file fused_names_attribute.hpp
  */

+#pragma once
+
 #include <assert.h>
 #include <functional>
 #include <memory>
@@ -15,7 +17,8 @@

 #include <ngraph/node.hpp>
 #include <ngraph/variant.hpp>
-#include "openvino/core/rtti.hpp"
+#include <openvino/core/rtti.hpp>
+#include <ngraph/attribute_visitor.hpp>
 #include <transformations_visibility.hpp>


@@ -31,6 +34,8 @@ private:
     std::set<std::string> fused_names;

 public:
+    friend class VariantWrapper<FusedNames>;
+
     /**
      * A default constructor
      */
@@ -86,13 +91,23 @@ extern template class TRANSFORMATIONS_API VariantImpl<ngraph::FusedNames>;
 template<>
 class TRANSFORMATIONS_API VariantWrapper<ngraph::FusedNames> : public VariantImpl<ngraph::FusedNames> {
 public:
-    OPENVINO_RTTI("Variant::RuntimeAttribute::FusedNames");
-    BWDCMP_RTTI_DECLARATION;
+    OPENVINO_RTTI("fused_names", "0");

     VariantWrapper() = default;

     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}

     std::shared_ptr<ngraph::Variant> merge(const ngraph::NodeVector & nodes) override;

     std::shared_ptr<ngraph::Variant> init(const std::shared_ptr<ngraph::Node> & node) override;
+
+    bool visit_attributes(AttributeVisitor & visitor) override;
 };

+template <>
+class TRANSFORMATIONS_API AttributeAdapter<std::set<std::string>> : public DirectValueAccessor<std::set<std::string>> {
+public:
+    OPENVINO_RTTI("AttributeAdapter<set<string>>");
+    AttributeAdapter(std::set<std::string>& value) : DirectValueAccessor<std::set<std::string>>(value) {}
+};
 } // namespace ov
@@ -2,6 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //

+#pragma once
+
 #include <assert.h>
 #include <functional>
 #include <memory>
@@ -28,11 +30,9 @@ extern template class TRANSFORMATIONS_API VariantImpl<NmsSelectedIndices>;
 template<>
 class TRANSFORMATIONS_API VariantWrapper<NmsSelectedIndices> : public VariantImpl<NmsSelectedIndices> {
 public:
-    static constexpr VariantTypeInfo type_info{"NMS_SELECTED_INDICES", 0};
+    OPENVINO_RTTI("nms_selected_indices", "0");

-    const VariantTypeInfo &get_type_info() const override {
-        return type_info;
-    }
     VariantWrapper() = default;

     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}
@@ -19,7 +19,7 @@
 #include <ngraph/variant.hpp>
 #include <transformations_visibility.hpp>

-namespace ngraph {
+namespace ov {

 /**
  * @ingroup ie_runtime_attr_api
@@ -31,6 +31,7 @@ private:
     std::string primitives_priority;

 public:
+    friend class VariantWrapper<PrimitivesPriority>;
     /**
      * A default constructor
      */
@@ -54,22 +55,22 @@ public:
  */
 TRANSFORMATIONS_API std::string getPrimitivesPriority(const std::shared_ptr<ngraph::Node> & node);

-} // namespace ngraph
-
-namespace ov {
-
-extern template class TRANSFORMATIONS_API VariantImpl<ngraph::PrimitivesPriority>;
+extern template class TRANSFORMATIONS_API VariantImpl<PrimitivesPriority>;

 template<>
-class TRANSFORMATIONS_API VariantWrapper<ngraph::PrimitivesPriority> : public VariantImpl<ngraph::PrimitivesPriority> {
+class TRANSFORMATIONS_API VariantWrapper<PrimitivesPriority> : public VariantImpl<PrimitivesPriority> {
 public:
-    OPENVINO_RTTI("VariantWrapper<PrimitivesPriority>");
+    OPENVINO_RTTI("primitives_priority", "0");

     VariantWrapper() = default;

     VariantWrapper(const value_type &value) : VariantImpl<value_type>(value) {}

     std::shared_ptr<ov::Variant> merge(const ngraph::NodeVector & nodes) override;

     std::shared_ptr<ov::Variant> init(const std::shared_ptr<ngraph::Node> & node) override;

+    bool visit_attributes(AttributeVisitor & visitor) override;
 };

 } // namespace ov
@@ -9,19 +9,18 @@
 #include <ngraph/variant.hpp>
 #include <transformations_visibility.hpp>


 namespace ov {
 template <>
 class TRANSFORMATIONS_API VariantWrapper<ngraph::Strides> : public VariantImpl<ngraph::Strides> {
 public:
-    OPENVINO_RTTI("VariantWrapper<Strides>");
-    VariantWrapper(const value_type& value)
-        : VariantImpl<value_type>(value) {
-    }
-};
+    OPENVINO_RTTI("strides", "0");

-} // namespace ov
+    VariantWrapper() = default;
+
+    VariantWrapper(const value_type& value) : VariantImpl<value_type>(value) {}
+};
+
+TRANSFORMATIONS_API bool has_strides_prop(const ngraph::Input<ngraph::Node>& node);
+TRANSFORMATIONS_API ngraph::Strides get_strides_prop(const ngraph::Input<ngraph::Node>& node);
+TRANSFORMATIONS_API void insert_strides_prop(ngraph::Input<ngraph::Node>& node, const ngraph::Strides& strides);
+} // namespace ov
@@ -8,6 +8,7 @@
 #include <ngraph/rt_info.hpp>
 #include <transformations/utils/utils.hpp>
 #include <transformations/common_optimizations/weights_dequantize_to_fake_quantize.hpp>
+#include <transformations/rt_info/disable_constant_folding.hpp>
 #include "itt.hpp"

 NGRAPH_RTTI_DEFINITION(ngraph::pass::WeightsDequantizeToFakeQuantize, "WeightsDequantizeToFakeQuantize", 0);
@@ -62,8 +63,8 @@ ngraph::pass::WeightsDequantizeToFakeQuantize::WeightsDequantizeToFakeQuantize()
        ngraph::copy_runtime_info(nodes_to_copy_RT_info_from, fq);
        multiply_node->output(0).replace(fq->output(0));

-        if (convert_node->get_rt_info().count("DISABLED_CONSTANT_FOLDING"))
-            convert_node->get_rt_info().erase("DISABLED_CONSTANT_FOLDING");
+        if (ov::constant_folding_is_disabled(convert_node))
+            ov::enable_constant_folding(convert_node);
        return true;
    };
@@ -19,14 +19,14 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::InitNodeInfo, "InitNodeInfo", 0);
 bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Function> f) {
     RUN_ON_FUNCTION_SCOPE(InitNodeInfo);
     std::vector<std::shared_ptr<Variant> > attributes {
-        std::make_shared<VariantWrapper<FusedNames> >(FusedNames())
+        std::make_shared<VariantWrapper<ngraph::FusedNames> >(ngraph::FusedNames())
     };

     using VariantCreator = std::function<std::shared_ptr<Variant>(const std::string&)>;
     std::map<std::string, VariantCreator> update_attributes {
         {"PrimitivesPriority",
             [](const std::string & value) -> std::shared_ptr<Variant> {
-                return std::make_shared<VariantWrapper<PrimitivesPriority> >(PrimitivesPriority(value));
+                return std::make_shared<VariantWrapper<ov::PrimitivesPriority> >(ov::PrimitivesPriority(value));
             }
         }
     };
@@ -42,9 +42,9 @@ bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Functio
         // Default attributes initialization
         for (auto & attr : attributes) {
             // Skip initialization if attribute has been already set
-            if (rtInfo.count(attr->get_type_info().name)) continue;
+            if (rtInfo.count(attr->get_type_info())) continue;
             if (auto init_attr = attr->init(node)) {
-                rtInfo[attr->get_type_info().name] = init_attr;
+                rtInfo[attr->get_type_info()] = init_attr;
             }
         }
         // Convert manually set attributes to appropriate VariantWrapper class instances
@@ -54,7 +54,7 @@ bool ngraph::pass::InitNodeInfo::run_on_function(std::shared_ptr<ngraph::Functio
             if (auto variant_string = std::dynamic_pointer_cast<VariantWrapper<std::string> >(rtInfo[attr.first])) {
                 rtInfo.erase(attr.first);
                 auto res = attr.second(variant_string->get());
-                rtInfo[res->get_type_info().name] = res;
+                rtInfo[res->get_type_info()] = res;
             }
         }
     }
@@ -13,6 +13,7 @@
 #include <ngraph/rt_info.hpp>
 #include <ngraph/pattern/op/wrap_type.hpp>
 #include <ngraph/variant.hpp>
+#include <transformations/rt_info/disable_constant_folding.hpp>

 using namespace ngraph;

@@ -53,8 +54,7 @@ ngraph::pass::DisableConvertConstantFoldingOnConstPath::DisableConvertConstantFo
        auto child = target_inputs.begin()->get_node();
        if (ov::is_type<ngraph::opset1::Constant>(parent) &&
            (ov::is_type<ngraph::opset1::Subtract>(child) || ov::is_type<ngraph::opset1::Multiply>(child))) {
-            auto& rtInfo = convert->get_rt_info();
-            rtInfo["DISABLED_CONSTANT_FOLDING"] = std::make_shared<VariantWrapper<std::string>>("");
+            ov::disable_constant_folding(convert);
            return true;
        }
@@ -4,9 +4,19 @@

 #include "transformations/rt_info/disable_constant_folding.hpp"

-template class ov::VariantImpl<ngraph::DisableConstantFolding>;
+template class ov::VariantImpl<ov::DisableConstantFolding>;

-void ngraph::disable_constant_folding(const std::shared_ptr<Node>& node) {
+void ov::disable_constant_folding(const std::shared_ptr<Node>& node) {
     auto & rt_info = node->get_rt_info();
-    rt_info[VariantWrapper<DisableConstantFolding>::get_type_info_static().name] = make_variant<DisableConstantFolding>({});
+    rt_info[VariantWrapper<DisableConstantFolding>::get_type_info_static()] = make_variant<DisableConstantFolding>({});
 }
+
+void ov::enable_constant_folding(const std::shared_ptr<Node>& node) {
+    auto & rt_info = node->get_rt_info();
+    rt_info.erase(VariantWrapper<DisableConstantFolding>::get_type_info_static());
+}
+
+bool ov::constant_folding_is_disabled(const std::shared_ptr<Node> &node) {
+    const auto & rt_info = node->get_rt_info();
+    return rt_info.count(VariantWrapper<DisableConstantFolding>::get_type_info_static());
+}
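With these helpers, passes no longer touch the raw rt_info map directly; a minimal usage sketch (the node variable is a placeholder, not taken from the commit):

    #include <transformations/rt_info/disable_constant_folding.hpp>

    // Illustrative only: keep a node out of constant folding, then restore it.
    void toggle_folding(const std::shared_ptr<ngraph::Node>& node) {
        ov::disable_constant_folding(node);           // adds the disabled_constant_folding attribute
        if (ov::constant_folding_is_disabled(node)) {
            ov::enable_constant_folding(node);        // removes it again
        }
    }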
|
@@ -38,9 +38,9 @@ std::string ngraph::getFusedNames(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();
     using FusedNamesWrapper = VariantWrapper<FusedNames>;

-    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static().name)) return {};
+    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) return {};

-    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static().name);
+    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static());
     FusedNames fusedNames = ov::as_type_ptr<FusedNamesWrapper>(attr)->get();
     return fusedNames.getNames();
 }
@@ -51,24 +51,23 @@ std::vector<std::string> ngraph::getFusedNamesVector(const std::shared_ptr<ngrap
     const auto &rtInfo = node->get_rt_info();
     using FusedNamesWrapper = VariantWrapper<FusedNames>;

-    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static().name)) return {};
+    if (!rtInfo.count(FusedNamesWrapper::get_type_info_static())) return {};

-    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static().name);
+    const auto &attr = rtInfo.at(FusedNamesWrapper::get_type_info_static());
     FusedNames fusedNames = ov::as_type_ptr<FusedNamesWrapper>(attr)->get();
     return fusedNames.getVectorNames();
 }

 template class ov::VariantImpl<FusedNames>;

-BWDCMP_RTTI_DEFINITION(VariantWrapper<FusedNames>);
-
 std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::merge(const ngraph::NodeVector & nodes) {
     FusedNames mergedNames;
     for (auto &node : nodes) {
         const auto &rtInfo = node->get_rt_info();
-        if (!rtInfo.count(VariantWrapper<FusedNames>::get_type_info_static().name)) continue;
-
-        const auto attr = rtInfo.at(VariantWrapper<FusedNames>::get_type_info_static().name);
+        if (!rtInfo.count(VariantWrapper<FusedNames>::get_type_info_static())) continue;
+
+        const auto attr = rtInfo.at(VariantWrapper<FusedNames>::get_type_info_static());
         if (auto fusedNames = std::dynamic_pointer_cast<VariantWrapper<FusedNames> >(attr)) {
             mergedNames.fuseWith(fusedNames->get());
         }
@@ -79,3 +78,8 @@ std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::merge(const ngraph:
 std::shared_ptr<ngraph::Variant> VariantWrapper<FusedNames>::init(const std::shared_ptr<ngraph::Node> & node) {
     return std::make_shared<VariantWrapper<FusedNames> > (FusedNames(node->get_friendly_name()));
 }
+
+bool VariantWrapper<FusedNames>::visit_attributes(AttributeVisitor &visitor) {
+    visitor.on_attribute("value", m_value.fused_names);
+    return true;
+}
@@ -6,14 +6,12 @@

 template class ov::VariantImpl<ov::NmsSelectedIndices>;

-constexpr ov::VariantTypeInfo ov::VariantWrapper<ov::NmsSelectedIndices>::type_info;
-
 void ov::set_nms_selected_indices(Node * node) {
     auto & rt_info = node->get_rt_info();
-    rt_info[VariantWrapper<NmsSelectedIndices>::type_info.name] = make_variant<NmsSelectedIndices>({});
+    rt_info[VariantWrapper<NmsSelectedIndices>::get_type_info_static()] = make_variant<NmsSelectedIndices>({});
 }

 bool ov::has_nms_selected_indices(const Node * node) {
     const auto & rt_info = node->get_rt_info();
-    return rt_info.count(VariantWrapper<NmsSelectedIndices>::type_info.name);
+    return rt_info.count(VariantWrapper<NmsSelectedIndices>::get_type_info_static());
 }
@@ -23,13 +23,13 @@ std::string PrimitivesPriority::getPrimitivesPriority() const {
     return primitives_priority;
 }

-std::string ngraph::getPrimitivesPriority(const std::shared_ptr<ngraph::Node> &node) {
+std::string ov::getPrimitivesPriority(const std::shared_ptr<ngraph::Node> &node) {
     const auto &rtInfo = node->get_rt_info();
     using PrimitivesPriorityWrapper = VariantWrapper<PrimitivesPriority>;

-    if (!rtInfo.count(PrimitivesPriorityWrapper::get_type_info_static().name)) return "";
+    if (!rtInfo.count(PrimitivesPriorityWrapper::get_type_info_static())) return "";

-    const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::get_type_info_static().name);
+    const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::get_type_info_static());
     PrimitivesPriority pp = ov::as_type_ptr<PrimitivesPriorityWrapper>(attr)->get();
     return pp.getPrimitivesPriority();
 }
@@ -59,7 +59,7 @@ std::shared_ptr<ngraph::Variant> VariantWrapper<PrimitivesPriority>::merge(const
     }

     if (unique_pp.size() > 1) {
-        throw ngraph_error(std::string(get_type_info().name) + " no rule defined for multiple values.");
+        throw ngraph_error(std::string(get_type_info()) + " no rule defined for multiple values.");
     }

     std::string final_primitives_priority;
@@ -70,5 +70,10 @@ std::shared_ptr<ngraph::Variant> VariantWrapper<PrimitivesPriority>::merge(const
 }

 std::shared_ptr<ngraph::Variant> VariantWrapper<PrimitivesPriority>::init(const std::shared_ptr<ngraph::Node> & node) {
-    throw ngraph_error(std::string(get_type_info().name) + " has no default initialization.");
+    throw ngraph_error(std::string(get_type_info()) + " has no default initialization.");
 }
+
+bool VariantWrapper<PrimitivesPriority>::visit_attributes(AttributeVisitor &visitor) {
+    visitor.on_attribute("value", m_value.primitives_priority);
+    return true;
+}
@@ -4,19 +4,19 @@

 #include "transformations/rt_info/strides_property.hpp"

-bool has_strides_prop(const ngraph::Input<ngraph::Node>& node) {
+bool ov::has_strides_prop(const ngraph::Input<ngraph::Node>& node) {
     const auto& rt_map = node.get_rt_info();
-    auto it = rt_map.find(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static().name);
+    auto it = rt_map.find(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static());
     return it != rt_map.end();
 }

-ngraph::Strides get_strides_prop(const ngraph::Input<ngraph::Node>& node) {
+ngraph::Strides ov::get_strides_prop(const ngraph::Input<ngraph::Node>& node) {
     const auto& rt_map = node.get_rt_info();
-    const auto& var = rt_map.at(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static().name);
+    const auto& var = rt_map.at(ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static());
     return ngraph::as_type_ptr<ngraph::VariantWrapper<ngraph::Strides>>(var)->get();
 }

-void insert_strides_prop(ngraph::Input<ngraph::Node>& node, const ngraph::Strides& strides) {
+void ov::insert_strides_prop(ngraph::Input<ngraph::Node>& node, const ngraph::Strides& strides) {
     auto& rt_map = node.get_rt_info();
-    rt_map[ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static().name] = std::make_shared<ngraph::VariantWrapper<ngraph::Strides>>(strides);
+    rt_map[ngraph::VariantWrapper<ngraph::Strides>::get_type_info_static()] = std::make_shared<ngraph::VariantWrapper<ngraph::Strides>>(strides);
 }
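A short, hypothetical usage sketch of the strides helpers now exported from the ov namespace (the input object is a placeholder, not taken from the commit):

    #include <transformations/rt_info/strides_property.hpp>

    // Illustrative only: attach a strides hint to a node input and read it back.
    void tag_strides(ngraph::Input<ngraph::Node> input) {
        ov::insert_strides_prop(input, ngraph::Strides{2, 2});
        if (ov::has_strides_prop(input)) {
            ngraph::Strides restored = ov::get_strides_prop(input);   // {2, 2}
            (void)restored;
        }
    }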
@ -18,6 +18,7 @@
|
||||
#include "ngraph_ops/type_relaxed.hpp"
|
||||
#include "pugixml.hpp"
|
||||
#include "transformations/serialize.hpp"
|
||||
#include "transformations/rt_info/attributes.hpp"
|
||||
|
||||
using namespace ngraph;
|
||||
|
||||
@ -175,6 +176,82 @@ private:
|
||||
pugi::xml_node& m_xml_node;
|
||||
};
|
||||
|
||||
class RTInfoSerializer : public ngraph::AttributeVisitor {
|
||||
pugi::xml_node m_node;
|
||||
|
||||
public:
|
||||
RTInfoSerializer(const pugi::xml_node node) : m_node(node) {}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<void> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::set<std::string>>>(&adapter)) {
|
||||
const auto & value = join(a->get());
|
||||
m_node.append_attribute(name.c_str()).set_value(value.c_str());
|
||||
} else {
|
||||
throw ngraph_error("Unsupported attribute type for serialization: " + name);
|
||||
}
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<bool> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
m_node.append_attribute(name.c_str()).set_value(adapter.get());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::string> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
m_node.append_attribute(name.c_str()).set_value(adapter.get().c_str());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<int64_t> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
m_node.append_attribute(name.c_str()).set_value(adapter.get());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<double> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
m_node.append_attribute(name.c_str()).set_value(adapter.get());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<int>> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
const auto & value = join(adapter.get());
|
||||
m_node.append_attribute(name.c_str()).set_value(value.c_str());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<int64_t>> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
const auto & value = join(adapter.get());
|
||||
m_node.append_attribute(name.c_str()).set_value(value.c_str());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<uint64_t>> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
const auto & value = join(adapter.get());
|
||||
m_node.append_attribute(name.c_str()).set_value(value.c_str());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<float>> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
const auto & value = join(adapter.get());
|
||||
m_node.append_attribute(name.c_str()).set_value(value.c_str());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::vector<std::string>> &adapter) override {
|
||||
check_attribute_name(name);
|
||||
const auto & value = join(adapter.get());
|
||||
m_node.append_attribute(name.c_str()).set_value(value.c_str());
|
||||
}
|
||||
|
||||
void on_adapter(const std::string &name, ngraph::ValueAccessor<std::shared_ptr<Function>> &adapter) override {
|
||||
throw ngraph_error("Function type is unsupported for rt info serialization");
|
||||
}
|
||||
|
||||
void check_attribute_name(const std::string & name) const {
|
||||
if (name == "name" || name == "version") {
|
||||
throw ngraph_error("Attribute key with name: " + name + " is not allowed. Please use another name");
|
||||
}
|
||||
}
|
||||
};
|
||||
} // namespace rt_info
|
||||
|
||||
class XmlSerializer : public ngraph::AttributeVisitor {
|
||||
@@ -724,6 +801,27 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
        // <layers/data> general attributes
        pugi::xml_node data = layer.append_child("data");

+        auto append_runtime_info = [](pugi::xml_node & node, const RTMap& attributes) {
+            pugi::xml_node rt_node = node.append_child("rt_info");
+            bool has_attrs = false;
+            for (const auto &item : attributes) {
+                auto attribute_node = rt_node.append_child("attribute");
+                attribute_node.append_attribute("name").set_value(item.second->get_type_info().name);
+                attribute_node.append_attribute("version").set_value(item.second->get_type_info().get_version().c_str());
+                rt_info::RTInfoSerializer serializer(attribute_node);
+                if (!item.second->visit_attributes(serializer)) {
+                    rt_node.remove_child(attribute_node);
+                } else {
+                    has_attrs = true;
+                }
+            }
+            if (!has_attrs) {
+                node.remove_child(rt_node);
+            }
+        };
+
+        append_runtime_info(layer, node->get_rt_info());
+
        int port_id = 0;
        // <layers/input>
        if (node->get_input_size() > 0) {
@ -748,6 +846,7 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
|
||||
.set_value(std::to_string(d.get_length()).c_str());
|
||||
}
|
||||
}
|
||||
append_runtime_info(port, i.get_rt_info());
|
||||
}
|
||||
|
||||
if (node_type_name == "TensorIterator" || node_type_name == "Loop") {
|
||||
@ -798,6 +897,7 @@ void ngfunction_2_irv10(pugi::xml_node& netXml,
|
||||
.set_value(std::to_string(d.get_length()).c_str());
|
||||
}
|
||||
}
|
||||
append_runtime_info(port, o.get_rt_info());
|
||||
}
|
||||
if (node_type_name == "TensorIterator" || node_type_name == "Loop") {
|
||||
layer.insert_move_after(output, layer.first_child());
|
||||
@ -868,9 +968,6 @@ std::string provide_bin_path(const std::string &xmlPath, const std::string &binP
|
||||
} // namespace
|
||||
|
||||
namespace ngraph {
|
||||
|
||||
// ! [function_pass:serialize_cpp]
|
||||
// serialize.cpp
|
||||
bool pass::Serialize::run_on_function(std::shared_ptr<ngraph::Function> f) {
|
||||
RUN_ON_FUNCTION_SCOPE(Serialize);
|
||||
|
||||
@ -1014,5 +1111,4 @@ bool ngraph::pass::StreamSerialize::run_on_function(std::shared_ptr<ngraph::Func
|
||||
// Return false because we didn't change nGraph Function
|
||||
return false;
|
||||
}
|
||||
// ! [function_pass:serialize_cpp]
|
||||
} // namespace ngraph
|
||||
|
@ -0,0 +1,186 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <string>
|
||||
#include <memory>
|
||||
|
||||
#include <inference_engine.hpp>
|
||||
#include <transformations/rt_info/fused_names_attribute.hpp>
|
||||
|
||||
using namespace ngraph;
|
||||
|
||||
TEST(RTInfoDeserialization, Node) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="10">
|
||||
<layers>
|
||||
<layer name="in1" type="Parameter" id="0" version="opset8">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="in1"/>
|
||||
</rt_info>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="Round" id="1" type="Round" version="opset8">
|
||||
<data mode="half_to_even"/>
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="Round1,Round2"/>
|
||||
</rt_info>
|
||||
<input>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="2" version="opset8">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
|
||||
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
auto core = InferenceEngine::Core();
|
||||
auto net = core.ReadNetwork(model, InferenceEngine::Blob::Ptr());
|
||||
auto f = net.getFunction();
|
||||
|
||||
auto check_fused_names = [](const RTMap & info, const std::string & names) {
|
||||
const std::string & key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(key));
|
||||
auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
|
||||
ASSERT_TRUE(fused_names_attr);
|
||||
ASSERT_EQ(fused_names_attr->get().getNames(), names);
|
||||
};
|
||||
|
||||
auto param = f->get_parameters()[0];
|
||||
check_fused_names(param->get_rt_info(), "in1");
|
||||
|
||||
auto result = f->get_results()[0];
|
||||
auto round = result->get_input_node_ptr(0);
|
||||
check_fused_names(round->get_rt_info(), "Round1,Round2");
|
||||
}
|
||||
|
||||
TEST(RTInfoDeserialization, InputAndOutput) {
|
||||
std::string model = R"V0G0N(
|
||||
<net name="Network" version="10">
|
||||
<layers>
|
||||
<layer name="in1" type="Parameter" id="0" version="opset8">
|
||||
<data element_type="f32" shape="1,3,22,22"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="test1,test2"/>
|
||||
</rt_info>
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="sum" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="test2,test3"/>
|
||||
</rt_info>
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="test3,test4"/>
|
||||
</rt_info>
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="test4,test5"/>
|
||||
</rt_info>
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer name="output" type="Result" id="2" version="opset8">
|
||||
<input>
|
||||
<port id="0" precision="FP32">
|
||||
<rt_info>
|
||||
<attribute name="fused_names" version="0" value="test5,test6"/>
|
||||
</rt_info>
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>22</dim>
|
||||
<dim>22</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
|
||||
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
|
||||
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
|
||||
</edges>
|
||||
</net>
|
||||
)V0G0N";
|
||||
auto core = InferenceEngine::Core();
|
||||
auto net = core.ReadNetwork(model, InferenceEngine::Blob::Ptr());
|
||||
auto f = net.getFunction();
|
||||
|
||||
auto check_fused_names = [](const RTMap & info, const std::string & names) {
|
||||
const std::string & key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(key));
|
||||
auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
|
||||
ASSERT_TRUE(fused_names_attr);
|
||||
ASSERT_EQ(fused_names_attr->get().getNames(), names);
|
||||
};
|
||||
|
||||
auto param = f->get_parameters()[0];
|
||||
check_fused_names(param->output(0).get_rt_info(), "test1,test2");
|
||||
|
||||
auto result = f->get_results()[0];
|
||||
check_fused_names(result->input(0).get_rt_info(), "test5,test6");
|
||||
|
||||
auto add = result->get_input_node_ptr(0);
|
||||
check_fused_names(add->input(0).get_rt_info(), "test2,test3");
|
||||
check_fused_names(add->input(1).get_rt_info(), "test3,test4");
|
||||
check_fused_names(add->output(0).get_rt_info(), "test4,test5");
|
||||
}
|
@ -0,0 +1,77 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include <file_utils.h>
|
||||
#include <ie_api.h>
|
||||
#include <ie_iextension.h>
|
||||
#include "common_test_utils/ngraph_test_utils.hpp"
|
||||
#include "ie_core.hpp"
|
||||
#include "ngraph/ngraph.hpp"
|
||||
#include "transformations/serialize.hpp"
|
||||
#include <ngraph/opsets/opset8.hpp>
|
||||
#include <transformations/rt_info/attributes.hpp>
|
||||
|
||||
using namespace ngraph;
|
||||
|
||||
class RTInfoSerializationTest : public CommonTestUtils::TestsCommon {
|
||||
protected:
|
||||
std::string test_name = GetTestName() + "_" + GetTimestamp();
|
||||
std::string m_out_xml_path = test_name + ".xml";
|
||||
std::string m_out_bin_path = test_name + ".bin";
|
||||
|
||||
void TearDown() override {
|
||||
std::remove(m_out_xml_path.c_str());
|
||||
std::remove(m_out_bin_path.c_str());
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(RTInfoSerializationTest, all_attributes) {
|
||||
auto init_info = [](RTMap & info) {
|
||||
info[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
|
||||
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames("add"));
|
||||
info[VariantWrapper<ov::PrimitivesPriority>::get_type_info_static()] =
|
||||
std::make_shared<VariantWrapper<ov::PrimitivesPriority>>(ov::PrimitivesPriority("priority"));
|
||||
};
|
||||
|
||||
std::shared_ptr<ngraph::Function> function;
|
||||
{
|
||||
auto data = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10});
|
||||
auto add = std::make_shared<ngraph::opset8::Add>(data, data);
|
||||
init_info(add->get_rt_info());
|
||||
init_info(add->input(0).get_rt_info());
|
||||
init_info(add->input(1).get_rt_info());
|
||||
init_info(add->output(0).get_rt_info());
|
||||
function = std::make_shared<ngraph::Function>(OutputVector{add}, ParameterVector{data});
|
||||
}
|
||||
|
||||
pass::Manager m;
|
||||
m.register_pass<pass::Serialize>(m_out_xml_path, m_out_bin_path);
|
||||
m.run_passes(function);
|
||||
|
||||
auto core = InferenceEngine::Core();
|
||||
auto net = core.ReadNetwork(m_out_xml_path, m_out_bin_path);
|
||||
auto f = net.getFunction();
|
||||
|
||||
auto check_info = [](const RTMap & info) {
|
||||
const std::string & key = VariantWrapper<ngraph::FusedNames>::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(key));
|
||||
auto fused_names_attr = std::dynamic_pointer_cast<VariantWrapper<ngraph::FusedNames>>(info.at(key));
|
||||
ASSERT_TRUE(fused_names_attr);
|
||||
ASSERT_EQ(fused_names_attr->get().getNames(), "add");
|
||||
|
||||
const std::string & pkey = VariantWrapper<ov::PrimitivesPriority>::get_type_info_static();
|
||||
ASSERT_TRUE(info.count(pkey));
|
||||
auto primitives_priority_attr = std::dynamic_pointer_cast<VariantWrapper<ov::PrimitivesPriority>>(info.at(pkey));
|
||||
ASSERT_TRUE(primitives_priority_attr);
|
||||
ASSERT_EQ(primitives_priority_attr->get().getPrimitivesPriority(), "priority");
|
||||
};
|
||||
|
||||
auto add = f->get_results()[0]->get_input_node_ptr(0);
|
||||
check_info(add->get_rt_info());
|
||||
check_info(add->input(0).get_rt_info());
|
||||
check_info(add->input(1).get_rt_info());
|
||||
check_info(add->output(0).get_rt_info());
|
||||
}
|
@ -169,7 +169,7 @@ const std::vector<ConvertSubtractConstantTransformationTestValues> testValues =
|
||||
},
|
||||
{
|
||||
{ ngraph::element::f32, false },
|
||||
{ {127.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "DISABLED_CONSTANT_FOLDING" } },
|
||||
{ {127.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "disabled_constant_folding_0" } },
|
||||
{ {0.03f}, element::f32, {}, false }
|
||||
},
|
||||
{ std::vector<float>{ 2.f }, ngraph::element::i8},
|
||||
|
@ -284,7 +284,7 @@ const std::vector<ConvolutionBackpropDataTransformationTestValues> testValues =
|
||||
{
|
||||
ngraph::element::u8,
|
||||
{{}, { { 128.f }, ngraph::element::f32, {}, false }, {}},
|
||||
{{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } }, {}},
|
||||
{{}, { { 2.f }, ngraph::element::f32, {1, 2, 1, 1}, true, 1ul, element::i8, false, { "disabled_constant_folding_0" } }, {}},
|
||||
{{}, {}, {{ 0.0002f }, ngraph::element::f32, {}}},
|
||||
op::Constant::create(ngraph::element::i8, ngraph::Shape{}, std::vector<float>{ 2.f }),
|
||||
true
|
||||
|
@ -174,7 +174,7 @@ const std::vector<ConvolutionQDqTransformationTestValues> testValues = {
|
||||
},
|
||||
{
|
||||
{},
|
||||
{ { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } },
|
||||
{ { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } },
|
||||
{}
|
||||
},
|
||||
{ std::vector<float>{ 100.f }, ngraph::element::i8},
|
||||
@ -349,7 +349,7 @@ const std::vector<ConvolutionQDqTransformationTestValues> testValues = {
|
||||
},
|
||||
{
|
||||
{},
|
||||
{ { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } },
|
||||
{ { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } },
|
||||
{}
|
||||
},
|
||||
{ std::vector<float>{ 2.f }, ngraph::element::i8},
|
||||
@ -417,7 +417,7 @@ const std::vector<ConvolutionQDqTransformationTestValues> testValues = {
|
||||
},
|
||||
{
|
||||
{},
|
||||
{ { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "DISABLED_CONSTANT_FOLDING" } },
|
||||
{ { 127.f }, ngraph::element::f32, { 6, 1, 1, 1 }, false, 1ul, element::i8, false, { "disabled_constant_folding_0" } },
|
||||
{}
|
||||
},
|
||||
{ std::vector<float>{ 2.f }, ngraph::element::i8},
|
||||
|
@ -145,12 +145,12 @@ const std::vector<DisableConvertOnConstPathTransformationValues> testValues = {
|
||||
ngraph::element::u8,
|
||||
{
|
||||
{ngraph::element::f32},
|
||||
{ {128.f}, element::f32, {}, false, 1ul, element::u8, true, {}, { "DISABLED_CONSTANT_FOLDING" } },
|
||||
{ {128.f}, element::f32, {}, false, 1ul, element::u8, true, {}, { "disabled_constant_folding_0" } },
|
||||
{ {0.02f}, element::f32, {}, false }
|
||||
},
|
||||
{
|
||||
{ ngraph::element::f32, false },
|
||||
{ {128.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "DISABLED_CONSTANT_FOLDING" } },
|
||||
{ {128.f}, element::f32, {}, false, 1ul, element::i8, true, {}, { "disabled_constant_folding_0" } },
|
||||
{ {0.03f}, element::f32, {}, false }
|
||||
},
|
||||
{ std::vector<float>{ 1.f }, ngraph::element::f32},
|
||||
|
@ -198,7 +198,7 @@ const std::vector<FakeQuantizeWithNotOptimalTransformationTestValues> fakeQuanti
|
||||
{},
|
||||
{
|
||||
{},
|
||||
{ std::vector<float>(64, 127.f), ngraph::element::f32, {64, 1, 1, 1}, false, 1ul, ngraph::element::i8, false, {"DISABLED_CONSTANT_FOLDING"}},
|
||||
{ std::vector<float>(64, 127.f), ngraph::element::f32, {64, 1, 1, 1}, false, 1ul, ngraph::element::i8, false, {"disabled_constant_folding_0"}},
|
||||
{}
|
||||
},
|
||||
{
|
||||
|
@ -473,7 +473,7 @@ const std::vector<GroupConvolutionTestValues> testValuesGroupConv = {
|
||||
1,
|
||||
ngraph::element::i8,
|
||||
false,
|
||||
{"DISABLED_CONSTANT_FOLDING"}
|
||||
{"disabled_constant_folding_0"}
|
||||
},
|
||||
{}
|
||||
},
|
||||
|
@ -140,7 +140,7 @@ TEST(nop_elimination, squeeze_reshape_elimination_check_info) {
|
||||
if (node->get_friendly_name() == "reshape") {
|
||||
reshape_is_missing = false;
|
||||
ASSERT_TRUE(std::dynamic_pointer_cast<opset4::Reshape>(node));
|
||||
auto original_names = getFusedNamesVector(node);
|
||||
auto original_names = ngraph::getFusedNamesVector(node);
|
||||
sort(original_names.begin(), original_names.end());
|
||||
ASSERT_EQ(original_names, std::vector<std::string>({"reshape", "squeeze"}));
|
||||
}
|
||||
|
@ -786,7 +786,7 @@ FunctionsComparator::Result FunctionsComparator::compare(
|
||||
}
|
||||
|
||||
void check_rt_info(const std::shared_ptr<ngraph::Function>& f) {
|
||||
static const std::vector<std::string> attrs_to_check{"Variant::RuntimeAttribute::FusedNames"};
|
||||
static const std::vector<std::string> attrs_to_check{"fused_names_0"};
|
||||
|
||||
std::ostringstream err_log;
|
||||
for (auto& op : f->get_ops()) {
|
||||
|
@ -225,24 +225,24 @@ TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriority) {
|
||||
|
||||
TEST(NetworkContext_CNNNetwork, HashWithFusedNames) {
|
||||
auto setFusedEmpty = [&](Node::RTMap& rtInfo) {
|
||||
rtInfo[VariantWrapper<FusedNames>::get_type_info_static().name] =
|
||||
std::make_shared<VariantWrapper<FusedNames>>(FusedNames());
|
||||
rtInfo[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
|
||||
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames());
|
||||
};
|
||||
auto setFused = [&](Node::RTMap& rtInfo, const std::string& name) {
|
||||
rtInfo[VariantWrapper<FusedNames>::get_type_info_static().name] =
|
||||
std::make_shared<VariantWrapper<FusedNames>>(FusedNames(name));
|
||||
rtInfo[VariantWrapper<ngraph::FusedNames>::get_type_info_static()] =
|
||||
std::make_shared<VariantWrapper<ngraph::FusedNames>>(ngraph::FusedNames(name));
|
||||
};
|
||||
checkCustomRt(setFusedEmpty, setFused);
|
||||
}
|
||||
|
||||
TEST(NetworkContext_CNNNetwork, HashWithPrimitivesPriorityType) {
|
||||
auto setPrimEmpty = [&](Node::RTMap& rtInfo) {
|
||||
rtInfo[VariantWrapper<PrimitivesPriority>::get_type_info_static().name] =
|
||||
std::make_shared<VariantWrapper<PrimitivesPriority>>(PrimitivesPriority());
|
||||
rtInfo[VariantWrapper<ov::PrimitivesPriority>::get_type_info_static()] =
|
||||
std::make_shared<VariantWrapper<ov::PrimitivesPriority>>(ov::PrimitivesPriority());
|
||||
};
|
||||
auto setPrim = [&](Node::RTMap& rtInfo, const std::string& name) {
|
||||
rtInfo[VariantWrapper<PrimitivesPriority>::get_type_info_static().name] =
|
||||
std::make_shared<VariantWrapper<PrimitivesPriority>>(PrimitivesPriority(name));
|
||||
rtInfo[VariantWrapper<ov::PrimitivesPriority>::get_type_info_static()] =
|
||||
std::make_shared<VariantWrapper<ov::PrimitivesPriority>>(ov::PrimitivesPriority(name));
|
||||
};
|
||||
checkCustomRt(setPrimEmpty, setPrim);
|
||||
}
|
||||
|
@ -213,7 +213,12 @@ class IREngine(object):
|
||||
if layer.attrib['type'] == 'Const':
|
||||
assert 'offset' in new_attrs and 'size' in new_attrs, \
|
||||
'Incorrect attributes for Const layer, {} instead of {}!'.format(new_attrs.keys(), ['offset', 'size'])
|
||||
new_attrs.update(self.__prepare_bin_attrs(layer, 0, 'custom', new_attrs['offset'], new_attrs['size'], layer[1][0].attrib['precision']))
|
||||
precision = ""
|
||||
for item in layer:
|
||||
if item.tag == "output":
|
||||
precision = item[0].attrib["precision"]
|
||||
break
|
||||
new_attrs.update(self.__prepare_bin_attrs(layer, 0, 'custom', new_attrs['offset'], new_attrs['size'], precision))
|
||||
layer_attrs.update(new_attrs)
|
||||
elif attr.tag == 'input':
|
||||
inputs_counter = len(attr)
|
||||
@ -223,7 +228,8 @@ class IREngine(object):
|
||||
port_id = int(port.attrib['id'])
|
||||
output_shape = []
|
||||
for dim in port:
|
||||
output_shape.append(int(dim.text))
|
||||
if dim.tag == "dim":
|
||||
output_shape.append(int(dim.text))
|
||||
|
||||
output_shape = shape_array([d if d != -1 else dynamic_dimension_value for d in output_shape])
|
||||
|
||||
|
@@ -54,6 +54,17 @@ struct OPENVINO_API DiscreteTypeInfo {
        return *this == target_type || (parent && parent->is_castable(target_type));
    }

+    std::string get_version() const {
+        if (version_id) {
+            return std::string(version_id);
+        }
+        return std::to_string(version);
+    }
+
+    operator std::string() const {
+        return std::string(name) + "_" + get_version();
+    }
+
    // For use as a key
    bool operator<(const DiscreteTypeInfo& b) const;
    bool operator<=(const DiscreteTypeInfo& b) const;
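Given this conversion, the rt_info map key for an attribute is "<name>_<version>"; for example the fused_names attribute (name "fused_names", version "0") keys as "fused_names_0", which is exactly the string the updated constant-folding checks and tests below look for. A small illustrative sketch:

    #include <iostream>
    #include <transformations/rt_info/fused_names_attribute.hpp>

    int main() {
        const auto& info = ov::VariantWrapper<ngraph::FusedNames>::get_type_info_static();
        std::cout << std::string(info) << std::endl;   // expected output: fused_names_0
        return 0;
    }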
@ -14,6 +14,7 @@
|
||||
|
||||
namespace ov {
|
||||
class Node;
|
||||
class AttributeVisitor;
|
||||
using VariantTypeInfo = DiscreteTypeInfo;
|
||||
|
||||
class OPENVINO_API Variant {
|
||||
@ -27,6 +28,11 @@ public:
|
||||
virtual std::string to_string() {
|
||||
return "";
|
||||
}
|
||||
virtual bool visit_attributes(AttributeVisitor&) {
|
||||
return false;
|
||||
}
|
||||
|
||||
using type_info_t = DiscreteTypeInfo;
|
||||
};
|
||||
|
||||
template <typename VT>
|
||||
@ -34,6 +40,8 @@ class VariantImpl : public Variant {
|
||||
public:
|
||||
using value_type = VT;
|
||||
|
||||
VariantImpl() = default;
|
||||
|
||||
VariantImpl(const value_type& value) : m_value(value) {}
|
||||
|
||||
const value_type& get() const {
|
||||
|
@@ -805,7 +805,7 @@ bool ov::Node::evaluate_upper(const HostTensorVector& output_values) const {
 bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& input_values) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "Node::constant_fold");

-    if (m_rt_info.count("DISABLED_CONSTANT_FOLDING")) {
+    if (m_rt_info.count("disabled_constant_folding_0")) {
         return false;
     }

@@ -174,7 +174,7 @@ bool op::v3::ShapeOf::evaluate_upper(const HostTensorVector& output_values) cons

 bool op::v3::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "op::v3::ShapeOf::constant_fold");
-    if (get_rt_info().count("DISABLED_CONSTANT_FOLDING"))
+    if (get_rt_info().count("disabled_constant_folding_0"))
         return false;
     return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0]);
 }
@@ -233,7 +233,7 @@ bool op::v0::ShapeOf::has_evaluate() const {

 bool op::v0::ShapeOf::constant_fold(OutputVector& output_values, const OutputVector& input_values) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::nGraph, "op::v0::ShapeOf::constant_fold");
-    if (get_rt_info().count("DISABLED_CONSTANT_FOLDING"))
+    if (get_rt_info().count("disabled_constant_folding_0"))
         return false;
     return shape_of::constant_fold_shape_of(this, output_values[0], input_values[0]);
 }
@@ -87,7 +87,7 @@ bool ngraph::pass::ConstantFolding::pre_calculated_values_folding(const std::sha
     if (status) {
         for (const auto& node : order) {
             const auto& rt_info = node->get_rt_info();
-            if (rt_info.count("DISABLED_CONSTANT_FOLDING")) {
+            if (rt_info.count("disabled_constant_folding_0")) {
                 status = false;
                 break;
             }
739
ngraph/frontend/ir/src/ir_deserializer.cpp
Normal file
739
ngraph/frontend/ir/src/ir_deserializer.cpp
Normal file
@ -0,0 +1,739 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#include <xml_parse_utils.h>
|
||||
|
||||
#include <ie_ngraph_utils.hpp>
|
||||
#include <ir_deserializer.hpp>
|
||||
#include <ngraph/opsets/opset1.hpp>
|
||||
#include <ngraph_ops/framework_node.hpp>
|
||||
#include <pugixml.hpp>
|
||||
#include <rt_info_deserializer.hpp>
|
||||
#include <transformations/rt_info/attributes.hpp>
|
||||
#include <utils.hpp>
|
||||
|
||||
#include "ir_frontend/model.hpp"
|
||||
|
||||
using namespace ov;
|
||||
|
||||
XmlDeserializer::IoMap XmlDeserializer::updated_io_map(const pugi::xml_node& node) {
|
||||
auto body_node = node.child("body");
|
||||
|
||||
if (body_node.empty()) {
|
||||
IE_THROW() << "Missing body part.";
|
||||
}
|
||||
// Fill map: parameter/result id to parameter/result number in Function
|
||||
|
||||
auto extend_io_map = io_map;
|
||||
|
||||
FOREACH_CHILD (layer, body_node.child("layers"), "layer") {
|
||||
auto type = XMLParseUtils::GetStrAttr(layer, "type");
|
||||
|
||||
if (type == "Parameter") {
|
||||
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
|
||||
extend_io_map.inputs.insert({id, -1}); // try add as unconnected
|
||||
} else if (type == "Result") {
|
||||
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
|
||||
extend_io_map.outputs.insert({id, -1}); // try add as unconnected
|
||||
}
|
||||
}
|
||||
return extend_io_map;
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> XmlDeserializer::parseInputDescription(
|
||||
const pugi::xml_node& node) {
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> inputs;
|
||||
const auto up_io_map = updated_io_map(node);
|
||||
|
||||
// Parse PortMap: external_port_id for inputs does not always appear in consecutive order
|
||||
std::map<uint64_t, pugi::xml_node> input_map;
|
||||
FOREACH_CHILD (input, node.child("port_map"), "input") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id");
|
||||
input_map.emplace(ext_port_id, input);
|
||||
}
|
||||
|
||||
for (const auto& input : input_map) {
|
||||
auto& xml_input = input.second;
|
||||
auto axis_attr = xml_input.attribute("axis");
|
||||
int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id");
|
||||
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
|
||||
|
||||
// if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput.
|
||||
if (!axis_attr.empty()) {
|
||||
size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis");
|
||||
int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0);
|
||||
int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1);
|
||||
int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1);
|
||||
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1);
|
||||
|
||||
const auto input_index = up_io_map.inputs.at(body_parameter_index);
|
||||
|
||||
inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::SliceInputDescription>(ti_input_index,
|
||||
input_index,
|
||||
start,
|
||||
stride,
|
||||
part_size,
|
||||
end,
|
||||
axis));
|
||||
} else {
|
||||
// otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput
|
||||
bool is_back_edge_exist = false;
|
||||
FOREACH_CHILD (xml_edge, node.child("back_edges"), "edge") {
|
||||
size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer");
|
||||
|
||||
if (to_layer == body_parameter_index) {
|
||||
size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer");
|
||||
|
||||
const auto input_index = up_io_map.inputs.at(body_parameter_index);
|
||||
const auto output_index = up_io_map.outputs.at(from_layer);
|
||||
|
||||
inputs.push_back(
|
||||
std::make_shared<ngraph::op::util::SubGraphOp::MergedInputDescription>(ti_input_index,
|
||||
input_index,
|
||||
output_index));
|
||||
|
||||
is_back_edge_exist = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// ti_input_index = -1 means that Parameter of the body is not connected to inputs of
|
||||
// TensorIterator and is used only for internal needs.
|
||||
if (!is_back_edge_exist && ti_input_index >= 0) {
|
||||
const auto input_index = up_io_map.inputs.at(body_parameter_index);
|
||||
|
||||
inputs.push_back(
|
||||
std::make_shared<ngraph::op::util::SubGraphOp::InvariantInputDescription>(ti_input_index,
|
||||
input_index));
|
||||
}
|
||||
}
|
||||
}
|
||||
return inputs;
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> XmlDeserializer::parseOutputDescription(
|
||||
const pugi::xml_node& node) {
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> outputs;
|
||||
const auto up_io_map = updated_io_map(node);
|
||||
|
||||
// Parse PortMap: outputs
|
||||
std::map<int64_t, pugi::xml_node> output_map;
|
||||
FOREACH_CHILD (output, node.child("port_map"), "output") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id");
|
||||
output_map.emplace(ext_port_id, output);
|
||||
}
|
||||
|
||||
uint64_t output_number = 0;
|
||||
for (const auto& output : output_map) {
|
||||
auto& xml_output = output.second;
|
||||
auto axis_attr = xml_output.attribute("axis");
|
||||
size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
|
||||
|
||||
// if external_port_id < 0 it means that this body result isn't connected to the Loop output
|
||||
// and is used only for internal needs. For TensorIterator external_port_id is always > 0.
|
||||
if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) {
|
||||
// if axis is set, then concatenation is enabled. Create
|
||||
// ngraph::TensorIterator::ConcatOutput.
|
||||
if (!axis_attr.empty()) {
|
||||
int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis");
|
||||
int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0);
|
||||
int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1);
|
||||
int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1);
|
||||
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1);
|
||||
|
||||
const auto output_index = up_io_map.outputs.at(body_result_index);
|
||||
|
||||
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::ConcatOutputDescription>(output_index,
|
||||
output_number,
|
||||
start,
|
||||
stride,
|
||||
part_size,
|
||||
end,
|
||||
axis));
|
||||
} else {
|
||||
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
|
||||
const auto output_index = up_io_map.outputs.at(body_result_index);
|
||||
|
||||
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::BodyOutputDescription>(output_index,
|
||||
output_number,
|
||||
-1));
|
||||
}
|
||||
output_number++;
|
||||
}
|
||||
}
|
||||
return outputs;
|
||||
}
|
||||
|
||||
ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) {
|
||||
ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1};
|
||||
const auto up_io_map = updated_io_map(node);
|
||||
|
||||
NGRAPH_CHECK(!up_io_map.inputs.empty() || !up_io_map.outputs.empty(),
|
||||
"No parameters or results found in body Function.");
|
||||
|
||||
// Parse PortMap: external_port_id for inputs/outputs does not always appear in consecutive
|
||||
// order
|
||||
std::map<uint64_t, pugi::xml_node> input_map;
|
||||
FOREACH_CHILD (input, node.child("port_map"), "input") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id");
|
||||
input_map.emplace(ext_port_id, input);
|
||||
}
|
||||
std::map<int64_t, pugi::xml_node> output_map;
|
||||
FOREACH_CHILD (output, node.child("port_map"), "output") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id");
|
||||
output_map.emplace(ext_port_id, output);
|
||||
}
|
||||
|
||||
for (const auto& input : input_map) {
|
||||
auto& xml_input = input.second;
|
||||
auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", "");
|
||||
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
|
||||
if (purpose == "current_iteration") {
|
||||
result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index);
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto& output : output_map) {
|
||||
auto& xml_output = output.second;
|
||||
auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", "");
|
||||
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
|
||||
if (purpose == "execution_condition") {
|
||||
result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
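// Illustrative port_map fragment this routine understands (ids are hypothetical):
//
//   <port_map>
//       <input  external_port_id="0"  internal_layer_id="1" purpose="current_iteration"/>
//       <output external_port_id="-1" internal_layer_id="4" purpose="execution_condition"/>
//   </port_map>
//
// The matching body Parameter/Result indices are written into SpecialBodyPorts.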
|
||||
|
||||
void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) {
|
||||
static const std::unordered_set<std::string> skip_names = {"input_descriptions",
|
||||
"output_descriptions",
|
||||
"special_body_ports"};
|
||||
std::string val;
|
||||
|
||||
// for TensorIterator look for 'port_map' as 'data' does not exist
|
||||
if (m_node.child("port_map")) {
|
||||
if (auto a = ngraph::as_type<
|
||||
ngraph::AttributeAdapter<std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>>>>(
|
||||
&adapter)) {
|
||||
a->set(parseInputDescription(m_node));
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>>>>(&adapter)) {
|
||||
a->set(parseOutputDescription(m_node));
|
||||
} else if (auto a =
|
||||
ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::v5::Loop::SpecialBodyPorts>>(&adapter)) {
|
||||
a->set(parsePurposeAttribute(m_node));
|
||||
}
|
||||
}
|
||||
|
||||
if (skip_names.count(name) && !getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::Type>>(&adapter)) {
|
||||
static_cast<ngraph::element::Type&>(*a) = InferenceEngine::details::convertPrecision(val);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::PartialShape>>(&adapter)) {
|
||||
std::vector<int64_t> shape;
|
||||
std::vector<ngraph::Dimension> dims;
|
||||
if (!getParameters<int64_t>(m_node.child("data"), name, shape))
|
||||
return;
|
||||
for (const auto& dim : shape)
|
||||
dims.emplace_back(dim);
|
||||
static_cast<ngraph::PartialShape&>(*a) = ngraph::PartialShape(dims);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::Shape>>(&adapter)) {
|
||||
std::vector<size_t> shape;
|
||||
if (!getParameters<size_t>(m_node.child("data"), name, shape))
|
||||
return;
|
||||
static_cast<ngraph::Shape&>(*a) = ngraph::Shape(shape);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::Strides>>(&adapter)) {
|
||||
std::vector<size_t> shape;
|
||||
if (!getParameters<size_t>(m_node.child("data"), name, shape))
|
||||
return;
|
||||
static_cast<ngraph::Strides&>(*a) = ngraph::Strides(shape);
|
||||
#ifdef __APPLE__
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<size_t>>>(&adapter)) {
|
||||
std::vector<size_t> result;
|
||||
if (!getParameters<size_t>(m_node.child("data"), name, result))
|
||||
return;
|
||||
static_cast<std::vector<size_t>&>(*a) = result;
|
||||
#else
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<size_t>>>(&adapter)) {
|
||||
std::vector<size_t> result;
|
||||
if (!getParameters<size_t>(m_node.child("data"), name, result))
|
||||
return;
|
||||
a->set(result);
|
||||
#endif
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::AxisSet>>(&adapter)) {
|
||||
std::vector<size_t> axes;
|
||||
if (!getParameters<size_t>(m_node.child("data"), name, axes))
|
||||
return;
|
||||
static_cast<ngraph::AxisSet&>(*a) = ngraph::AxisSet(axes);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::TopKSortType>>(&adapter)) {
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
static_cast<ngraph::op::TopKSortType&>(*a) = ngraph::as_enum<ngraph::op::TopKSortType>(val);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::TopKMode>>(&adapter)) {
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
static_cast<ngraph::op::TopKMode&>(*a) = ngraph::as_enum<ngraph::op::TopKMode>(val);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::CoordinateDiff>>(&adapter)) {
|
||||
std::vector<size_t> shape;
|
||||
if (!getParameters<size_t>(m_node.child("data"), name, shape))
|
||||
return;
|
||||
std::vector<std::ptrdiff_t> coord_diff(shape.begin(), shape.end());
|
||||
static_cast<ngraph::CoordinateDiff&>(*a) = ngraph::CoordinateDiff(coord_diff);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::shared_ptr<ngraph::Variable>>>(&adapter)) {
|
||||
std::string variable_id;
|
||||
if (!getStrAttribute(m_node.child("data"), name, variable_id))
|
||||
return;
|
||||
if (!m_variables.count(variable_id)) {
|
||||
m_variables[variable_id] = std::make_shared<ngraph::Variable>(
|
||||
ngraph::VariableInfo{ngraph::PartialShape::dynamic(), ngraph::element::dynamic, variable_id});
|
||||
}
|
||||
a->set(m_variables[variable_id]);
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>>(
|
||||
&adapter)) {
|
||||
std::string value;
|
||||
pugi::xml_node dn = m_node.child("data");
|
||||
auto type = XMLParseUtils::GetStrAttr(m_node, "type");
|
||||
|
||||
if (dn.empty())
|
||||
IE_THROW() << "No attrtibutes defined for " << type << " op!";
|
||||
|
||||
if (getStrAttribute(dn, name, value)) {
|
||||
auto buffer = std::make_shared<ngraph::runtime::AlignedBuffer>(value.size());
|
||||
auto data = static_cast<char*>(buffer->get_ptr());
|
||||
value.copy(data, value.size());
|
||||
a->set(buffer);
|
||||
} else if (name == "value" && type == "Const") {
|
||||
std::vector<int64_t> shape;
|
||||
std::string el_type_str;
|
||||
|
||||
size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset");
|
||||
size_t size = XMLParseUtils::GetUInt64Attr(dn, "size");
|
||||
if (!getStrAttribute(dn, "element_type", el_type_str))
|
||||
return;
|
||||
if (!getParameters<int64_t>(dn, "shape", shape))
|
||||
return;
|
||||
|
||||
ngraph::element::Type el_type = InferenceEngine::details::convertPrecision(el_type_str);
|
||||
|
||||
if (!m_weights)
|
||||
IE_THROW() << "Empty weights data in bin file or bin file cannot be found!";
|
||||
if (m_weights->size() < offset + size)
|
||||
IE_THROW() << "Incorrect weights in bin file!";
|
||||
if (size < std::ceil(ngraph::shape_size(shape) * el_type.bitwidth() / 8.f))
|
||||
IE_THROW() << "Attribute and shape size are inconsistent for " << type << " op!";
|
||||
|
||||
char* data = m_weights->get_ptr<char>() + offset;
|
||||
auto buffer =
|
||||
std::make_shared<ngraph::runtime::SharedBuffer<std::shared_ptr<ngraph::runtime::AlignedBuffer>>>(
|
||||
data,
|
||||
size,
|
||||
m_weights);
|
||||
a->set(buffer);
|
||||
}
|
||||
} else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::FrameworkNodeAttrs>>(&adapter)) {
|
||||
const auto& type = XMLParseUtils::GetStrAttr(m_node, "type");
|
||||
const auto& version = XMLParseUtils::GetStrAttr(m_node, "version");
|
||||
|
||||
ngraph::op::FrameworkNodeAttrs node_attrs;
|
||||
node_attrs.set_opset_name(version);
|
||||
node_attrs.set_type_name(type);
|
||||
|
||||
pugi::xml_node dn = m_node.child("data");
|
||||
|
||||
if (!dn.empty()) {
|
||||
for (const auto& data_attr : dn.attributes()) {
|
||||
node_attrs[data_attr.name()] = data_attr.as_string();
|
||||
}
|
||||
}
|
||||
|
||||
a->set(node_attrs);
|
||||
} else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::TypeVector>>(&adapter)) {
|
||||
ngraph::element::TypeVector types;
|
||||
if (!getParameters<ngraph::element::Type>(m_node.child("data"), name, types))
|
||||
return;
|
||||
a->set(types);
|
||||
} else {
|
||||
IE_THROW() << "Error IR reading. Attribute adapter can not be found for " << name << " parameter";
|
||||
}
|
||||
}
|
||||
|
||||
void XmlDeserializer::on_adapter(const std::string& name,
|
||||
ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>>& adapter) {
|
||||
std::shared_ptr<ngraph::Function> ngraph_function;
|
||||
if (!name.compare("body")) {
|
||||
auto body_node = m_node.child(name.c_str());
|
||||
if (body_node.empty()) {
|
||||
IE_THROW() << "TensorIterator has no body.";
|
||||
}
|
||||
ngraph_function = parse_function(m_node.child(name.c_str()), m_weights);
|
||||
} else if (!name.compare("net")) {
|
||||
ngraph_function = parse_function(m_node, m_weights);
|
||||
} else {
|
||||
IE_THROW() << "Error: not recognized adapter name: " << name << ".";
|
||||
}
|
||||
adapter.set(ngraph_function);
|
||||
}
|
||||
|
||||
std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(const pugi::xml_node& root,
|
||||
const ov::Weights& weights) {
|
||||
// OV_ITT_SCOPE_CHAIN(FIRST_INFERENCE, taskChain, itt::domains::V10Reader_RT, "V10Parser", "Parse");
|
||||
|
||||
struct FunctionNodes {
|
||||
ngraph::ParameterVector parameters;
|
||||
ngraph::ResultVector results;
|
||||
ngraph::NodeVector all;
|
||||
ngraph::SinkVector sinks;
|
||||
};
|
||||
|
||||
struct edge {
|
||||
size_t fromLayerId, fromPortId, toPortId;
|
||||
};
|
||||
struct node_params {
|
||||
pugi::xml_node xml;
|
||||
GenericLayerParams params;
|
||||
};
|
||||
|
||||
std::map<size_t /*layer-id*/, node_params> params;
|
||||
|
||||
std::vector<size_t /*layer-id*/> outputs;
|
||||
std::unordered_set<std::string> opName;
|
||||
|
||||
// Read all layers and store their parameters in params map
|
||||
FOREACH_CHILD (node, root.child("layers"), "layer") {
|
||||
auto node_param = parseGenericParams(node);
|
||||
if (opName.find(node_param.name) != opName.end() && node_param.type != "Result")
|
||||
IE_THROW() << "Invalid IR! " << node_param.name << " name is not unique!";
|
||||
opName.insert(node_param.name);
|
||||
params[node_param.layerId] = {node, node_param};
|
||||
if (node_param.type == "Result" || node_param.type == "Assign") {
|
||||
outputs.push_back(node_param.layerId);
|
||||
}
|
||||
}
|
||||
|
||||
std::map<size_t /*to-layer-id*/, std::vector<edge>> edges;
|
||||
std::map<size_t, std::shared_ptr<ngraph::Node>> id_to_node;
|
||||
|
||||
// Read all edges and store them for further usage
|
||||
FOREACH_CHILD (_ec, root.child("edges"), "edge") {
|
||||
size_t fromLayer = XMLParseUtils::GetUIntAttr(_ec, "from-layer");
|
||||
size_t fromPort = XMLParseUtils::GetUIntAttr(_ec, "from-port");
|
||||
size_t toLayer = XMLParseUtils::GetUIntAttr(_ec, "to-layer");
|
||||
size_t toPort = XMLParseUtils::GetUIntAttr(_ec, "to-port");
|
||||
edges[toLayer].push_back({fromLayer, fromPort, toPort});
|
||||
}
|
||||
|
||||
// Run DFS starting from outputs to get the nodes' topological order
|
||||
std::set<size_t> used;
|
||||
std::vector<size_t> order;
|
||||
std::function<void(size_t)> dfs = [&edges, &order, &used, &dfs](const size_t id) {
|
||||
if (used.count(id))
|
||||
return;
|
||||
used.insert(id);
|
||||
for (auto& edge : edges[id]) {
|
||||
dfs(edge.fromLayerId);
|
||||
}
|
||||
order.push_back(id);
|
||||
};
|
||||
std::for_each(outputs.begin(), outputs.end(), dfs);
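// Illustrative order (hypothetical ids): with edges 0->1, 1->2, 0->2 and a single
// Result layer with id 2, the post-order produced here is 0, 1, 2, so every
// producer node is constructed before its consumers in the loop below.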
|
||||
|
||||
// OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphNodes");
|
||||
|
||||
FunctionNodes func_nodes;
|
||||
|
||||
std::map<std::string, std::shared_ptr<ngraph::Node>> variable_id_to_read_value;
|
||||
|
||||
// Create nGraph operations following the topological order
|
||||
for (auto& layer_id : order) {
|
||||
auto& p = params[layer_id];
|
||||
const auto& edgeIt = edges.find(layer_id);
|
||||
if (edgeIt == edges.end())
|
||||
continue;
|
||||
ngraph::OutputVector inputs(edgeIt->second.size());
|
||||
for (auto& e : edgeIt->second) {
|
||||
auto input_node = id_to_node[e.fromLayerId];
|
||||
if (!input_node) {
|
||||
IE_THROW() << "Attempt to access node " << e.fromLayerId << " that not in graph.";
|
||||
}
|
||||
auto& p_output = params[e.fromLayerId].params;
|
||||
size_t const realInputPortId = p.params.getRealInputPortId(e.toPortId);
|
||||
if (realInputPortId >= inputs.size())
|
||||
IE_THROW() << p.params.type << " layer " << p.params.name << " with id: " << p.params.layerId
|
||||
<< " is inconsistent!";
|
||||
inputs[realInputPortId] = input_node->output(p_output.getRealOutputPortId(e.fromPortId));
|
||||
}
|
||||
|
||||
auto node = createNode(inputs, p.xml, weights, p.params);
|
||||
id_to_node[layer_id] = node;
|
||||
|
||||
// Check that the output shape after nGraph node validation is the same as in the IR
|
||||
// because the IR is always right!
|
||||
// Temporarily disabled!
|
||||
// for (size_t i = 0; i < p.params.outputPorts.size(); ++i) {
|
||||
// if (p.params.outputPorts[i].dims != node->output(i).get_shape()) {
|
||||
// IE_THROW() << "Shape after nGraph infer " <<
|
||||
// details::dumpVec(node->output(i).get_shape())
|
||||
// << " differ from IR shapes: " <<
|
||||
// details::dumpVec(p.params.outputPorts[i].dims);
|
||||
// }
|
||||
// }
|
||||
|
||||
if (const auto& parameter_node = std::dynamic_pointer_cast<ngraph::op::Parameter>(node)) {
|
||||
io_map.inputs.insert({layer_id, func_nodes.parameters.size()});
|
||||
func_nodes.parameters.emplace_back(parameter_node);
|
||||
}
|
||||
|
||||
if (const auto& result_node = std::dynamic_pointer_cast<ngraph::op::Result>(node)) {
|
||||
io_map.outputs.insert({layer_id, func_nodes.results.size()});
|
||||
func_nodes.results.emplace_back(result_node);
|
||||
}
|
||||
|
||||
if (const auto& sink = std::dynamic_pointer_cast<ngraph::op::Sink>(node)) {
|
||||
func_nodes.sinks.emplace_back(sink);
|
||||
}
|
||||
|
||||
if (const auto& read_value = std::dynamic_pointer_cast<ngraph::op::ReadValueBase>(node)) {
|
||||
variable_id_to_read_value[read_value->get_variable_id()] = read_value;
|
||||
}
|
||||
|
||||
func_nodes.all.emplace_back(node);
|
||||
}
|
||||
|
||||
// OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphFunction");
|
||||
|
||||
auto function = std::make_shared<ngraph::Function>(func_nodes.results,
|
||||
func_nodes.sinks,
|
||||
func_nodes.parameters,
|
||||
XMLParseUtils::GetStrAttr(root, "name", ""));
|
||||
for (const auto& sink : func_nodes.sinks) {
|
||||
if (const auto& assign = std::dynamic_pointer_cast<ngraph::op::AssignBase>(sink)) {
|
||||
assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id()));
|
||||
}
|
||||
}
|
||||
|
||||
return function;
|
||||
}
|
||||
|
||||
GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) {
|
||||
const auto parsePort = [this](const pugi::xml_node& parentNode,
|
||||
const GenericLayerParams& params,
|
||||
bool input) -> GenericLayerParams::LayerPortData {
|
||||
GenericLayerParams::LayerPortData port;
|
||||
|
||||
port.portId = XMLParseUtils::GetIntAttr(parentNode, "id");
|
||||
|
||||
FOREACH_CHILD (node, parentNode, "dim") {
|
||||
int64_t dim = 0;
|
||||
const pugi::char_t* dimVal = node.child_value();
|
||||
std::stringstream ss(dimVal);
|
||||
if (!(ss >> dim) || dim < -1) {
|
||||
IE_THROW() << "dimension (" << dimVal << ") in node " << node.name()
|
||||
<< " must be greater or equal to -1: at offset " << node.offset_debug();
|
||||
}
|
||||
port.dims.push_back(dim);
|
||||
}
|
||||
|
||||
ngraph::element::Type type(ngraph::element::Type_t::undefined);
|
||||
// Input ports have no precision attribute
|
||||
if (!input) {
|
||||
const std::string& preStr = XMLParseUtils::GetStrAttr(parentNode, "precision");
|
||||
type = InferenceEngine::details::convertPrecision(preStr);
|
||||
}
|
||||
port.precision = type;
|
||||
std::vector<std::string> names;
|
||||
if (getParameters<std::string>(parentNode, "names", names)) {
|
||||
for (size_t i = 0; i < names.size(); i++) {
|
||||
std::string name = names[i];
|
||||
// Restore the original name if it contains the delimiter.
// getParameters(...) returns the vector of names split by the ',' delimiter,
// but some names may contain ',' as part of the name; in that case '\' is used
// to escape the delimiter. The loop below finds such names and restores them.
|
||||
while (i < names.size() && names[i].at(names[i].length() - 1) == '\\') {
|
||||
name.replace(names[i].length() - 1, 1, ",");
|
||||
name += names[++i];
|
||||
}
|
||||
port.names.emplace(name);
|
||||
}
|
||||
}
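// Illustrative example (hypothetical names): a "names" attribute of
// "prob,data\,tensor" is split by getParameters into {"prob", "data\", "tensor"};
// the loop above re-joins the escaped pieces, so the restored port names are
// {"prob", "data,tensor"}.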
|
||||
return port;
|
||||
};
|
||||
GenericLayerParams params;
|
||||
|
||||
params.layerId = XMLParseUtils::GetIntAttr(node, "id");
|
||||
params.version = XMLParseUtils::GetStrAttr(node, "version");
|
||||
|
||||
params.type = XMLParseUtils::GetStrAttr(node, "type");
|
||||
|
||||
params.name = XMLParseUtils::GetStrAttr(node, "name");
|
||||
|
||||
auto outNode = node.child("output");
|
||||
if (!outNode.empty()) {
|
||||
FOREACH_CHILD (_cn, outNode, "port") { params.outputPorts.emplace_back(parsePort(_cn, params, false)); }
|
||||
}
|
||||
auto inpNode = node.child("input");
|
||||
if (!inpNode.empty()) {
|
||||
FOREACH_CHILD (_cn, inpNode, "port") { params.inputPorts.emplace_back(parsePort(_cn, params, true)); }
|
||||
}
|
||||
return params;
|
||||
}
|
||||
|
||||
std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(const std::vector<ngraph::Output<ngraph::Node>>& inputs,
|
||||
const pugi::xml_node& node,
|
||||
const ov::Weights& weights,
|
||||
const GenericLayerParams& params) {
|
||||
// Check that inputs are correctly defined
|
||||
for (size_t i = 0; i < inputs.size(); i++) {
|
||||
if (!inputs[i].get_node())
|
||||
IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId
|
||||
<< " has incorrect input with index " << i << "!";
|
||||
if (ngraph::element::Type_t::undefined == inputs[i].get_element_type())
|
||||
IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId
|
||||
<< " has undefined element type for input with index " << i << "!";
|
||||
}
|
||||
|
||||
std::shared_ptr<ngraph::Node> ngraphNode;
|
||||
|
||||
// Find registered opset
|
||||
auto opsetIt = m_opsets.find(params.version);
|
||||
|
||||
// Try to create operation from loaded opsets
|
||||
static const std::unordered_set<std::string> experimental_ops_added_to_opset = {
|
||||
"ExperimentalDetectronDetectionOutput",
|
||||
"ExperimentalDetectronGenerateProposalsSingleImage",
|
||||
"ExperimentalDetectronPriorGridGenerator",
|
||||
"ExperimentalDetectronROIFeatureExtractor",
|
||||
"ExperimentalDetectronTopKROIs",
|
||||
"GRUCell",
|
||||
"RNNCell",
|
||||
"Proposal"};
|
||||
|
||||
if (experimental_ops_added_to_opset.count(params.type) &&
|
||||
(params.version == "experimental" || params.version == "extension")) {
|
||||
opsetIt = m_opsets.find("opset6");
|
||||
}
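// Illustrative case (assuming such a layer exists in the IR): a layer with
// type="Proposal" and version="experimental" falls into the branch above and is
// created from opset6 instead of the non-existent "experimental" opset.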
|
||||
|
||||
if (!ngraphNode && opsetIt != m_opsets.end()) {
|
||||
auto const& type = params.type == "Const" ? "Constant" : params.type;
|
||||
|
||||
if (params.version == "opset1") {
|
||||
// MVN, ROIPooling and ReorgYolo were missing in opset1
|
||||
if (type == "MVN" || type == "ROIPooling" || type == "ReorgYolo") {
|
||||
opsetIt = m_opsets.find("opset2");
|
||||
if (opsetIt == m_opsets.end()) {
|
||||
IE_THROW() << "Cannot create " << params.type << " layer " << params.name
|
||||
<< " id:" << params.layerId << " from unsupported opset: " << params.version;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
auto const& opset = opsetIt->second;
|
||||
|
||||
ngraphNode = std::shared_ptr<ngraph::Node>(opset.create_insensitive(type));
|
||||
if (!ngraphNode) {
|
||||
IE_THROW() << "Opset " << params.version << " doesn't contain the operation with type: " << type;
|
||||
}
|
||||
// Share weights from the constant blob
|
||||
if (auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(ngraphNode)) {
|
||||
constant->alloc_buffer_on_visit_attributes(false);
|
||||
}
|
||||
ngraphNode->set_arguments(inputs);
|
||||
XmlDeserializer visitor(node, weights, m_opsets, m_variables);
|
||||
|
||||
if (ngraphNode->visit_attributes(visitor)) {
|
||||
ngraphNode->constructor_validate_and_infer_types();
|
||||
}
|
||||
|
||||
// Make sure that all default values are initialized:
|
||||
ngraphNode = ngraphNode->clone_with_new_inputs(ngraphNode->input_values());
|
||||
}
|
||||
|
||||
if (!ngraphNode && m_use_framework_node) {
|
||||
ngraphNode = std::make_shared<ngraph::op::FrameworkNode>(inputs);
|
||||
XmlDeserializer visitor(node, weights, m_opsets, m_variables);
|
||||
ngraphNode->visit_attributes(visitor);
|
||||
|
||||
size_t index{0};
|
||||
for (const auto& output_params : params.outputPorts) {
|
||||
ngraphNode->set_output_type(index, output_params.precision, ngraph::PartialShape(output_params.dims));
|
||||
++index;
|
||||
}
|
||||
}
|
||||
|
||||
if (!ngraphNode) {
|
||||
IE_THROW() << "Cannot create " << params.type << " layer " << params.name << " id:" << params.layerId
|
||||
<< " from unsupported opset: " << params.version;
|
||||
}
|
||||
|
||||
// Save run time info
|
||||
auto& rtInfo = ngraphNode->get_rt_info();
|
||||
pugi::xml_node dn = node.child("data");
|
||||
if (dn) {
|
||||
const auto pr_data = dn.attribute("PrimitivesPriority");
|
||||
if (pr_data) {
|
||||
rtInfo["PrimitivesPriority"] = std::make_shared<::ngraph::VariantWrapper<std::string>>(pr_data.value());
|
||||
}
|
||||
const auto aw_data = dn.attribute("alt_width");
|
||||
if (aw_data) {
|
||||
rtInfo["alt_width"] = std::make_shared<::ngraph::VariantWrapper<std::string>>(aw_data.value());
|
||||
}
|
||||
}
|
||||
|
||||
ngraphNode->set_friendly_name(params.name);
|
||||
for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) {
|
||||
if (!params.outputPorts[i].names.empty())
|
||||
ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names);
|
||||
}
|
||||
|
||||
ov::pass::Attributes attrs_factory;
|
||||
auto set_runtime_info = [&attrs_factory](RTMap& rt_info, const pugi::xml_node& rt_attrs) {
|
||||
if (!rt_attrs)
|
||||
return;
|
||||
for (const auto& item : rt_attrs) {
|
||||
std::string attribute_name, attribute_version;
|
||||
if (!getStrAttribute(item, "name", attribute_name)) {
|
||||
IE_THROW() << "rt_info attribute has no \"name\" field";
|
||||
}
|
||||
if (!getStrAttribute(item, "version", attribute_version)) {
|
||||
IE_THROW() << "rt_info attribute: " << attribute_name << " has no \"version\" field";
|
||||
}
|
||||
const auto& type_info = ov::DiscreteTypeInfo(attribute_name.c_str(), 0, attribute_version.c_str());
|
||||
if (auto attr = attrs_factory.create_by_type_info(type_info)) {
|
||||
RTInfoDeserializer attribute_visitor(item);
|
||||
if (attr->visit_attributes(attribute_visitor)) {
|
||||
rt_info[type_info] = std::shared_ptr<Variant>(attr);
|
||||
} else {
|
||||
IE_THROW() << "VisitAttributes is not supported for: " << item.name() << " attribute";
|
||||
}
|
||||
} else {
|
||||
IE_THROW() << "Attribute: " << item.name() << " is not recognized";
|
||||
}
|
||||
}
|
||||
};
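// A minimal sketch of the <rt_info> layout the lambda above consumes; the attribute
// name and value below are hypothetical examples, not taken from a real IR:
//
//   <rt_info>
//       <attribute name="fused_names" version="0" value="conv1,relu1"/>
//   </rt_info>
//
// Each entry is matched against the registered factory by its (name, version) pair
// and restored into the corresponding RTMap via RTInfoDeserializer.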
|
||||
|
||||
// set node runtime info attributes
|
||||
set_runtime_info(ngraphNode->get_rt_info(), node.child("rt_info"));
|
||||
|
||||
// set output ports runtime info attributes
|
||||
auto out_node = node.child("output");
|
||||
if (!out_node.empty()) {
|
||||
size_t index{0};
|
||||
FOREACH_CHILD (rt_node, out_node, "port") {
|
||||
set_runtime_info(ngraphNode->output(index).get_rt_info(), rt_node.child("rt_info"));
|
||||
++index;
|
||||
}
|
||||
}
|
||||
|
||||
// set input ports runtime info attributes
|
||||
auto in_node = node.child("input");
|
||||
if (!in_node.empty()) {
|
||||
size_t index{0};
|
||||
FOREACH_CHILD (rt_node, in_node, "port") {
|
||||
set_runtime_info(ngraphNode->input(index).get_rt_info(), rt_node.child("rt_info"));
|
||||
++index;
|
||||
}
|
||||
}
|
||||
|
||||
return ngraphNode;
|
||||
}
|
ngraph/frontend/ir/src/ir_deserializer.hpp
@ -0,0 +1,191 @@
|
||||
// Copyright (C) 2018-2021 Intel Corporation
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <xml_parse_utils.h>
|
||||
|
||||
#include <ie_ngraph_utils.hpp>
|
||||
#include <ir_frontend/model.hpp>
|
||||
#include <istream>
|
||||
#include <memory>
|
||||
#include <ngraph/ngraph.hpp>
|
||||
#include <pugixml.hpp>
|
||||
#include <utils.hpp>
|
||||
|
||||
namespace ov {
|
||||
struct GenericLayerParams {
|
||||
struct LayerPortData {
|
||||
size_t portId;
|
||||
std::vector<ngraph::Dimension> dims;
|
||||
ngraph::element::Type_t precision;
|
||||
std::unordered_set<std::string> names;
|
||||
};
|
||||
size_t layerId;
|
||||
std::string version;
|
||||
std::string name;
|
||||
std::string type;
|
||||
std::vector<LayerPortData> inputPorts;
|
||||
std::vector<LayerPortData> outputPorts;
|
||||
|
||||
size_t getRealInputPortId(size_t id) const {
|
||||
size_t real_id = 0;
|
||||
for (auto& it : inputPorts) {
|
||||
if (it.portId == id) {
|
||||
return real_id;
|
||||
}
|
||||
++real_id;
|
||||
}
|
||||
IE_THROW() << "Can not find input port with id " << id << " in layer " << name;
|
||||
}
|
||||
|
||||
size_t getRealOutputPortId(size_t id) const {
|
||||
size_t real_id = 0;
|
||||
for (auto& it : outputPorts) {
|
||||
if (it.portId == id) {
|
||||
return real_id;
|
||||
}
|
||||
++real_id;
|
||||
}
|
||||
IE_THROW() << "Can not find output port with id " << id << " in layer " << name;
|
||||
}
|
||||
};
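// Illustrative example (hypothetical ports): if outputPorts hold ids {1, 3, 4},
// getRealOutputPortId(3) returns 1, i.e. the positional index of the port inside
// the layer rather than the raw id written in the IR.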
|
||||
|
||||
class XmlDeserializer : public ngraph::AttributeVisitor {
|
||||
public:
|
||||
explicit XmlDeserializer(const pugi::xml_node& node,
|
||||
const ov::Weights& weights,
|
||||
const std::unordered_map<std::string, ngraph::OpSet>& opsets,
|
||||
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& variables)
|
||||
: m_node(node),
|
||||
m_weights(weights),
|
||||
m_opsets(opsets),
|
||||
m_variables(variables) {}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& value) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
value.set(val);
|
||||
}
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<bool>& value) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
std::transform(val.begin(), val.end(), val.begin(), [](char ch) {
|
||||
return std::tolower(static_cast<unsigned char>(ch));
|
||||
});
|
||||
std::set<std::string> true_names{"true", "1"};
|
||||
std::set<std::string> false_names{"false", "0"};
|
||||
|
||||
bool is_true = true_names.find(val) != true_names.end();
|
||||
bool is_false = false_names.find(val) != false_names.end();
|
||||
|
||||
if (!is_true && !is_false)
|
||||
return;
|
||||
value.set(is_true);
|
||||
}
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) override;
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<double>& adapter) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
adapter.set(stringToType<double>(val));
|
||||
}
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<int64_t>& adapter) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
adapter.set(stringToType<int64_t>(val));
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name,
|
||||
ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>>& adapter) override;
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int32_t>>& adapter) override {
|
||||
std::vector<int32_t> value;
|
||||
if (!getParameters<int32_t>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override {
|
||||
std::vector<int64_t> value;
|
||||
if (!getParameters<int64_t>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<float>>& adapter) override {
|
||||
std::vector<float> value;
|
||||
if (!getParameters<float>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<std::string>>& adapter) override {
|
||||
std::vector<std::string> value;
|
||||
if (!getParameters<std::string>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void use_framework_node(bool flag) {
|
||||
m_use_framework_node = flag;
|
||||
}
|
||||
|
||||
private:
|
||||
struct IoMap {
|
||||
using NodeIdToIoIndex = std::unordered_map<size_t /*xml node id*/, uint64_t /*body io index*/>;
|
||||
NodeIdToIoIndex inputs;
|
||||
NodeIdToIoIndex outputs;
|
||||
};
|
||||
|
||||
/// \brief Traverses port_map in order to create vector of InputDescription shared_ptrs.
|
||||
/// Shall be used only for ops which have port_map attribute.
|
||||
/// \param node xml op representation
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> parseInputDescription(
|
||||
const pugi::xml_node& node);
|
||||
/// \brief Traverses port_map in order to create vector of OutputDescription shared_ptrs.
|
||||
/// Shall be used only for ops which have port_map attribute.
|
||||
/// \param node xml op representation
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> parseOutputDescription(
|
||||
const pugi::xml_node& node);
|
||||
|
||||
// TODO: consider calling this only once per layer/TI/Loop node
|
||||
IoMap updated_io_map(const pugi::xml_node& node);
|
||||
|
||||
/// \brief Traverses xml node representation in order to create nGraph function for it.
|
||||
/// \param node xml node representation
|
||||
/// \param weights weights attached to current node
|
||||
/// \return shared pointer to function representing input node
|
||||
std::shared_ptr<ngraph::Function> parse_function(const pugi::xml_node& root, const ov::Weights& weights);
|
||||
/// \brief Traverses xml node representation in order to get the purpose attribute of
/// inputs/outputs in the body of the Loop op.
/// \param node xml node representation
/// \return struct with the value of the purpose attribute
|
||||
ngraph::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node);
|
||||
|
||||
GenericLayerParams parseGenericParams(const pugi::xml_node& node);
|
||||
|
||||
std::shared_ptr<ngraph::Node> createNode(const ngraph::OutputVector& inputs,
|
||||
const pugi::xml_node& node,
|
||||
const ov::Weights& weights,
|
||||
const GenericLayerParams& params);
|
||||
|
||||
// -- DATA --
|
||||
const pugi::xml_node m_node;
|
||||
const ov::Weights& m_weights;
|
||||
const std::unordered_map<std::string, ngraph::OpSet>& m_opsets;
|
||||
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& m_variables;
|
||||
|
||||
///
|
||||
/// store information about parameters/results order during function creation
|
||||
/// it will be used during Inputs/Outputs Description creation in SubGraph processing
|
||||
///
|
||||
IoMap io_map;
|
||||
|
||||
bool m_use_framework_node{false};
|
||||
};
|
||||
} // namespace ov
|
@ -6,7 +6,7 @@
|
||||
|
||||
#include <xml_parse_utils.h>
|
||||
|
||||
#include <ie_ngraph_utils.hpp>
|
||||
#include <ir_deserializer.hpp>
|
||||
#include <ngraph/opsets/opset1.hpp>
|
||||
#include <ngraph_ops/framework_node.hpp>
|
||||
#include <pugixml.hpp>
|
||||
@ -15,893 +15,6 @@ using namespace ngraph;
|
||||
using namespace InferenceEngine;
|
||||
|
||||
namespace {
|
||||
|
||||
struct GenericLayerParams {
|
||||
struct LayerPortData {
|
||||
size_t portId;
|
||||
std::vector<ngraph::Dimension> dims;
|
||||
ngraph::element::Type_t precision;
|
||||
std::unordered_set<std::string> names;
|
||||
};
|
||||
size_t layerId;
|
||||
std::string version;
|
||||
std::string name;
|
||||
std::string type;
|
||||
std::vector<LayerPortData> inputPorts;
|
||||
std::vector<LayerPortData> outputPorts;
|
||||
|
||||
size_t getRealInputPortId(size_t id) const {
|
||||
size_t real_id = 0;
|
||||
for (auto& it : inputPorts) {
|
||||
if (it.portId == id) {
|
||||
return real_id;
|
||||
}
|
||||
++real_id;
|
||||
}
|
||||
IE_THROW() << "Can not find input port with id " << id << " in layer " << name;
|
||||
}
|
||||
|
||||
size_t getRealOutputPortId(size_t id) const {
|
||||
size_t real_id = 0;
|
||||
for (auto& it : outputPorts) {
|
||||
if (it.portId == id) {
|
||||
return real_id;
|
||||
}
|
||||
++real_id;
|
||||
}
|
||||
IE_THROW() << "Can not find output port with id " << id << " in layer " << name;
|
||||
}
|
||||
};
|
||||
|
||||
void operator>>(const std::stringstream& in, ngraph::element::Type& type) {
|
||||
type = details::convertPrecision(ngraph::trim(in.str()));
|
||||
}
|
||||
|
||||
bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value) {
|
||||
if (!node)
|
||||
return false;
|
||||
|
||||
auto attr = node.attribute(name.c_str());
|
||||
if (attr.empty())
|
||||
return false;
|
||||
value = std::string(attr.value());
|
||||
return true;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
bool getParameters(const pugi::xml_node& node, const std::string& name, std::vector<T>& value) {
|
||||
std::string param;
|
||||
if (!getStrAttribute(node, name, param))
|
||||
return false;
|
||||
std::stringstream ss(param);
|
||||
std::string field;
|
||||
while (getline(ss, field, ',')) {
|
||||
if (field.empty())
|
||||
IE_THROW() << "Cannot get vector of parameters! \"" << param << "\" is incorrect";
|
||||
std::stringstream fs(field);
|
||||
T val;
|
||||
fs >> val;
|
||||
value.emplace_back(val);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <class T>
|
||||
T stringToType(const std::string& valStr) {
|
||||
T ret{0};
|
||||
std::istringstream ss(valStr);
|
||||
if (!ss.eof()) {
|
||||
ss >> ret;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
class XmlDeserializer : public ngraph::AttributeVisitor {
|
||||
public:
|
||||
explicit XmlDeserializer(const pugi::xml_node& node,
|
||||
const ov::Weights& weights,
|
||||
const std::unordered_map<std::string, ngraph::OpSet>& opsets,
|
||||
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& variables)
|
||||
: m_node(node),
|
||||
m_weights(weights),
|
||||
m_opsets(opsets),
|
||||
m_variables(variables) {}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& value) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
value.set(val);
|
||||
}
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<bool>& value) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
std::transform(val.begin(), val.end(), val.begin(), [](char ch) {
|
||||
return std::tolower(static_cast<unsigned char>(ch));
|
||||
});
|
||||
std::set<std::string> true_names{"true", "1"};
|
||||
std::set<std::string> false_names{"false", "0"};
|
||||
|
||||
bool is_true = true_names.find(val) != true_names.end();
|
||||
bool is_false = false_names.find(val) != false_names.end();
|
||||
|
||||
if (!is_true && !is_false)
|
||||
return;
|
||||
value.set(is_true);
|
||||
}
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) override;
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<double>& adapter) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
adapter.set(stringToType<double>(val));
|
||||
}
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<int64_t>& adapter) override {
|
||||
std::string val;
|
||||
if (!getStrAttribute(m_node.child("data"), name, val))
|
||||
return;
|
||||
adapter.set(stringToType<int64_t>(val));
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name,
|
||||
ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>>& adapter) override;
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int32_t>>& adapter) override {
|
||||
std::vector<int32_t> value;
|
||||
if (!getParameters<int32_t>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override {
|
||||
std::vector<int64_t> value;
|
||||
if (!getParameters<int64_t>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<float>>& adapter) override {
|
||||
std::vector<float> value;
|
||||
if (!getParameters<float>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<std::string>>& adapter) override {
|
||||
std::vector<std::string> value;
|
||||
if (!getParameters<std::string>(m_node.child("data"), name, value))
|
||||
return;
|
||||
adapter.set(value);
|
||||
}
|
||||
|
||||
void use_framework_node(bool flag) {
|
||||
m_use_framework_node = flag;
|
||||
}
|
||||
|
||||
private:
|
||||
struct IoMap {
|
||||
using NodeIdToIoIndex = std::unordered_map<size_t /*xml node id*/, uint64_t /*body io index*/>;
|
||||
NodeIdToIoIndex inputs;
|
||||
NodeIdToIoIndex outputs;
|
||||
};
|
||||
|
||||
/// \brief Traverses port_map in order to create vector of InputDescription shared_ptrs.
|
||||
/// Shall be used only for ops which have port_map attribute.
|
||||
/// \param node xml op representation
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> parseInputDescription(
|
||||
const pugi::xml_node& node);
|
||||
/// \brief Traverses port_map in order to create vector of OutputDescription shared_ptrs.
|
||||
/// Shall be used only for ops which have port_map attribute.
|
||||
/// \param node xml op representation
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> parseOutputDescription(
|
||||
const pugi::xml_node& node);
|
||||
|
||||
// TODO consider to call only once per layer/TI-Loop node
|
||||
IoMap updated_io_map(const pugi::xml_node& node);
|
||||
|
||||
/// \brief Traverses xml node representation in order to create nGraph function for it.
|
||||
/// \param node xml node representation
|
||||
/// \param weights weights attached to current node
|
||||
/// \return shared pointer to function representing input node
|
||||
std::shared_ptr<ngraph::Function> parse_function(const pugi::xml_node& root, const ov::Weights& weights);
|
||||
/// \brief Traverses xml node representation in order to get the purpose attribute of
|
||||
/// inputs/outputs in the body of Loop op. \param node xml node representation \return struct
|
||||
/// with value of purpose attribute
|
||||
ngraph::op::v5::Loop::SpecialBodyPorts parsePurposeAttribute(const pugi::xml_node& node);
|
||||
|
||||
GenericLayerParams parseGenericParams(const pugi::xml_node& node);
|
||||
|
||||
std::shared_ptr<ngraph::Node> createNode(const ngraph::OutputVector& inputs,
|
||||
const pugi::xml_node& node,
|
||||
const ov::Weights& weights,
|
||||
const GenericLayerParams& params);
|
||||
|
||||
// -- DATA --
|
||||
const pugi::xml_node m_node;
|
||||
const ov::Weights& m_weights;
|
||||
const std::unordered_map<std::string, ngraph::OpSet>& m_opsets;
|
||||
std::unordered_map<std::string, std::shared_ptr<ngraph::Variable>>& m_variables;
|
||||
|
||||
///
|
||||
/// store information about parameters/results order during function creation
|
||||
/// it will be used during Inputs/Outputs Description creation in SubGraph processing
|
||||
///
|
||||
IoMap io_map;
|
||||
|
||||
bool m_use_framework_node{false};
|
||||
};
|
||||
|
||||
XmlDeserializer::IoMap XmlDeserializer::updated_io_map(const pugi::xml_node& node) {
|
||||
auto body_node = node.child("body");
|
||||
|
||||
if (body_node.empty()) {
|
||||
IE_THROW() << "Missing body part.";
|
||||
}
|
||||
// Fill map: parameter/result id to parameter/result number in Function
|
||||
|
||||
auto extend_io_map = io_map;
|
||||
|
||||
FOREACH_CHILD (layer, body_node.child("layers"), "layer") {
|
||||
auto type = XMLParseUtils::GetStrAttr(layer, "type");
|
||||
|
||||
if (type == "Parameter") {
|
||||
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
|
||||
extend_io_map.inputs.insert({id, -1}); // try add as unconnected
|
||||
} else if (type == "Result") {
|
||||
auto id = XMLParseUtils::GetUIntAttr(layer, "id");
|
||||
extend_io_map.outputs.insert({id, -1}); // try add as unconnected
|
||||
}
|
||||
}
|
||||
return extend_io_map;
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> XmlDeserializer::parseInputDescription(
|
||||
const pugi::xml_node& node) {
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>> inputs;
|
||||
const auto up_io_map = updated_io_map(node);
|
||||
|
||||
// Parse PortMap: external_port_id for inputs does not always appear in consecutive order
|
||||
std::map<uint64_t, pugi::xml_node> input_map;
|
||||
FOREACH_CHILD (input, node.child("port_map"), "input") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id");
|
||||
input_map.emplace(ext_port_id, input);
|
||||
}
|
||||
|
||||
for (const auto& input : input_map) {
|
||||
auto& xml_input = input.second;
|
||||
auto axis_attr = xml_input.attribute("axis");
|
||||
int64_t ti_input_index = XMLParseUtils::GetInt64Attr(xml_input, "external_port_id");
|
||||
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
|
||||
|
||||
// if axis is set, then slicing is enabled. Create ngraph::TensorIterator::SlicedInput.
|
||||
if (!axis_attr.empty()) {
|
||||
size_t axis = XMLParseUtils::GetUIntAttr(xml_input, "axis");
|
||||
int64_t start = XMLParseUtils::GetInt64Attr(xml_input, "start", 0);
|
||||
int64_t stride = XMLParseUtils::GetInt64Attr(xml_input, "stride", 1);
|
||||
int64_t end = XMLParseUtils::GetInt64Attr(xml_input, "end", -1);
|
||||
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_input, "part_size", 1);
|
||||
|
||||
const auto input_index = up_io_map.inputs.at(body_parameter_index);
|
||||
|
||||
inputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::SliceInputDescription>(ti_input_index,
|
||||
input_index,
|
||||
start,
|
||||
stride,
|
||||
part_size,
|
||||
end,
|
||||
axis));
|
||||
} else {
|
||||
// otherwise find corresponding back edge and create ngraph::TensorIterator::MergedInput
|
||||
bool is_back_edge_exist = false;
|
||||
FOREACH_CHILD (xml_edge, node.child("back_edges"), "edge") {
|
||||
size_t to_layer = XMLParseUtils::GetUIntAttr(xml_edge, "to-layer");
|
||||
|
||||
if (to_layer == body_parameter_index) {
|
||||
size_t from_layer = XMLParseUtils::GetUIntAttr(xml_edge, "from-layer");
|
||||
|
||||
const auto input_index = up_io_map.inputs.at(body_parameter_index);
|
||||
const auto output_index = up_io_map.outputs.at(from_layer);
|
||||
|
||||
inputs.push_back(
|
||||
std::make_shared<ngraph::op::util::SubGraphOp::MergedInputDescription>(ti_input_index,
|
||||
input_index,
|
||||
output_index));
|
||||
|
||||
is_back_edge_exist = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// ti_input_index = -1 means that Parameter of the body is not connected to inputs of
|
||||
// TensorIterator and is used only for internal needs.
|
||||
if (!is_back_edge_exist && ti_input_index >= 0) {
|
||||
const auto input_index = up_io_map.inputs.at(body_parameter_index);
|
||||
|
||||
inputs.push_back(
|
||||
std::make_shared<ngraph::op::util::SubGraphOp::InvariantInputDescription>(ti_input_index,
|
||||
input_index));
|
||||
}
|
||||
}
|
||||
}
|
||||
return inputs;
|
||||
}
|
||||
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> XmlDeserializer::parseOutputDescription(
|
||||
const pugi::xml_node& node) {
|
||||
std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>> outputs;
|
||||
const auto up_io_map = updated_io_map(node);
|
||||
|
||||
// Parse PortMap: outputs
|
||||
std::map<int64_t, pugi::xml_node> output_map;
|
||||
FOREACH_CHILD (output, node.child("port_map"), "output") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id");
|
||||
output_map.emplace(ext_port_id, output);
|
||||
}
|
||||
|
||||
uint64_t output_number = 0;
|
||||
for (const auto& output : output_map) {
|
||||
auto& xml_output = output.second;
|
||||
auto axis_attr = xml_output.attribute("axis");
|
||||
size_t body_result_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
|
||||
|
||||
// if external_port_id < 0 it means that this body result isn't connected to the Loop output
|
||||
// and is used only for internal needs. For TensorIterator external_port_id is always > 0.
|
||||
if (XMLParseUtils::GetInt64Attr(xml_output, "external_port_id") >= 0) {
|
||||
// if axis is set, then concatenation is enabled. Create
|
||||
// ngraph::TensorIterator::ConcatOutput.
|
||||
if (!axis_attr.empty()) {
|
||||
int64_t axis = XMLParseUtils::GetInt64Attr(xml_output, "axis");
|
||||
int64_t start = XMLParseUtils::GetInt64Attr(xml_output, "start", 0);
|
||||
int64_t stride = XMLParseUtils::GetInt64Attr(xml_output, "stride", 1);
|
||||
int64_t end = XMLParseUtils::GetInt64Attr(xml_output, "end", -1);
|
||||
int64_t part_size = XMLParseUtils::GetInt64Attr(xml_output, "part_size", 1);
|
||||
|
||||
const auto output_index = up_io_map.outputs.at(body_result_index);
|
||||
|
||||
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::ConcatOutputDescription>(output_index,
|
||||
output_number,
|
||||
start,
|
||||
stride,
|
||||
part_size,
|
||||
end,
|
||||
axis));
|
||||
} else {
|
||||
// otherwise create ngraph::TensorIterator::BodyOutput. -1 means last iteration.
|
||||
const auto output_index = up_io_map.outputs.at(body_result_index);
|
||||
|
||||
outputs.push_back(std::make_shared<ngraph::op::util::SubGraphOp::BodyOutputDescription>(output_index,
|
||||
output_number,
|
||||
-1));
|
||||
}
|
||||
output_number++;
|
||||
}
|
||||
}
|
||||
return outputs;
|
||||
}
|
||||
|
||||
ngraph::op::v5::Loop::SpecialBodyPorts XmlDeserializer::parsePurposeAttribute(const pugi::xml_node& node) {
|
||||
ngraph::op::v5::Loop::SpecialBodyPorts result = {-1, -1};
|
||||
const auto up_io_map = updated_io_map(node);
|
||||
|
||||
NGRAPH_CHECK(!up_io_map.inputs.empty() || !up_io_map.outputs.empty(),
|
||||
"No parameters or results found in body Function.");
|
||||
|
||||
// Parse PortMap: external_port_id for inputs/outputs does not always appear in consecutive
|
||||
// order
|
||||
std::map<uint64_t, pugi::xml_node> input_map;
|
||||
FOREACH_CHILD (input, node.child("port_map"), "input") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(input, "external_port_id");
|
||||
input_map.emplace(ext_port_id, input);
|
||||
}
|
||||
std::map<int64_t, pugi::xml_node> output_map;
|
||||
FOREACH_CHILD (output, node.child("port_map"), "output") {
|
||||
int64_t ext_port_id = XMLParseUtils::GetInt64Attr(output, "external_port_id");
|
||||
output_map.emplace(ext_port_id, output);
|
||||
}
|
||||
|
||||
for (const auto& input : input_map) {
|
||||
auto& xml_input = input.second;
|
||||
auto purpose = XMLParseUtils::GetStrAttr(xml_input, "purpose", "");
|
||||
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_input, "internal_layer_id");
|
||||
if (purpose == "current_iteration") {
|
||||
result.current_iteration_input_idx = up_io_map.inputs.at(body_parameter_index);
|
||||
}
|
||||
}
|
||||
|
||||
for (const auto& output : output_map) {
|
||||
auto& xml_output = output.second;
|
||||
auto purpose = XMLParseUtils::GetStrAttr(xml_output, "purpose", "");
|
||||
size_t body_parameter_index = XMLParseUtils::GetUIntAttr(xml_output, "internal_layer_id");
|
||||
if (purpose == "execution_condition") {
|
||||
result.body_condition_output_idx = up_io_map.outputs.at(body_parameter_index);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void XmlDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) {
|
||||
    static const std::unordered_set<std::string> skip_names = {"input_descriptions",
                                                               "output_descriptions",
                                                               "special_body_ports"};
    std::string val;

    // for TensorIterator look for 'port_map' as 'data' does not exist
    if (m_node.child("port_map")) {
        if (auto a = ngraph::as_type<
                ngraph::AttributeAdapter<std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::InputDescription>>>>(
                &adapter)) {
            a->set(parseInputDescription(m_node));
        } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<
                       std::vector<std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>>>>(&adapter)) {
            a->set(parseOutputDescription(m_node));
        } else if (auto a =
                       ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::v5::Loop::SpecialBodyPorts>>(&adapter)) {
            a->set(parsePurposeAttribute(m_node));
        }
    }

    if (skip_names.count(name) && !getStrAttribute(m_node.child("data"), name, val))
        return;
    if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::Type>>(&adapter)) {
        static_cast<ngraph::element::Type&>(*a) = details::convertPrecision(val);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::PartialShape>>(&adapter)) {
        std::vector<int64_t> shape;
        std::vector<ngraph::Dimension> dims;
        if (!getParameters<int64_t>(m_node.child("data"), name, shape))
            return;
        for (const auto& dim : shape)
            dims.emplace_back(dim);
        static_cast<ngraph::PartialShape&>(*a) = ngraph::PartialShape(dims);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::Shape>>(&adapter)) {
        std::vector<size_t> shape;
        if (!getParameters<size_t>(m_node.child("data"), name, shape))
            return;
        static_cast<ngraph::Shape&>(*a) = ngraph::Shape(shape);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::Strides>>(&adapter)) {
        std::vector<size_t> shape;
        if (!getParameters<size_t>(m_node.child("data"), name, shape))
            return;
        static_cast<ngraph::Strides&>(*a) = ngraph::Strides(shape);
#ifdef __APPLE__
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<size_t>>>(&adapter)) {
        std::vector<size_t> result;
        if (!getParameters<size_t>(m_node.child("data"), name, result))
            return;
        static_cast<std::vector<size_t>&>(*a) = result;
#else
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<size_t>>>(&adapter)) {
        std::vector<size_t> result;
        if (!getParameters<size_t>(m_node.child("data"), name, result))
            return;
        a->set(result);
#endif
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::AxisSet>>(&adapter)) {
        std::vector<size_t> axes;
        if (!getParameters<size_t>(m_node.child("data"), name, axes))
            return;
        static_cast<ngraph::AxisSet&>(*a) = ngraph::AxisSet(axes);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::TopKSortType>>(&adapter)) {
        if (!getStrAttribute(m_node.child("data"), name, val))
            return;
        static_cast<ngraph::op::TopKSortType&>(*a) = ngraph::as_enum<ngraph::op::TopKSortType>(val);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::TopKMode>>(&adapter)) {
        if (!getStrAttribute(m_node.child("data"), name, val))
            return;
        static_cast<ngraph::op::TopKMode&>(*a) = ngraph::as_enum<ngraph::op::TopKMode>(val);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::CoordinateDiff>>(&adapter)) {
        std::vector<size_t> shape;
        if (!getParameters<size_t>(m_node.child("data"), name, shape))
            return;
        std::vector<std::ptrdiff_t> coord_diff(shape.begin(), shape.end());
        static_cast<ngraph::CoordinateDiff&>(*a) = ngraph::CoordinateDiff(coord_diff);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::shared_ptr<ngraph::Variable>>>(&adapter)) {
        std::string variable_id;
        if (!getStrAttribute(m_node.child("data"), name, variable_id))
            return;
        if (!m_variables.count(variable_id)) {
            m_variables[variable_id] = std::make_shared<ngraph::Variable>(
                ngraph::VariableInfo{ngraph::PartialShape::dynamic(), ngraph::element::dynamic, variable_id});
        }
        a->set(m_variables[variable_id]);
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::shared_ptr<ngraph::runtime::AlignedBuffer>>>(
                   &adapter)) {
        std::string value;
        pugi::xml_node dn = m_node.child("data");
        auto type = XMLParseUtils::GetStrAttr(m_node, "type");

        if (dn.empty())
            IE_THROW() << "No attributes defined for " << type << " op!";

        if (getStrAttribute(dn, name, value)) {
            auto buffer = std::make_shared<ngraph::runtime::AlignedBuffer>(value.size());
            auto data = static_cast<char*>(buffer->get_ptr());
            value.copy(data, value.size());
            a->set(buffer);
        } else if (name == "value" && type == "Const") {
            std::vector<int64_t> shape;
            std::string el_type_str;

            size_t offset = XMLParseUtils::GetUInt64Attr(dn, "offset");
            size_t size = XMLParseUtils::GetUInt64Attr(dn, "size");
            if (!getStrAttribute(dn, "element_type", el_type_str))
                return;
            if (!getParameters<int64_t>(dn, "shape", shape))
                return;

            ngraph::element::Type el_type = details::convertPrecision(el_type_str);

            if (!m_weights)
                IE_THROW() << "Empty weights data in bin file or bin file cannot be found!";
            if (m_weights->size() < offset + size)
                IE_THROW() << "Incorrect weights in bin file!";
            if (size < std::ceil(ngraph::shape_size(shape) * el_type.bitwidth() / 8.f))
                IE_THROW() << "Attribute and shape size are inconsistent for " << type << " op!";

            char* data = m_weights->get_ptr<char>() + offset;
            auto buffer =
                std::make_shared<runtime::SharedBuffer<std::shared_ptr<runtime::AlignedBuffer>>>(data, size, m_weights);
            a->set(buffer);
        }
    } else if (auto a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::op::FrameworkNodeAttrs>>(&adapter)) {
        const auto& type = XMLParseUtils::GetStrAttr(m_node, "type");
        const auto& version = XMLParseUtils::GetStrAttr(m_node, "version");

        ngraph::op::FrameworkNodeAttrs node_attrs;
        node_attrs.set_opset_name(version);
        node_attrs.set_type_name(type);

        pugi::xml_node dn = m_node.child("data");

        if (!dn.empty()) {
            for (const auto& data_attr : dn.attributes()) {
                node_attrs[data_attr.name()] = data_attr.as_string();
            }
        }

        a->set(node_attrs);
    } else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<ngraph::element::TypeVector>>(&adapter)) {
        ngraph::element::TypeVector types;
        if (!getParameters<ngraph::element::Type>(m_node.child("data"), name, types))
            return;
        a->set(types);
    } else {
        IE_THROW() << "Error reading IR: attribute adapter cannot be found for the " << name << " parameter";
    }
}
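// Editorial note: a minimal, hypothetical sketch (not part of this commit) of what the
// ngraph::Shape branch above effectively does for an IR attribute such as
// <data shape="1,3,224,224"/>, using the getParameters helper declared in
// ngraph/frontend/ir/src/utils.hpp later in this diff; 'layer_node' is an assumed
// pugi::xml_node of the layer being visited.
//
//     pugi::xml_node data = layer_node.child("data");        // hypothetical <layer> node
//     std::vector<size_t> dims;
//     if (ov::getParameters<size_t>(data, "shape", dims)) {  // dims == {1, 3, 224, 224}
//         ngraph::Shape shape(dims);                         // value handed to the adapter
//     }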

void XmlDeserializer::on_adapter(const std::string& name,
                                 ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>>& adapter) {
    std::shared_ptr<ngraph::Function> ngraph_function;
    if (!name.compare("body")) {
        auto body_node = m_node.child(name.c_str());
        if (body_node.empty()) {
            IE_THROW() << "TensorIterator has no body.";
        }
        ngraph_function = parse_function(m_node.child(name.c_str()), m_weights);
    } else if (!name.compare("net")) {
        ngraph_function = parse_function(m_node, m_weights);
    } else {
IE_THROW() << "Error: not recognized adapter name: " << name << ".";
|
||||
    }
    adapter.set(ngraph_function);
}

std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(const pugi::xml_node& root,
                                                                  const ov::Weights& weights) {
    // OV_ITT_SCOPE_CHAIN(FIRST_INFERENCE, taskChain, itt::domains::V10Reader_RT, "V10Parser", "Parse");

    struct FunctionNodes {
        ngraph::ParameterVector parameters;
        ngraph::ResultVector results;
        ngraph::NodeVector all;
        ngraph::SinkVector sinks;
    };

    struct edge {
        size_t fromLayerId, fromPortId, toPortId;
    };
    struct node_params {
        pugi::xml_node xml;
        GenericLayerParams params;
    };

    std::map<size_t /*layer-id*/, node_params> params;

    std::vector<size_t /*layer-id*/> outputs;
    std::unordered_set<std::string> opName;

    // Read all layers and store their parameters in params map
    FOREACH_CHILD (node, root.child("layers"), "layer") {
        auto node_param = parseGenericParams(node);
        if (opName.find(node_param.name) != opName.end() && node_param.type != "Result")
            IE_THROW() << "Invalid IR! " << node_param.name << " name is not unique!";
        opName.insert(node_param.name);
        params[node_param.layerId] = {node, node_param};
        if (node_param.type == "Result" || node_param.type == "Assign") {
            outputs.push_back(node_param.layerId);
        }
    }

    std::map<size_t /*to-layer-id*/, std::vector<edge>> edges;
    std::map<size_t, std::shared_ptr<ngraph::Node>> id_to_node;

    // Read all edges and store them for further usage
    FOREACH_CHILD (_ec, root.child("edges"), "edge") {
        size_t fromLayer = XMLParseUtils::GetUIntAttr(_ec, "from-layer");
        size_t fromPort = XMLParseUtils::GetUIntAttr(_ec, "from-port");
        size_t toLayer = XMLParseUtils::GetUIntAttr(_ec, "to-layer");
        size_t toPort = XMLParseUtils::GetUIntAttr(_ec, "to-port");
        edges[toLayer].push_back({fromLayer, fromPort, toPort});
    }

    // Run DFS starting from outputs to get nodes topological order
    std::set<size_t> used;
    std::vector<size_t> order;
    std::function<void(size_t)> dfs = [&edges, &order, &used, &dfs](const size_t id) {
        if (used.count(id))
            return;
        used.insert(id);
        for (auto& edge : edges[id]) {
            dfs(edge.fromLayerId);
        }
        order.push_back(id);
    };
    std::for_each(outputs.begin(), outputs.end(), dfs);
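    // Editorial note: the lambda above is a post-order DFS over incoming edges, so every
    // producer is appended to 'order' before any of its consumers, i.e. 'order' is a
    // topological ordering of the layers reachable from the Result/Assign outputs.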

    // OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphNodes");

    FunctionNodes func_nodes;

    std::map<std::string, std::shared_ptr<ngraph::Node>> variable_id_to_read_value;

    // Following the topological order, create nGraph operations
    for (auto& layer_id : order) {
        auto& p = params[layer_id];
        const auto& edgeIt = edges.find(layer_id);
        if (edgeIt == edges.end())
            continue;
        ngraph::OutputVector inputs(edgeIt->second.size());
        for (auto& e : edgeIt->second) {
            auto input_node = id_to_node[e.fromLayerId];
            if (!input_node) {
IE_THROW() << "Attempt to access node " << e.fromLayerId << " that not in graph.";
|
||||
            }
            auto& p_output = params[e.fromLayerId].params;
            size_t const realInputPortId = p.params.getRealInputPortId(e.toPortId);
            if (realInputPortId >= inputs.size())
                IE_THROW() << p.params.type << " layer " << p.params.name << " with id: " << p.params.layerId
                           << " is inconsistent!";
            inputs[realInputPortId] = input_node->output(p_output.getRealOutputPortId(e.fromPortId));
        }

        auto node = createNode(inputs, p.xml, weights, p.params);
        id_to_node[layer_id] = node;

        // Check that the output shape after nGraph node validation is the same as in the IR,
        // because the IR is always right!
        // Temporarily disabled!
        // for (size_t i = 0; i < p.params.outputPorts.size(); ++i) {
        //     if (p.params.outputPorts[i].dims != node->output(i).get_shape()) {
        //         IE_THROW() << "Shape after nGraph infer " <<
        //         details::dumpVec(node->output(i).get_shape())
        //                    << " differ from IR shapes: " <<
        //                    details::dumpVec(p.params.outputPorts[i].dims);
        //     }
        // }

        if (const auto& parameter_node = std::dynamic_pointer_cast<ngraph::op::Parameter>(node)) {
            io_map.inputs.insert({layer_id, func_nodes.parameters.size()});
            func_nodes.parameters.emplace_back(parameter_node);
        }

        if (const auto& result_node = std::dynamic_pointer_cast<ngraph::op::Result>(node)) {
            io_map.outputs.insert({layer_id, func_nodes.results.size()});
            func_nodes.results.emplace_back(result_node);
        }

        if (const auto& sink = std::dynamic_pointer_cast<ngraph::op::Sink>(node)) {
            func_nodes.sinks.emplace_back(sink);
        }

        if (const auto& read_value = std::dynamic_pointer_cast<ngraph::op::ReadValueBase>(node)) {
            variable_id_to_read_value[read_value->get_variable_id()] = read_value;
        }

        func_nodes.all.emplace_back(node);
    }

    // OV_ITT_SCOPE_NEXT(FIRST_INFERENCE, taskChain, "ConstructNgraphFunction");

    auto function = std::make_shared<ngraph::Function>(func_nodes.results,
                                                       func_nodes.sinks,
                                                       func_nodes.parameters,
                                                       XMLParseUtils::GetStrAttr(root, "name", ""));
    for (const auto& sink : func_nodes.sinks) {
        if (const auto& assign = std::dynamic_pointer_cast<ngraph::op::AssignBase>(sink)) {
            assign->add_control_dependency(variable_id_to_read_value.at(assign->get_variable_id()));
        }
    }

    return function;
}

GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) {
    const auto parsePort = [this](const pugi::xml_node& parentNode,
                                  const GenericLayerParams& params,
                                  bool input) -> GenericLayerParams::LayerPortData {
        GenericLayerParams::LayerPortData port;

        port.portId = XMLParseUtils::GetIntAttr(parentNode, "id");

        FOREACH_CHILD (node, parentNode, "dim") {
            int64_t dim = 0;
            const pugi::char_t* dimVal = node.child_value();
            std::stringstream ss(dimVal);
            if (!(ss >> dim) || dim < -1) {
                IE_THROW() << "dimension (" << dimVal << ") in node " << node.name()
                           << " must be greater or equal to -1: at offset " << node.offset_debug();
            }
            port.dims.push_back(dim);
        }

        ngraph::element::Type type(ngraph::element::Type_t::undefined);
        // Input port has no precision
        if (!input) {
            const std::string& preStr = XMLParseUtils::GetStrAttr(parentNode, "precision");
            type = InferenceEngine::details::convertPrecision(preStr);
        }
        port.precision = type;
        std::vector<std::string> names;
        if (getParameters<std::string>(parentNode, "names", names)) {
            for (size_t i = 0; i < names.size(); i++) {
                std::string name = names[i];
                // Restore the original name if it contains the delimiter.
                // getParameters(...) returns the vector of names which were split by the delimiter ',',
                // but some names can contain ',' as a part of the name; in this case we use '\' to
                // escape the delimiter. The cycle below is needed in order to find names which contained
                // the delimiter and restore the original name.
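                // Editorial example: a "names" attribute of "data\,tensor,out" is first split
                // into {"data\", "tensor", "out"}; the loop below glues the escaped pieces back
                // together, yielding the tensor names {"data,tensor", "out"}.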
                while (i < names.size() && names[i].at(names[i].length() - 1) == '\\') {
                    name.replace(names[i].length() - 1, 1, ",");
                    name += names[++i];
                }
                port.names.emplace(name);
            }
        }
        return port;
    };
    GenericLayerParams params;

    params.layerId = XMLParseUtils::GetIntAttr(node, "id");
    params.version = XMLParseUtils::GetStrAttr(node, "version");

    params.type = XMLParseUtils::GetStrAttr(node, "type");

    params.name = XMLParseUtils::GetStrAttr(node, "name");

    auto outNode = node.child("output");
    if (!outNode.empty()) {
        FOREACH_CHILD (_cn, outNode, "port") { params.outputPorts.emplace_back(parsePort(_cn, params, false)); }
    }
    auto inpNode = node.child("input");
    if (!inpNode.empty()) {
        FOREACH_CHILD (_cn, inpNode, "port") { params.inputPorts.emplace_back(parsePort(_cn, params, true)); }
    }
    return params;
}

std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(const std::vector<ngraph::Output<ngraph::Node>>& inputs,
                                                          const pugi::xml_node& node,
                                                          const ov::Weights& weights,
                                                          const GenericLayerParams& params) {
    // Check that inputs are correctly defined
    for (size_t i = 0; i < inputs.size(); i++) {
        if (!inputs[i].get_node())
            IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId
                       << " has incorrect input with index " << i << "!";
        if (ngraph::element::Type_t::undefined == inputs[i].get_element_type())
            IE_THROW() << params.type << " layer " << params.name << " with id: " << params.layerId
                       << " has undefined element type for input with index " << i << "!";
    }

    std::shared_ptr<ngraph::Node> ngraphNode;

    // Find registered opset
    auto opsetIt = m_opsets.find(params.version);

    // Try to create operation from loaded opsets
    static const std::unordered_set<std::string> experimental_ops_added_to_opset = {
        "ExperimentalDetectronDetectionOutput",
        "ExperimentalDetectronGenerateProposalsSingleImage",
        "ExperimentalDetectronPriorGridGenerator",
        "ExperimentalDetectronROIFeatureExtractor",
        "ExperimentalDetectronTopKROIs",
        "GRUCell",
        "RNNCell",
        "Proposal"};

    if (experimental_ops_added_to_opset.count(params.type) &&
        (params.version == "experimental" || params.version == "extension")) {
        opsetIt = m_opsets.find("opset6");
    }

    if (!ngraphNode && opsetIt != m_opsets.end()) {
        auto const& type = params.type == "Const" ? "Constant" : params.type;

        if (params.version == "opset1") {
            // MVN, ROIPooling and ReorgYolo were missing in opset1
            if (type == "MVN" || type == "ROIPooling" || type == "ReorgYolo") {
                opsetIt = m_opsets.find("opset2");
                if (opsetIt == m_opsets.end()) {
                    IE_THROW() << "Cannot create " << params.type << " layer " << params.name
                               << " id:" << params.layerId << " from unsupported opset: " << params.version;
                }
            }
        }

        auto const& opset = opsetIt->second;

        ngraphNode = std::shared_ptr<ngraph::Node>(opset.create_insensitive(type));
        if (!ngraphNode) {
            IE_THROW() << "Opset " << params.version << " doesn't contain the operation with type: " << type;
        }
        // Share weights from the constant blob
        if (auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(ngraphNode)) {
            constant->alloc_buffer_on_visit_attributes(false);
        }
        ngraphNode->set_arguments(inputs);
        XmlDeserializer visitor(node, weights, m_opsets, m_variables);

        if (ngraphNode->visit_attributes(visitor)) {
            ngraphNode->constructor_validate_and_infer_types();
        }

        // To be sure that all default values will be initialized:
        ngraphNode = ngraphNode->clone_with_new_inputs(ngraphNode->input_values());
    }

    if (!ngraphNode && m_use_framework_node) {
        ngraphNode = std::make_shared<ngraph::op::FrameworkNode>(inputs);
        XmlDeserializer visitor(node, weights, m_opsets, m_variables);
        ngraphNode->visit_attributes(visitor);

        size_t index{0};
        for (const auto& output_params : params.outputPorts) {
            ngraphNode->set_output_type(index, output_params.precision, ngraph::PartialShape(output_params.dims));
            ++index;
        }
    }

    if (!ngraphNode) {
        IE_THROW() << "Cannot create " << params.type << " layer " << params.name << " id:" << params.layerId
                   << " from unsupported opset: " << params.version;
    }

    // Save run time info
    auto& rtInfo = ngraphNode->get_rt_info();
    pugi::xml_node dn = node.child("data");
    if (dn) {
        const auto pr_data = dn.attribute("PrimitivesPriority");
        if (pr_data) {
            rtInfo["PrimitivesPriority"] = std::make_shared<::ngraph::VariantWrapper<std::string>>(pr_data.value());
        }
        const auto aw_data = dn.attribute("alt_width");
        if (aw_data) {
            rtInfo["alt_width"] = std::make_shared<::ngraph::VariantWrapper<std::string>>(aw_data.value());
        }
    }

    ngraphNode->set_friendly_name(params.name);
    for (size_t i = 0; i < params.outputPorts.size() && i < ngraphNode->get_output_size(); ++i) {
        if (!params.outputPorts[i].names.empty())
            ngraphNode->get_output_tensor(i).set_names(params.outputPorts[i].names);
    }

    return ngraphNode;
}

void ParsePreProcess(pugi::xml_node& root, ov::Weights weights, std::shared_ptr<Function> f) {
    /* Preprocessing block can have two preprocessing types:
     *
@ -1065,22 +178,10 @@ void ParsePreProcess(pugi::xml_node& root, ov::Weights weights, std::shared_ptr<
        }
    }
}

} // namespace

namespace ngraph {
namespace frontend {

namespace {
void loadXml(pugi::xml_document& xmlDoc, std::istream& model) {
    // OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::V10Reader_RT, "loadXml");
    pugi::xml_parse_result res = xmlDoc.load(model);
    if (res.status != pugi::status_ok) {
        IE_THROW() << res.description() << "at offset " << res.offset;
    }
}
} // namespace

class InputModelIR::InputModelIRImpl {
    ov::Weights m_weights;
    ov::Extensions m_extensions;
@ -1091,7 +192,10 @@ public:
    InputModelIRImpl(std::istream& stream, const ov::Weights& weights, const ov::Extensions& extensions)
        : m_weights(weights),
          m_extensions(extensions) {
        loadXml(m_xml_doc, stream);
        pugi::xml_parse_result res = m_xml_doc.load(stream);
        if (res.status != pugi::status_ok) {
            IE_THROW() << res.description() << "at offset " << res.offset;
        }
        m_root = m_xml_doc.document_element();
    }

@ -1127,7 +231,7 @@ std::shared_ptr<Function> InputModelIR::InputModelIRImpl::convert() {
        opsets[it.first] = it.second;
    }

    XmlDeserializer visitor(m_root, m_weights, opsets, variables);
    ov::XmlDeserializer visitor(m_root, m_weights, opsets, variables);
    visitor.use_framework_node(opsets.count("framework_node_ext"));
    std::shared_ptr<ngraph::Function> function;
    visitor.on_attribute("net", function);
ngraph/frontend/ir/src/rt_info_deserializer.cpp (new file, 24 lines)
@ -0,0 +1,24 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <ir_frontend/utility.hpp>
#include <pugixml.hpp>
#include <rt_info_deserializer.hpp>
#include <transformations/rt_info/attributes.hpp>

using namespace ov;

void RTInfoDeserializer::on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) {
    check_attribute_name(name);
    std::string val;
    if (!getStrAttribute(m_node, name, val))
        return;
    if (auto a = ngraph::as_type<ngraph::AttributeAdapter<std::set<std::string>>>(&adapter)) {
        std::set<std::string> ss;
        str_to_container(val, ss);
        a->set(ss);
    } else {
        IR_THROW("Not implemented");
    }
}
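// Editorial note: a hypothetical usage sketch (not part of this commit) of the visitor above.
// It assumes the includes of rt_info_deserializer.cpp, an rt_info entry serialized roughly as
// <attribute name="fused_names" version="0" value="conv1,relu1"/>, and that the
// AttributeAdapter<std::set<std::string>> specialization dispatched on above is available.
//
//     pugi::xml_document doc;
//     doc.load_string("<attribute name=\"fused_names\" version=\"0\" value=\"conv1,relu1\"/>");
//     ov::RTInfoDeserializer visitor(doc.document_element());
//     std::set<std::string> names;
//     visitor.on_attribute("value", names);  // the std::set branch splits "conv1,relu1" into {"conv1", "relu1"}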
ngraph/frontend/ir/src/rt_info_deserializer.hpp (new file, 116 lines)
@ -0,0 +1,116 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <ir_frontend/utility.hpp>
#include <istream>
#include <memory>
#include <ngraph/ngraph.hpp>
#include <utils.hpp>

namespace ov {
class RTInfoDeserializer : public ngraph::AttributeVisitor {
public:
    explicit RTInfoDeserializer(const pugi::xml_node& node) : m_node(node) {}

    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::string>& value) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        value.set(val);
    }

    void on_adapter(const std::string& name, ngraph::ValueAccessor<bool>& value) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        std::transform(val.begin(), val.end(), val.begin(), [](char ch) {
            return std::tolower(static_cast<unsigned char>(ch));
        });
        std::set<std::string> true_names{"true", "1"};
        std::set<std::string> false_names{"false", "0"};

        bool is_true = true_names.find(val) != true_names.end();
        bool is_false = false_names.find(val) != false_names.end();

        if (!is_true && !is_false)
            return;
        value.set(is_true);
    }

    void on_adapter(const std::string& name, ngraph::ValueAccessor<void>& adapter) override;

    void on_adapter(const std::string& name, ngraph::ValueAccessor<double>& adapter) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        adapter.set(stringToType<double>(val));
    }
    void on_adapter(const std::string& name, ngraph::ValueAccessor<int64_t>& adapter) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        adapter.set(stringToType<int64_t>(val));
    }

    void on_adapter(const std::string& name,
                    ngraph::ValueAccessor<std::shared_ptr<ngraph::Function>>& adapter) override {
        throw ngraph::ngraph_error("Function type is unsupported for rt info deserialization");
    }

    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int32_t>>& adapter) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        std::vector<int32_t> value;
        str_to_container(val, value);
        adapter.set(value);
    }

    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        std::vector<int64_t> value;
        str_to_container(val, value);
        adapter.set(value);
    }

    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<float>>& adapter) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        std::vector<float> value;
        str_to_container(val, value);
        adapter.set(value);
    }

    void on_adapter(const std::string& name, ngraph::ValueAccessor<std::vector<std::string>>& adapter) override {
        check_attribute_name(name);
        std::string val;
        if (!getStrAttribute(m_node, name, val))
            return;
        std::vector<std::string> value;
        str_to_container(val, value);
        adapter.set(value);
    }

    void check_attribute_name(const std::string& name) const {
        if (name == "name" || name == "version") {
            throw ngraph::ngraph_error("Attribute key with name: " + name + " is not allowed. Please use another name");
        }
    }

private:
    pugi::xml_node m_node;
};
} // namespace ov
ngraph/frontend/ir/src/utils.cpp (new file, 22 lines)
@ -0,0 +1,22 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "utils.hpp"

namespace ov {
void operator>>(const std::stringstream& in, ngraph::element::Type& type) {
    type = InferenceEngine::details::convertPrecision(ngraph::trim(in.str()));
}

bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value) {
    if (!node)
        return false;

    auto attr = node.attribute(name.c_str());
    if (attr.empty())
        return false;
    value = std::string(attr.value());
    return true;
}
} // namespace ov
ngraph/frontend/ir/src/utils.hpp (new file, 52 lines)
@ -0,0 +1,52 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <xml_parse_utils.h>

#include <ie_ngraph_utils.hpp>
#include <istream>
#include <memory>
#include <ngraph/ngraph.hpp>
#include <pugixml.hpp>

namespace ov {
void operator>>(const std::stringstream& in, ngraph::element::Type& type);

bool getStrAttribute(const pugi::xml_node& node, const std::string& name, std::string& value);

template <class T>
void str_to_container(const std::string& value, T& res) {
    std::stringstream ss(value);
    std::string field;
    while (getline(ss, field, ',')) {
        if (field.empty())
            IE_THROW() << "Cannot get vector of parameters! \"" << value << "\" is incorrect";
        std::stringstream fs(field);
        typename T::value_type val;
        fs >> val;
        res.insert(res.end(), val);
    }
}

template <class T>
bool getParameters(const pugi::xml_node& node, const std::string& name, std::vector<T>& value) {
    std::string param;
    if (!getStrAttribute(node, name, param))
        return false;
    str_to_container(param, value);
    return true;
}

template <class T>
T stringToType(const std::string& valStr) {
    T ret{0};
    std::istringstream ss(valStr);
    if (!ss.eof()) {
        ss >> ret;
    }
    return ret;
}
} // namespace ov
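// Editorial usage sketch (not part of this commit) for the helpers above, assuming a
// comma-separated attribute string of the kind found in IR <data> nodes:
//
//     std::vector<int64_t> dims;
//     ov::str_to_container(std::string("1,3,224,224"), dims);  // dims == {1, 3, 224, 224}
//     double epsilon = ov::stringToType<double>("1e-6");       // epsilon == 1e-6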
@ -3157,7 +3157,7 @@ TEST(constant_folding, disable_constant_folding) {
    auto constant_shape = op::Constant::create(element::i64, Shape{1}, {3});
    auto dyn_reshape = make_shared<op::v1::Reshape>(input, constant_shape, true);
    auto& rt_info = dyn_reshape->get_rt_info();
    rt_info["DISABLED_CONSTANT_FOLDING"];
    rt_info["disabled_constant_folding_0"];
    auto f = make_shared<Function>(dyn_reshape, ParameterVector{input});

    pass::Manager pass_manager;