Added reading of meta information from IR (#12925)

* Added reading of meta information from IR

* Fixed code style

* Added tests to cover multi-threading and removed file

* Fixed asserts

* Changed parsing of meta information

* Added meta information serialization

* Fixed unit tests

* Support unset_cli_parameters

* Changed get meta API

* Fixed threading tests

* Fixed mutex

* Removed serialization

* Fixed tests

* Fixed build

* Add quantization_parameters

* Use rt_info instead of meta section

* Disable old rt_info serialization

* Try to fix MO Reader for new rt info

* Fixed property name

* Added test for the new meta format

* Added tests for new PoT config

* Added tests for new rt_info attributes

* Remove redundant code

* Revert old rt_info style for nodes

* Add get API for python

* Added python tests

* Fixed python code style

* Fixed tests code style

* Fix flake8

* Fixed python tests

* Add has_rt_info to test

* Added more comments

* Extended tests and API

* Changed error message

* Use new API to get model version

* Fixed comments on python API

* Fixed comments

* Fixed comments

* Update src/bindings/python/src/pyopenvino/graph/model.cpp

Co-authored-by: Jan Iwaszkiewicz <jan.iwaszkiewicz@intel.com>

* Update src/bindings/python/src/pyopenvino/graph/model.cpp

Co-authored-by: Jan Iwaszkiewicz <jan.iwaszkiewicz@intel.com>

* Fixed comments

* Fixed python check

Co-authored-by: Jan Iwaszkiewicz <jan.iwaszkiewicz@intel.com>
Ilya Churaev 2022-10-21 12:21:41 +04:00 committed by GitHub
parent 27a03948c2
commit 5e25341904
16 changed files with 1592 additions and 14 deletions

View File

@ -7,6 +7,10 @@
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <utility>
#include <vector>
#include "openvino/core/except.hpp"
#include "openvino/core/graph_util.hpp"
#include "openvino/core/model.hpp" // ov::Model
#include "openvino/core/partial_shape.hpp"
@ -17,6 +21,7 @@
#include "pyopenvino/graph/ops/result.hpp"
#include "pyopenvino/graph/ops/util/variable.hpp"
#include "pyopenvino/graph/rt_map.hpp"
#include "pyopenvino/utils/utils.hpp"
namespace py = pybind11;
@ -701,9 +706,123 @@ void regclass_graph_Model(py::module m) {
return "<" + class_name + ": '" + self.get_friendly_name() + "'\ninputs[\n" + inputs_str + "\n]\noutputs[\n" +
outputs_str + "\n]>";
});
model.def("get_rt_info",
(PyRTMap & (ov::Model::*)()) & ov::Model::get_rt_info,
py::return_value_policy::reference_internal,
R"(
Returns PyRTMap which is a dictionary of user defined runtime info.
:return: A dictionary of user defined data.
:rtype: openvino.runtime.RTMap
)");
model.def(
"get_rt_info",
[](const ov::Model& self, const py::list& path) -> py::object {
std::vector<std::string> cpp_args(path.size());
for (size_t i = 0; i < path.size(); i++) {
cpp_args[i] = path[i].cast<std::string>();
}
return Common::utils::from_ov_any(self.get_rt_info<ov::Any>(cpp_args));
},
py::arg("path"),
R"(
Returns runtime attribute.
:param path: List of strings which defines a path to runtime info.
:type path: List[str]
:return: A runtime attribute.
:rtype: Any
)");
model.def(
"get_rt_info",
[](const ov::Model& self, const py::str& path) -> py::object {
return Common::utils::from_ov_any(self.get_rt_info<ov::Any>(path.cast<std::string>()));
},
py::arg("path"),
R"(
Returns runtime attribute.
:param path: String which defines a path to runtime info.
:type path: str
:return: A runtime attribute.
:rtype: Any
)");
model.def(
"has_rt_info",
[](const ov::Model& self, const py::list& path) -> bool {
std::vector<std::string> cpp_args(path.size());
for (size_t i = 0; i < path.size(); i++) {
cpp_args[i] = path[i].cast<std::string>();
}
return self.has_rt_info(cpp_args);
},
py::arg("path"),
R"(
Checks if given path exists in runtime info of the model.
:param path: List of strings which defines a path to runtime info.
:type path: List[str]
:return: `True` if path exists, otherwise `False`.
:rtype: bool
)");
model.def(
"has_rt_info",
[](const ov::Model& self, const py::str& path) -> bool {
return self.has_rt_info(path.cast<std::string>());
},
py::arg("path"),
R"(
Checks if given path exists in runtime info of the model.
:param path: String which defines a path to runtime info.
:type path: str
:return: `True` if path exists, otherwise `False`.
:rtype: bool
)");
model.def(
"set_rt_info",
[](ov::Model& self, const py::object& obj, const py::list& path) -> void {
std::vector<std::string> cpp_args(path.size());
for (size_t i = 0; i < path.size(); i++) {
cpp_args[i] = path[i].cast<std::string>();
}
self.set_rt_info<ov::Any>(py_object_to_any(obj), cpp_args);
},
py::arg("obj"),
py::arg("path"),
R"(
Add value inside the runtime info.
:param obj: value for the runtime info
:type obj: Any
:param path: List of strings which defines a path to runtime info.
:type path: List[str]
)");
model.def(
"set_rt_info",
[](ov::Model& self, const py::object& obj, const py::str& path) -> void {
self.set_rt_info<ov::Any>(py_object_to_any(obj), path.cast<std::string>());
},
py::arg("obj"),
py::arg("path"),
R"(
Add value inside the runtime info.
:param obj: value for the runtime info
:type obj: Any
:param path: String which defines a path to runtime info.
:type path: str
)");
model.def_property_readonly("inputs", (std::vector<ov::Output<ov::Node>>(ov::Model::*)()) & ov::Model::inputs);
model.def_property_readonly("outputs", (std::vector<ov::Output<ov::Node>>(ov::Model::*)()) & ov::Model::outputs);
model.def_property_readonly("name", &ov::Model::get_name);
model.def_property_readonly("rt_info",
(PyRTMap & (ov::Model::*)()) & ov::Model::get_rt_info,
py::return_value_policy::reference_internal);
model.def_property("friendly_name", &ov::Model::get_friendly_name, &ov::Model::set_friendly_name);
}

View File

@ -2,6 +2,7 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import numpy as np
import pytest
@ -18,6 +19,7 @@ from openvino.runtime import (
Shape,
set_batch,
get_batch,
serialize,
)
from ..test_utils.test_utils import generate_add_model # TODO: reformat into an absolute path
@ -448,3 +450,68 @@ def test_reshape_with_python_types(device):
"expected values as openvino.runtime.PartialShape, str, list or tuple."
in str(e.value)
)
def test_serialize_rt_info():
version = "TestVersion"
config = "TestConfig"
framework_batch = "1"
def check_rt_info(model):
assert model.get_rt_info("MO_version") == version
assert model.get_rt_info(["Runtime_version"]) == version
assert model.get_rt_info(["optimization", "config"]) == config
assert model.get_rt_info(["framework", "batch"]) == framework_batch
assert model.has_rt_info(["test"]) is False
assert model.has_rt_info("optimization") is True
assert model.has_rt_info(["optimization", "test"]) is False
with pytest.raises(RuntimeError):
assert model.get_rt_info(["test"])
with pytest.raises(RuntimeError):
assert model.get_rt_info(["optimization", "test"])
core = Core()
xml_path = "./serialized_model.xml"
bin_path = "./serialized_model.bin"
input_shape = PartialShape([1])
param = ops.parameter(input_shape, dtype=np.float32, name="data")
relu1 = ops.relu(param, name="relu1")
relu1.get_output_tensor(0).set_names({"relu_t1"})
assert "relu_t1" in relu1.get_output_tensor(0).names
relu2 = ops.relu(relu1, name="relu2")
model = Model(relu2, [param], "TestFunction")
assert model is not None
assert model.has_rt_info("MO_version") is False
model.set_rt_info(version, "MO_version")
assert model.has_rt_info("MO_version") is True
assert model.has_rt_info(["Runtime_version"]) is False
model.set_rt_info(version, ["Runtime_version"])
assert model.has_rt_info(["Runtime_version"]) is True
assert model.has_rt_info(["optimization"]) is False
assert model.has_rt_info(["optimization", "config"]) is False
model.set_rt_info(config, ["optimization", "config"])
assert model.has_rt_info(["optimization"]) is True
assert model.has_rt_info(["optimization", "config"]) is True
assert model.has_rt_info(["framework"]) is False
assert model.has_rt_info(["framework", "batch"]) is False
model.set_rt_info(framework_batch, ["framework", "batch"])
assert model.has_rt_info(["framework"]) is True
assert model.has_rt_info(["framework", "batch"]) is True
check_rt_info(model)
serialize(model, xml_path, bin_path)
res_model = core.read_model(model=xml_path, weights=bin_path)
check_rt_info(res_model)
os.remove(xml_path)
os.remove(bin_path)

View File

@ -0,0 +1,30 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "openvino/core/any.hpp"
namespace ov {
/**
* @brief Provide an abstract interface for lazy reading of meta information
*/
class OPENVINO_API Meta {
public:
/**
* @brief Parses and returns meta information by request
*
* @return ov::AnyMap with meta information
*/
virtual operator ov::AnyMap&() = 0;
/**
* @brief Parses and returns meta information by request
*
* @return const ov::AnyMap with meta information
*/
virtual operator const ov::AnyMap&() const = 0;
};
} // namespace ov
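To make the interface above concrete, here is a minimal sketch (not taken from this change; the class name EagerMeta and the include path are hypothetical) of an implementation that wraps an already-parsed map. The MetaDataParser added in the IR frontend below follows the same pattern but defers XML parsing until one of the conversion operators is invoked.

#include <utility>

#include "meta_data.hpp"  // assumed location of ov::Meta inside the core sources
#include "openvino/core/any.hpp"

// Hypothetical eager implementation: the map is already in memory,
// so both conversion operators simply return it.
class EagerMeta : public ov::Meta {
public:
    explicit EagerMeta(ov::AnyMap map) : m_map(std::move(map)) {}

    operator ov::AnyMap&() override {
        return m_map;
    }

    operator const ov::AnyMap&() const override {
        return m_map;
    }

private:
    ov::AnyMap m_map;
};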

View File

@ -12,6 +12,7 @@
#include <string>
#include <vector>
#include "openvino/core/any.hpp"
#include "openvino/core/core_visibility.hpp"
#include "openvino/core/node.hpp"
#include "openvino/core/rtti.hpp"
@ -310,13 +311,131 @@ public:
/// \brief Return a variable by specified variable_id.
ov::op::util::Variable::Ptr get_variable_by_id(const std::string& variable_id) const;
/**
* @brief Returns the runtime info
*
* @return reference to ov::AnyMap with runtime info
*/
RTMap& get_rt_info() {
return m_rt_info;
}
/**
* @brief Returns the constant runtime info
*
* @return reference to const ov::AnyMap with runtime info
*/
const RTMap& get_rt_info() const {
return m_rt_info;
}
/**
* @brief Returns a runtime attribute for the path, throws an ov::Exception if path doesn't exist
*
* @tparam T the type of returned value
* @tparam Args types of variadic arguments
* @param args path to the runtime attribute
*
* @return constant reference to value from runtime info
*/
template <class T, class... Args, typename std::enable_if<!std::is_same<T, ov::Any>::value, bool>::type = true>
const T& get_rt_info(Args... args) const {
const ov::Any& arg = get_rt_arg<Args...>(m_rt_info, args...);
return arg.as<T>();
}
/**
* @brief Returns a runtime attribute for the path, throws an ov::Exception if path doesn't exist
*
* @tparam T the type of returned value
* @tparam Args types of variadic arguments
* @param args path to the runtime attribute
*
* @return constant reference to value from runtime info
*/
template <class T, class... Args, typename std::enable_if<std::is_same<T, ov::Any>::value, bool>::type = true>
const T& get_rt_info(Args... args) const {
const ov::Any& arg = get_rt_arg<Args...>(m_rt_info, args...);
return arg;
}
/**
* @brief Returns a runtime attribute for the path, throws an ov::Exception if path doesn't exist
*
* @tparam T the type of returned value
* @param args vector with path to the runtime attribute
*
* @return constant reference to value from runtime info
*/
template <class T, typename std::enable_if<!std::is_same<T, ov::Any>::value, bool>::type = true>
const T& get_rt_info(const std::vector<std::string>& args) const {
const ov::Any& arg = get_rt_info(m_rt_info, args.cbegin(), args.cend());
return arg.as<T>();
}
/**
* @brief Returns a runtime attribute for the path, throws an ov::Exception if path doesn't exist
*
* @tparam T the type of returned value
* @param args vector with path to the runtime attribute
*
* @return constant reference to value from runtime info
*/
template <class T, typename std::enable_if<std::is_same<T, ov::Any>::value, bool>::type = true>
const T& get_rt_info(const std::vector<std::string>& args) const {
const ov::Any& arg = get_rt_info(m_rt_info, args.cbegin(), args.cend());
return arg;
}
/**
* @brief Checks if given path exists in runtime info
*
* @tparam Args types of variadic arguments
* @param args path to the runtime attribute
*
* @return true if path exists, otherwise false
*/
template <class... Args>
bool has_rt_info(Args... args) const {
return has_rt_arg<Args...>(m_rt_info, args...);
}
/**
* @brief Checks if given path exists in runtime info
*
* @param args vector with path to the runtime attribute
*
* @return true if path exists, otherwise false
*/
bool has_rt_info(const std::vector<std::string>& args) const;
/**
* @brief Add value inside the runtime info
*
* @tparam T type of new value
* @tparam Args types of variadic arguments
* @param argument value for the runtime info
* @param args path to the runtime attribute
*/
template <class T, class... Args>
void set_rt_info(const T& argument, Args... args) {
ov::Any& arg = get_rt_arg<Args...>(m_rt_info, args...);
arg = argument;
}
/**
* @brief Add value inside the runtime info
*
* @tparam T type of new value
* @param argument value for the runtime info
* @param args vector with path to the runtime attribute
*/
template <class T>
void set_rt_info(const T& argument, const std::vector<std::string>& args) {
ov::Any& arg = get_rt_info(m_rt_info, args.cbegin(), args.cend());
arg = argument;
}
Model(const Model&) = delete;
Model(Model&&) = delete;
Model& operator=(const Model&) = delete;
@ -325,6 +444,92 @@ public:
private:
friend class ov::ModelAccessor;
// Allows getting an attribute by a vector path
ov::Any& get_rt_info(ov::AnyMap& info,
const std::vector<std::string>::const_iterator& begin,
const std::vector<std::string>::const_iterator& end);
// Allows getting a constant attribute by a vector path
const ov::Any& get_rt_info(const ov::AnyMap& info,
const std::vector<std::string>::const_iterator& begin,
const std::vector<std::string>::const_iterator& end) const;
// Checks rt attribute
template <class T,
typename std::enable_if<std::is_same<std::string, T>::value || std::is_same<T, const char*>::value ||
std::is_same<T, char*>::value,
bool>::type = true>
bool has_rt_arg(const ov::AnyMap& rt_info, const T& name) const {
return rt_info.find(name) != rt_info.end();
}
// Checks rt attribute
template <class T,
class... Args,
typename std::enable_if<std::is_same<std::string, T>::value || std::is_same<T, const char*>::value ||
std::is_same<T, char*>::value,
bool>::type = true>
bool has_rt_arg(const ov::AnyMap& rt_info, const T& name, Args... args) const {
bool has_attr = has_rt_arg(rt_info, name);
if (!has_attr)
return false;
const ov::Any& rt_attr = get_rt_arg<T>(rt_info, name);
const ov::AnyMap& new_map = get_map_from_attr(rt_attr);
return has_rt_arg<Args...>(new_map, args...);
}
// Allows getting a constant attribute via variadic path arguments
template <class T,
typename std::enable_if<std::is_same<std::string, T>::value || std::is_same<T, const char*>::value ||
std::is_same<T, char*>::value,
bool>::type = true>
const ov::Any& get_rt_arg(const ov::AnyMap& rt_info, const T& name) const {
if (rt_info.find(name) == rt_info.end())
throw ov::Exception("Cannot get runtime attribute. Path to runtime attribute is incorrect.");
return get_attr(rt_info.at(name));
}
// Allows getting a constant attribute via variadic path arguments
template <class T,
class... Args,
typename std::enable_if<std::is_same<std::string, T>::value || std::is_same<T, const char*>::value ||
std::is_same<T, char*>::value,
bool>::type = true>
const ov::Any& get_rt_arg(const ov::AnyMap& rt_info, const T& name, Args... args) const {
const ov::Any& rt_attr = get_rt_arg<T>(rt_info, name);
const ov::AnyMap& new_map = get_map_from_attr(rt_attr);
return get_rt_arg<Args...>(new_map, args...);
}
// Allows getting an attribute via variadic path arguments
template <class T,
typename std::enable_if<std::is_same<std::string, T>::value || std::is_same<T, const char*>::value ||
std::is_same<T, char*>::value,
bool>::type = true>
ov::Any& get_rt_arg(ov::AnyMap& rt_info, const T& name) {
return get_attr(rt_info[name]);
}
// Allows getting an attribute via variadic path arguments
template <class T,
class... Args,
typename std::enable_if<std::is_same<std::string, T>::value || std::is_same<T, const char*>::value ||
std::is_same<T, char*>::value,
bool>::type = true>
ov::Any& get_rt_arg(ov::AnyMap& rt_info, const T& name, Args... args) {
ov::Any& rt_attr = get_rt_arg<T>(rt_info, name);
ov::AnyMap& new_map = get_map_from_attr(rt_attr);
return get_rt_arg<Args...>(new_map, args...);
}
// Returns real ov::Any from argument
const ov::Any& get_attr(const ov::Any& info) const;
ov::Any& get_attr(ov::Any& info) const;
// Returns ov::AnyMap from argument
const ov::AnyMap& get_map_from_attr(const ov::Any& info) const;
ov::AnyMap& get_map_from_attr(ov::Any& info) const;
/// \brief Depending on the options selected,
/// checks all the Parameter/Variables are registered in the list of Model
/// parameters/variables or finds all Parameters/Variables in a model and registers them.
@ -360,7 +565,7 @@ private:
// for internal purposes.
std::shared_ptr<SharedRTInfo> m_shared_rt_info;
-mutable std::mutex m_topological_sort_mutex;
+mutable std::mutex m_model_mutex;
};
OPENVINO_API
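To make the new API above concrete, a short usage sketch (the helper function name is illustrative); it mirrors the core and serialization tests added later in this change. Paths can be passed either as variadic string arguments or as a vector of strings, and nested maps are created on demand when values are set.

#include <memory>
#include <string>
#include <vector>

#include "openvino/core/model.hpp"

void rt_info_usage(const std::shared_ptr<ov::Model>& model) {
    // Variadic path: the intermediate "framework" map is created on demand
    model->set_rt_info("TestVersion", "MO_version");
    model->set_rt_info("1", "framework", "batch");

    // Vector path is equivalent to the variadic form
    model->set_rt_info("16", std::vector<std::string>{"framework", "chunk_size"});

    if (model->has_rt_info("framework", "batch")) {
        // Throws ov::Exception if the path does not exist or the stored type does not match
        const std::string batch = model->get_rt_info<std::string>("framework", "batch");
        (void)batch;
    }
}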

View File

@ -10,6 +10,7 @@
#include "itt.hpp"
#include "layout_utils.hpp"
#include "meta_data.hpp"
#include "ngraph/evaluator.hpp"
#include "ngraph/function.hpp"
#include "ngraph/graph_util.hpp"
@ -267,7 +268,7 @@ void ov::Model::validate_nodes_and_infer_types() const {
std::vector<shared_ptr<ov::Node>> ov::Model::get_ordered_ops() const {
OV_ITT_SCOPED_TASK(ov::itt::domains::core, "Model::get_ordered_ops");
-lock_guard<mutex> lock(m_topological_sort_mutex);
+lock_guard<mutex> lock(m_model_mutex);
NodeVector nodes;
if (m_shared_rt_info->get_use_topological_cache()) {
@ -975,6 +976,98 @@ std::shared_ptr<ov::Model> ov::Model::clone() const {
return ov::clone_model(*this);
}
bool ov::Model::has_rt_info(const std::vector<std::string>& args) const {
ov::AnyMap info = m_rt_info;
for (size_t i = 0; i < args.size(); i++) {
bool has_attr = has_rt_arg(info, args[i]);
if (!has_attr)
return false;
if (i == args.size() - 1)
break;
const ov::Any& rt_attr = get_rt_arg<std::string>(info, args[i]);
info = get_map_from_attr(rt_attr);
}
return true;
}
ov::Any& ov::Model::get_rt_info(ov::AnyMap& info,
const std::vector<std::string>::const_iterator& begin,
const std::vector<std::string>::const_iterator& end) {
if (begin == end - 1) {
return get_rt_arg(info, *begin);
} else {
ov::Any& rt_attr = get_rt_arg<std::string>(info, *begin);
return get_rt_info(get_map_from_attr(rt_attr), begin + 1, end);
}
}
// Allows getting a constant attribute by a vector path
const ov::Any& ov::Model::get_rt_info(const ov::AnyMap& info,
const std::vector<std::string>::const_iterator& begin,
const std::vector<std::string>::const_iterator& end) const {
if (begin == end - 1) {
return get_rt_arg(info, *begin);
} else {
const ov::Any& rt_attr = get_rt_arg<std::string>(info, *begin);
return get_rt_info(get_map_from_attr(rt_attr), begin + 1, end);
}
}
const ov::AnyMap& ov::Model::get_map_from_attr(const ov::Any& info) const {
// Lock so that meta information can be read from different threads without
// requiring a thread-safe Meta implementation in each frontend
std::lock_guard<mutex> lock(m_model_mutex);
if (info.is<ov::AnyMap>()) {
return info.as<ov::AnyMap>();
} else if (info.is<std::shared_ptr<ov::Meta>>()) {
// Lazily stored meta information: the conversion operator triggers parsing
std::shared_ptr<ov::Meta> meta = info.as<std::shared_ptr<ov::Meta>>();
return *meta;
}
throw ov::Exception("Cannot get rt attribute. Keys are incorrect.");
}
ov::AnyMap& ov::Model::get_map_from_attr(ov::Any& info) const {
// Lock so that meta information can be read from different threads without
// requiring a thread-safe Meta implementation in each frontend
std::lock_guard<mutex> lock(m_model_mutex);
if (info.empty()) {
info = ov::AnyMap();
}
if (info.is<ov::AnyMap>()) {
return info.as<ov::AnyMap>();
} else if (info.is<std::shared_ptr<ov::Meta>>()) {
// Lazily stored meta information: the conversion operator triggers parsing
std::shared_ptr<ov::Meta> meta = info.as<std::shared_ptr<ov::Meta>>();
return *meta;
}
throw ov::Exception("Cannot get rt attribute. Keys are incorrect.");
}
const ov::Any& ov::Model::get_attr(const ov::Any& info) const {
// Lock so that meta information can be read from different threads without
// requiring a thread-safe Meta implementation in each frontend
std::lock_guard<mutex> lock(m_model_mutex);
if (info.is<std::shared_ptr<ov::Meta>>()) {
// Replace the lazy Meta holder with the parsed map so that parsing happens only once
std::shared_ptr<ov::Meta> meta = info.as<std::shared_ptr<ov::Meta>>();
ov::AnyMap& map = *meta;
const_cast<ov::Any&>(info) = map;
}
return info;
}
ov::Any& ov::Model::get_attr(ov::Any& info) const {
// Lock so that meta information can be read from different threads without
// requiring a thread-safe Meta implementation in each frontend
std::lock_guard<mutex> lock(m_model_mutex);
if (info.empty()) {
info = ov::AnyMap();
}
if (info.is<std::shared_ptr<ov::Meta>>()) {
// Replace the lazy Meta holder with the parsed map so that parsing happens only once
std::shared_ptr<ov::Meta> meta = info.as<std::shared_ptr<ov::Meta>>();
ov::AnyMap& map = *meta;
info = map;
}
return info;
}
namespace bs_util {
static int64_t get_batch(const ov::Layout& layout, const ov::PartialShape& shape) {
auto batch_idx = ov::layout::batch_idx(layout);

View File

@ -13,6 +13,7 @@
#include <unordered_map>
#include <unordered_set>
#include "meta_data.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/opsets/opset.hpp"
#include "ngraph/opsets/opset1.hpp"
@ -765,6 +766,25 @@ void auto_pad_resolving(ov::Node* node) {
}
}
void serialize_rt_info(pugi::xml_node& root, const std::string& name, const ov::Any& data) {
auto child = root.append_child(name.c_str());
if (data.is<std::shared_ptr<ov::Meta>>()) {
std::shared_ptr<ov::Meta> meta = data.as<std::shared_ptr<ov::Meta>>();
ov::AnyMap& map = *meta;
for (const auto& it : map) {
serialize_rt_info(child, it.first, it.second);
}
} else if (data.is<ov::AnyMap>()) {
const ov::AnyMap& any_map = data.as<ov::AnyMap>();
for (const auto& it : any_map) {
serialize_rt_info(child, it.first, it.second);
}
} else {
std::string value = data.as<std::string>();
child.append_attribute("value").set_value(value.c_str());
}
}
void ngfunction_2_ir(pugi::xml_node& netXml,
const ngraph::Function& f,
const std::map<std::string, ngraph::OpSet>& custom_opsets,
@ -955,6 +975,15 @@ void ngfunction_2_ir(pugi::xml_node& netXml,
edge.append_attribute("to-layer").set_value(e.to_layer);
edge.append_attribute("to-port").set_value(e.to_port);
}
// Serialize rt info
pugi::xml_node rt_info_node = netXml.append_child("rt_info");
for (const auto& it : f.get_rt_info()) {
// Skip IR version
if (it.first == "version")
continue;
serialize_rt_info(rt_info_node, it.first, it.second);
}
}
std::string valid_xml_path(const std::string& path) {
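A brief round-trip sketch of the behavior implemented above and exercised by the tests later in this change: runtime info set on the model is written into the <rt_info> section of the IR (the internal "version" key is skipped) and becomes available again after reading the serialized files back. The helper name and file names here are placeholders.

#include <memory>
#include <string>

#include "openvino/core/graph_util.hpp"
#include "openvino/openvino.hpp"

void rt_info_round_trip(const std::shared_ptr<ov::Model>& model, ov::Core& core) {
    model->set_rt_info("TestVersion", "MO_version");
    model->set_rt_info("TestConfig", "optimization", "config");

    // Emitted into the <rt_info> section by the serializer above
    ov::serialize(model, "serialized_model.xml", "serialized_model.bin");

    auto restored = core.read_model("serialized_model.xml", "serialized_model.bin");
    const auto mo_version = restored->get_rt_info<std::string>("MO_version");
    const auto config = restored->get_rt_info<std::string>("optimization", "config");
    (void)mo_version;
    (void)config;
}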

View File

@ -10,6 +10,7 @@
#include <test_common.hpp>
#include "common_test_utils/graph_comparator.hpp"
#include "openvino/core/except.hpp"
#include "openvino/core/partial_shape.hpp"
#include "openvino/opsets/opset8.hpp"
@ -1932,3 +1933,42 @@ TEST(model, clone_model) {
const auto res = fc.compare(model, cloned_model);
EXPECT_TRUE(res.valid) << res.message;
}
TEST(model, set_meta_information) {
auto arg0 = std::make_shared<ov::opset8::Parameter>(ov::element::f32, ov::PartialShape{1});
arg0->set_friendly_name("data");
arg0->get_output_tensor(0).set_names({"input"});
auto relu = std::make_shared<ov::opset8::Relu>(arg0);
relu->set_friendly_name("relu");
relu->get_output_tensor(0).set_names({"relu_t", "identity"});
auto f = std::make_shared<ov::Model>(relu, ov::ParameterVector{arg0});
std::string key = "data";
EXPECT_FALSE(f->has_rt_info(key, "test"));
EXPECT_THROW(f->get_rt_info<std::string>(key, "test"), ov::Exception);
EXPECT_FALSE(f->has_rt_info(key, "test1"));
EXPECT_THROW(f->get_rt_info<std::string>(key, "test1"), ov::Exception);
EXPECT_FALSE(f->has_rt_info({key, "test1"}));
EXPECT_THROW(f->get_rt_info<std::string>({key, "test1"}), ov::Exception);
f->set_rt_info("test_value", key, "test");
f->set_rt_info("1", {key, "test1"});
EXPECT_TRUE(f->has_rt_info(key, "test"));
EXPECT_NO_THROW(f->get_rt_info<std::string>(key, "test"));
EXPECT_EQ(f->get_rt_info<std::string>(key, "test"), "test_value");
EXPECT_THROW(f->get_rt_info<int>(key, "test"), ov::Exception);
EXPECT_TRUE(f->has_rt_info(key, "test1"));
EXPECT_NO_THROW(f->get_rt_info<std::string>(key, "test1"));
EXPECT_EQ(f->get_rt_info<std::string>(key, "test1"), "1");
EXPECT_EQ(f->get_rt_info<int>(key, "test1"), 1);
EXPECT_TRUE(f->has_rt_info({key, "test1"}));
EXPECT_NO_THROW(f->get_rt_info<std::string>({key, "test1"}));
EXPECT_EQ(f->get_rt_info<std::string>({key, "test1"}), "1");
EXPECT_EQ(f->get_rt_info<int>({key, "test1"}), 1);
}

View File

@ -106,3 +106,303 @@ INSTANTIATE_TEST_SUITE_P(ONNXSerialization,
std::make_tuple("add_abc_initializers.onnx", "")));
#endif
class MetaDataSerialize : public ov::test::TestsCommon {
public:
std::string ir_with_meta = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
<meta_data>
<MO_version value="TestVersion"/>
<Runtime_version value="TestVersion"/>
<cli_parameters>
<input_shape value="[1, 3, 22, 22]"/>
<transform value=""/>
<use_new_frontend value="False"/>
</cli_parameters>
</meta_data>
<framework_meta>
<batch value="1"/>
<chunk_size value="16"/>
</framework_meta>
<quantization_parameters>
<config>{
'compression': {
'algorithms': [
{
'name': 'DefaultQuantization',
'params': {
'num_samples_for_tuning': 2000,
'preset': 'performance',
'stat_subset_size': 300,
'use_layerwise_tuning': false
}
}
],
'dump_intermediate_model': true,
'target_device': 'ANY'
},
'engine': {
'models': [
{
'name': 'bert-small-uncased-whole-word-masking-squad-0001',
'launchers': [
{
'framework': 'openvino',
'adapter': {
'type': 'bert_question_answering',
'start_token_logits_output': 'output_s',
'end_token_logits_output': 'output_e'
},
'inputs': [
{
'name': 'input_ids',
'type': 'INPUT',
'value': 'input_ids'
},
{
'name': 'attention_mask',
'type': 'INPUT',
'value': 'input_mask'
},
{
'name': 'token_type_ids',
'type': 'INPUT',
'value': 'segment_ids'
}
],
'device': 'cpu'
}
],
'datasets': [
{
'name': 'squad_v1_1_msl384_mql64_ds128_lowercase',
'annotation_conversion': {
'converter': 'squad',
'testing_file': 'PATH',
'max_seq_length': 384,
'max_query_length': 64,
'doc_stride': 128,
'lower_case': true,
'vocab_file': 'PATH'
},
'reader': {
'type': 'annotation_features_extractor',
'features': [
'input_ids',
'input_mask',
'segment_ids'
]
},
'postprocessing': [
{
'type': 'extract_answers_tokens',
'max_answer': 30,
'n_best_size': 20
}
],
'metrics': [
{
'name': 'F1',
'type': 'f1',
'reference': 0.9157
},
{
'name': 'EM',
'type': 'exact_match',
'reference': 0.8504
}
],
'_command_line_mapping': {
'testing_file': 'PATH',
'vocab_file': [
'PATH'
]
}
}
]
}
],
'stat_requests_number': null,
'eval_requests_number': null,
'type': 'accuracy_checker'
}
}</config>
<version value="invalid version"/>
<cli_params value="{'quantize': None, 'preset': None, 'model': None, 'weights': None, 'name': None, 'engine': None, 'ac_config': None, 'max_drop': None, 'evaluate': False, 'output_dir': 'PATH', 'direct_dump': True, 'log_level': 'INFO', 'pbar': False, 'stream_output': False, 'keep_uncompressed_weights': False, 'data_source': None}"/>
</quantization_parameters>
</net>
)V0G0N";
std::string m_out_xml_path;
std::string m_out_bin_path;
void SetUp() override {
const std::string test_name = GetTestName() + "_" + GetTimestamp();
m_out_xml_path = test_name + ".xml";
m_out_bin_path = test_name + ".bin";
}
void check_meta_info(const std::shared_ptr<ov::Model>& model) {
auto& rt_info = model->get_rt_info();
const std::string pot_conf_ref =
"{ 'compression': { 'algorithms': [ { 'name': 'DefaultQuantization', 'params': { 'num_samples_for_tuning': "
"2000, 'preset': 'performance', 'stat_subset_size': 300, 'use_layerwise_tuning': false } } ], "
"'dump_intermediate_model': true, 'target_device': 'ANY' }, 'engine': { 'models': [ { 'name': "
"'bert-small-uncased-whole-word-masking-squad-0001', 'launchers': [ { 'framework': 'openvino', 'adapter': "
"{ 'type': 'bert_question_answering', 'start_token_logits_output': 'output_s', 'end_token_logits_output': "
"'output_e' }, 'inputs': [ { 'name': 'input_ids', 'type': 'INPUT', 'value': 'input_ids' }, { 'name': "
"'attention_mask', 'type': 'INPUT', 'value': 'input_mask' }, { 'name': 'token_type_ids', 'type': 'INPUT', "
"'value': 'segment_ids' } ], 'device': 'cpu' } ], 'datasets': [ { 'name': "
"'squad_v1_1_msl384_mql64_ds128_lowercase', 'annotation_conversion': { 'converter': 'squad', "
"'testing_file': 'PATH', 'max_seq_length': 384, 'max_query_length': 64, 'doc_stride': 128, 'lower_case': "
"true, 'vocab_file': 'PATH' }, 'reader': { 'type': 'annotation_features_extractor', 'features': [ "
"'input_ids', 'input_mask', 'segment_ids' ] }, 'postprocessing': [ { 'type': 'extract_answers_tokens', "
"'max_answer': 30, 'n_best_size': 20 } ], 'metrics': [ { 'name': 'F1', 'type': 'f1', 'reference': 0.9157 "
"}, { 'name': 'EM', 'type': 'exact_match', 'reference': 0.8504 } ], '_command_line_mapping': { "
"'testing_file': 'PATH', 'vocab_file': [ 'PATH' ] } } ] } ], 'stat_requests_number': null, "
"'eval_requests_number': null, 'type': 'accuracy_checker' } }";
ASSERT_TRUE(!rt_info.empty());
std::string version;
EXPECT_NO_THROW(version = model->get_rt_info<std::string>("MO_version"));
EXPECT_EQ(version, "TestVersion");
EXPECT_NO_THROW(version = model->get_rt_info<std::string>("Runtime_version"));
EXPECT_EQ(version, "TestVersion");
std::string pot_config;
EXPECT_NO_THROW(pot_config = model->get_rt_info<std::string>("optimization", "config"));
EXPECT_EQ(pot_config, pot_conf_ref);
ov::AnyMap cli_map;
EXPECT_NO_THROW(cli_map = model->get_rt_info<ov::AnyMap>("conversion_parameters"));
auto it = cli_map.find("input_shape");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "[1, 3, 22, 22]");
it = cli_map.find("transform");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "");
it = cli_map.find("use_new_frontend");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "False");
}
void TearDown() override {
std::remove(m_out_xml_path.c_str());
std::remove(m_out_bin_path.c_str());
}
};
TEST_F(MetaDataSerialize, get_meta_serialized_without_init) {
auto model = ov::test::readModel(ir_with_meta);
{
auto& rt_info = model->get_rt_info();
ASSERT_FALSE(rt_info.empty());
}
// Serialize the model
ov::serialize(model, m_out_xml_path, m_out_bin_path);
auto s_model = ov::test::readModel(m_out_xml_path, m_out_bin_path);
{
auto& rt_info = s_model->get_rt_info();
ASSERT_FALSE(rt_info.empty());
check_meta_info(s_model);
}
}
TEST_F(MetaDataSerialize, get_meta_serialized_with_init) {
auto model = ov::test::readModel(ir_with_meta);
{
auto& rt_info = model->get_rt_info();
ASSERT_FALSE(rt_info.empty());
check_meta_info(model);
}
// Serialize the model
ov::serialize(model, m_out_xml_path, m_out_bin_path);
auto s_model = ov::test::readModel(m_out_xml_path, m_out_bin_path);
{
auto& rt_info = s_model->get_rt_info();
ASSERT_FALSE(rt_info.empty());
check_meta_info(s_model);
}
}
TEST_F(MetaDataSerialize, get_meta_serialized_changed_meta) {
auto model = ov::test::readModel(ir_with_meta);
{
auto& rt_info = model->get_rt_info();
ASSERT_FALSE(rt_info.empty());
check_meta_info(model);
// Add new property to meta information
model->set_rt_info("my_value", "meta_data", "my_property");
}
// Serialize the model
ov::serialize(model, m_out_xml_path, m_out_bin_path);
auto s_model = ov::test::readModel(m_out_xml_path, m_out_bin_path);
{
std::string prop;
EXPECT_NO_THROW(prop = model->get_rt_info<std::string>("meta_data", "my_property"));
EXPECT_EQ(prop, "my_value");
auto& rt_info = s_model->get_rt_info();
ASSERT_NE(rt_info.find("meta_data"), rt_info.end());
check_meta_info(s_model);
}
}

View File

@ -7,4 +7,3 @@ add_subdirectory(src)
if(ENABLE_TESTS)
add_subdirectory(tests)
endif()

View File

@ -5,10 +5,13 @@
#include "ir_deserializer.hpp"
#include <pugixml.hpp>
#include <regex>
#include "ie_ngraph_utils.hpp"
#include "meta_data.hpp"
#include "ngraph/op/util/framework_node.hpp"
#include "ngraph/opsets/opset1.hpp"
#include "openvino/core/except.hpp"
#include "rt_info_deserializer.hpp"
#include "transformations/rt_info/attributes.hpp"
#include "utils.hpp"
@ -541,9 +544,137 @@ std::shared_ptr<ngraph::Function> XmlDeserializer::parse_function(
}
}
// Read meta data either from the new <rt_info> section or from the legacy representation
if (root.child("rt_info").empty()) {
// Legacy representation
// meta_data - MO meta
// quantization_parameters - NNCF quantization section
std::unordered_set<std::string> meta_names = {"meta_data", "quantization_parameters"};
read_legacy_meta_data(function, meta_names, root);
} else {
read_meta_data(function, root.child("rt_info"));
}
return function;
}
class MetaDataParser : public ov::Meta {
public:
MetaDataParser(const std::string& name, const pugi::xml_node& meta) : m_name(name) {
m_meta.append_copy(meta);
}
operator const ov::AnyMap&() const override {
parse();
return m_parsed_map;
}
operator ov::AnyMap&() override {
parse();
return m_parsed_map;
}
private:
bool has_attr(const pugi::xml_node& node, const std::string& name = "value") const {
auto attr = node.attribute(name.c_str());
return !attr.empty();
}
ov::Any parse_value(const pugi::xml_node& node) const {
if (has_attr(node)) {
return XMLParseUtils::GetStrAttr(node, "value");
} else if (std::string(node.name()) == "unset" && has_attr(node, "unset_cli_parameters")) {
return XMLParseUtils::GetStrAttr(node, "unset_cli_parameters");
} else {
return parse_node(node);
}
}
ov::AnyMap parse_node(const pugi::xml_node& node) const {
ov::AnyMap result;
const std::string node_name = node.name();
for (const auto& data : node.children()) {
const std::string data_name = data.name();
// WA for legacy POT config
if (data_name == "config" && node_name == "quantization_parameters") {
// Read legacy pot config
std::stringstream stream;
data.print(stream);
std::string str_config = stream.str();
str_config = std::regex_replace(str_config, std::regex("<config>"), "");
str_config = std::regex_replace(str_config, std::regex("</config>"), "");
str_config = std::regex_replace(str_config, std::regex("\n"), "");
str_config = std::regex_replace(str_config, std::regex("( +)"), " ");
result[data_name] = str_config;
} else {
result[data_name] = parse_value(data);
}
}
return result;
}
void parse() const {
// Thread safety is implemented at the ov::Model level
if (m_parsed)
return;
const pugi::xml_node& node = m_meta.child(m_name.c_str());
m_parsed_map = parse_node(node);
m_parsed = true;
}
pugi::xml_document m_meta;
const std::string m_name;
mutable ov::AnyMap m_parsed_map;
mutable bool m_parsed{false};
};
void XmlDeserializer::read_meta_data(const std::shared_ptr<ov::Model>& model, const pugi::xml_node& meta_section) {
if (meta_section.empty())
return;
auto& rt_info = model->get_rt_info();
for (const auto& data : meta_section.children()) {
if (data.empty())
continue;
if (!data.attribute("value").empty()) {
rt_info[data.name()] = XMLParseUtils::GetStrAttr(data, "value");
} else {
// Use a lazy meta-data parser for a nested set of parameters
std::shared_ptr<ov::Meta> meta = std::make_shared<MetaDataParser>(data.name(), data);
rt_info[data.name()] = meta;
}
}
}
void XmlDeserializer::read_legacy_meta_data(const std::shared_ptr<ov::Model>& model,
const std::unordered_set<std::string>& names,
const pugi::xml_node& root_section) {
const auto& read_meta = [](const std::shared_ptr<ov::Model>& model,
const std::string& name,
const pugi::xml_node& meta_section) {
auto& rt_info = model->get_rt_info();
if (name == "meta_data") {
for (const auto& data : meta_section.children()) {
const std::string& section_name = data.name();
// Rename cli_parameters to conversion_parameters
if (section_name == "cli_parameters") {
std::shared_ptr<ov::Meta> meta = std::make_shared<MetaDataParser>("cli_parameters", data);
rt_info["conversion_parameters"] = meta;
} else if (!data.attribute("value").empty()) {
rt_info[data.name()] = XMLParseUtils::GetStrAttr(data, "value");
} else {
throw ov::Exception(std::string("Unsupported legacy argument: ") + data.name());
}
}
} else if (name == "quantization_parameters") {
// Rename quantization_parameters to optimization
std::shared_ptr<ov::Meta> meta = std::make_shared<MetaDataParser>("quantization_parameters", meta_section);
rt_info["optimization"] = meta;
}
};
for (const auto& it : names)
read_meta(model, it, root_section.child(it.c_str()));
}
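For context, a minimal consumption sketch (the function name and file name are placeholders): the deserializer above stores a std::shared_ptr<ov::Meta> in the model's rt_info, and the XML section is parsed lazily on the first access through the ov::Model runtime-info API.

#include "openvino/openvino.hpp"

void read_conversion_parameters(ov::Core& core) {
    // Assumes an IR with a legacy <meta_data> section or a new <rt_info> section
    auto model = core.read_model("model_with_meta.xml");

    // The stored MetaDataParser is converted into an ov::AnyMap here, on first access
    if (model->has_rt_info("conversion_parameters", "input_shape")) {
        const auto input_shape = model->get_rt_info<std::string>("conversion_parameters", "input_shape");
        (void)input_shape;
    }
}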
GenericLayerParams XmlDeserializer::parseGenericParams(const pugi::xml_node& node) {
const auto parsePort = [this](const pugi::xml_node& parentNode,
const GenericLayerParams& params,
@ -742,6 +873,8 @@ std::shared_ptr<ngraph::Node> XmlDeserializer::createNode(
return;
for (const auto& item : rt_attrs) {
std::string attribute_name, attribute_version;
// Example:
// <attribute name="old_api_map_order" version="0" value="0,3,1,2"/>
if (!getStrAttribute(item, "name", attribute_name)) {
std::stringstream ss;
item.print(ss);

View File

@ -180,6 +180,12 @@ private:
const std::shared_ptr<ngraph::runtime::AlignedBuffer>& weights,
const GenericLayerParams& params);
void read_meta_data(const std::shared_ptr<ov::Model>& model, const pugi::xml_node& meta_section);
void read_legacy_meta_data(const std::shared_ptr<ov::Model>& model,
const std::unordered_set<std::string>& names,
const pugi::xml_node& root_section);
// -- DATA --
const pugi::xml_node m_node;
const std::shared_ptr<ngraph::runtime::AlignedBuffer>& m_weights;

View File

@ -0,0 +1,414 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include "file_utils.h"
#include "openvino/core/any.hpp"
#include "openvino/openvino.hpp"
class MetaData : public ::testing::Test {
public:
ov::Core core;
std::string ir_with_meta = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
<meta_data>
<MO_version value="TestVersion"/>
<Runtime_version value="TestVersion"/>
<cli_parameters>
<input_shape value="[1, 3, 22, 22]"/>
<transform value=""/>
<use_new_frontend value="False"/>
</cli_parameters>
</meta_data>
<quantization_parameters>
<config>{
'compression': {
'algorithms': [
{
'name': 'DefaultQuantization',
'params': {
'num_samples_for_tuning': 2000,
'preset': 'performance',
'stat_subset_size': 300,
'use_layerwise_tuning': false
}
}
],
'dump_intermediate_model': true,
'target_device': 'ANY'
},
'engine': {
'models': [
{
'name': 'bert-small-uncased-whole-word-masking-squad-0001',
'launchers': [
{
'framework': 'openvino',
'adapter': {
'type': 'bert_question_answering',
'start_token_logits_output': 'output_s',
'end_token_logits_output': 'output_e'
},
'inputs': [
{
'name': 'input_ids',
'type': 'INPUT',
'value': 'input_ids'
},
{
'name': 'attention_mask',
'type': 'INPUT',
'value': 'input_mask'
},
{
'name': 'token_type_ids',
'type': 'INPUT',
'value': 'segment_ids'
}
],
'device': 'cpu'
}
],
'datasets': [
{
'name': 'squad_v1_1_msl384_mql64_ds128_lowercase',
'annotation_conversion': {
'converter': 'squad',
'testing_file': 'PATH',
'max_seq_length': 384,
'max_query_length': 64,
'doc_stride': 128,
'lower_case': true,
'vocab_file': 'PATH'
},
'reader': {
'type': 'annotation_features_extractor',
'features': [
'input_ids',
'input_mask',
'segment_ids'
]
},
'postprocessing': [
{
'type': 'extract_answers_tokens',
'max_answer': 30,
'n_best_size': 20
}
],
'metrics': [
{
'name': 'F1',
'type': 'f1',
'reference': 0.9157
},
{
'name': 'EM',
'type': 'exact_match',
'reference': 0.8504
}
],
'_command_line_mapping': {
'testing_file': 'PATH',
'vocab_file': [
'PATH'
]
}
}
]
}
],
'stat_requests_number': null,
'eval_requests_number': null,
'type': 'accuracy_checker'
}
}</config>
<version value="invalid version"/>
<cli_params value="{'quantize': None, 'preset': None, 'model': None, 'weights': None, 'name': None, 'engine': None, 'ac_config': None, 'max_drop': None, 'evaluate': False, 'output_dir': 'PATH', 'direct_dump': True, 'log_level': 'INFO', 'pbar': False, 'stream_output': False, 'keep_uncompressed_weights': False, 'data_source': None}"/>
</quantization_parameters>
</net>
)V0G0N";
std::string ir_with_new_meta = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
<rt_info>
<MO_version value="TestVersion" />
<Runtime_version value="TestVersion" />
<conversion_parameters>
<input_shape value="[1, 3, 22, 22]" />
<transform value="" />
<use_new_frontend value="False" />
</conversion_parameters>
<optimization>
<cli_params value="{'quantize': None, 'preset': None, 'model': None, 'weights': None, 'name': None, 'engine': None, 'ac_config': None, 'max_drop': None, 'evaluate': False, 'output_dir': 'PATH', 'direct_dump': True, 'log_level': 'INFO', 'pbar': False, 'stream_output': False, 'keep_uncompressed_weights': False, 'data_source': None}" />
<config value="{ 'compression': { 'algorithms': [ { 'name': 'DefaultQuantization', 'params': { 'num_samples_for_tuning': 2000, 'preset': 'performance', 'stat_subset_size': 300, 'use_layerwise_tuning': false } } ], 'dump_intermediate_model': true, 'target_device': 'ANY' }, 'engine': { 'models': [ { 'name': 'bert-small-uncased-whole-word-masking-squad-0001', 'launchers': [ { 'framework': 'openvino', 'adapter': { 'type': 'bert_question_answering', 'start_token_logits_output': 'output_s', 'end_token_logits_output': 'output_e' }, 'inputs': [ { 'name': 'input_ids', 'type': 'INPUT', 'value': 'input_ids' }, { 'name': 'attention_mask', 'type': 'INPUT', 'value': 'input_mask' }, { 'name': 'token_type_ids', 'type': 'INPUT', 'value': 'segment_ids' } ], 'device': 'cpu' } ], 'datasets': [ { 'name': 'squad_v1_1_msl384_mql64_ds128_lowercase', 'annotation_conversion': { 'converter': 'squad', 'testing_file': 'PATH', 'max_seq_length': 384, 'max_query_length': 64, 'doc_stride': 128, 'lower_case': true, 'vocab_file': 'PATH' }, 'reader': { 'type': 'annotation_features_extractor', 'features': [ 'input_ids', 'input_mask', 'segment_ids' ] }, 'postprocessing': [ { 'type': 'extract_answers_tokens', 'max_answer': 30, 'n_best_size': 20 } ], 'metrics': [ { 'name': 'F1', 'type': 'f1', 'reference': 0.9157 }, { 'name': 'EM', 'type': 'exact_match', 'reference': 0.8504 } ], '_command_line_mapping': { 'testing_file': 'PATH', 'vocab_file': [ 'PATH' ] } } ] } ], 'stat_requests_number': null, 'eval_requests_number': null, 'type': 'accuracy_checker' } }" />
<version value="invalid version" />
</optimization>
<framework>
<batch value="1"/>
<chunk_size value="16"/>
</framework>
</rt_info>
</net>
)V0G0N";
std::string ir_without_meta = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
</net>
)V0G0N";
void SetUp() override {}
void check_rt_info(const std::shared_ptr<ov::Model>& model, bool with_framework = false) {
const std::string pot_conf_ref =
"{ 'compression': { 'algorithms': [ { 'name': 'DefaultQuantization', 'params': { 'num_samples_for_tuning': "
"2000, 'preset': 'performance', 'stat_subset_size': 300, 'use_layerwise_tuning': false } } ], "
"'dump_intermediate_model': true, 'target_device': 'ANY' }, 'engine': { 'models': [ { 'name': "
"'bert-small-uncased-whole-word-masking-squad-0001', 'launchers': [ { 'framework': 'openvino', 'adapter': "
"{ 'type': 'bert_question_answering', 'start_token_logits_output': 'output_s', 'end_token_logits_output': "
"'output_e' }, 'inputs': [ { 'name': 'input_ids', 'type': 'INPUT', 'value': 'input_ids' }, { 'name': "
"'attention_mask', 'type': 'INPUT', 'value': 'input_mask' }, { 'name': 'token_type_ids', 'type': 'INPUT', "
"'value': 'segment_ids' } ], 'device': 'cpu' } ], 'datasets': [ { 'name': "
"'squad_v1_1_msl384_mql64_ds128_lowercase', 'annotation_conversion': { 'converter': 'squad', "
"'testing_file': 'PATH', 'max_seq_length': 384, 'max_query_length': 64, 'doc_stride': 128, 'lower_case': "
"true, 'vocab_file': 'PATH' }, 'reader': { 'type': 'annotation_features_extractor', 'features': [ "
"'input_ids', 'input_mask', 'segment_ids' ] }, 'postprocessing': [ { 'type': 'extract_answers_tokens', "
"'max_answer': 30, 'n_best_size': 20 } ], 'metrics': [ { 'name': 'F1', 'type': 'f1', 'reference': 0.9157 "
"}, { 'name': 'EM', 'type': 'exact_match', 'reference': 0.8504 } ], '_command_line_mapping': { "
"'testing_file': 'PATH', 'vocab_file': [ 'PATH' ] } } ] } ], 'stat_requests_number': null, "
"'eval_requests_number': null, 'type': 'accuracy_checker' } }";
auto& rt_info = model->get_rt_info();
ASSERT_TRUE(!rt_info.empty());
std::string value;
EXPECT_NO_THROW(value = model->get_rt_info<std::string>("MO_version"));
EXPECT_EQ(value, "TestVersion");
value = "";
EXPECT_NO_THROW(value = model->get_rt_info<std::string>("Runtime_version"));
EXPECT_EQ(value, "TestVersion");
ov::AnyMap cli_map;
EXPECT_NO_THROW(cli_map = model->get_rt_info<ov::AnyMap>("conversion_parameters"));
auto it = cli_map.find("input_shape");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "[1, 3, 22, 22]");
it = cli_map.find("transform");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "");
it = cli_map.find("use_new_frontend");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "False");
EXPECT_NO_THROW(value = model->get_rt_info<std::string>("optimization", "config"));
EXPECT_EQ(value, pot_conf_ref);
if (with_framework) {
EXPECT_NO_THROW(value = model->get_rt_info<std::string>("framework", "batch"));
EXPECT_EQ(value, "1");
EXPECT_NO_THROW(value = model->get_rt_info<std::string>("framework", "chunk_size"));
EXPECT_EQ(value, "16");
}
}
};
TEST_F(MetaData, get_meta_data_from_model_without_info) {
auto model = core.read_model(ir_without_meta, ov::Tensor());
auto& rt_info = model->get_rt_info();
ASSERT_EQ(rt_info.find("meta_data"), rt_info.end());
}
TEST_F(MetaData, get_meta_data_as_map_from_model_without_info) {
auto model = core.read_model(ir_without_meta, ov::Tensor());
auto& rt_info = model->get_rt_info();
auto it = rt_info.find("meta_data");
EXPECT_EQ(it, rt_info.end());
it = rt_info.find("MO_version");
EXPECT_EQ(it, rt_info.end());
ov::AnyMap meta;
ASSERT_THROW(meta = model->get_rt_info<ov::AnyMap>("meta_data"), ov::Exception);
ASSERT_TRUE(meta.empty());
}
TEST_F(MetaData, get_meta_data) {
auto model = core.read_model(ir_with_meta, ov::Tensor());
auto& rt_info = model->get_rt_info();
ASSERT_NE(rt_info.find("MO_version"), rt_info.end());
ASSERT_NE(rt_info.find("Runtime_version"), rt_info.end());
ASSERT_NE(rt_info.find("conversion_parameters"), rt_info.end());
ASSERT_NE(rt_info.find("optimization"), rt_info.end());
}
TEST_F(MetaData, get_meta_data_as_map) {
auto model = core.read_model(ir_with_meta, ov::Tensor());
check_rt_info(model);
}
TEST_F(MetaData, get_meta_data_from_removed_file) {
std::string file_path =
InferenceEngine::getIELibraryPath() + ov::util::FileTraits<char>::file_separator + "test_model.xml";
// Create file
{
std::ofstream ir(file_path);
ir << ir_with_meta;
}
auto model = core.read_model(file_path);
// Remove file (meta section wasn't read)
std::remove(file_path.c_str());
check_rt_info(model);
}
TEST_F(MetaData, get_meta_data_as_map_from_new_format) {
auto model = core.read_model(ir_with_new_meta, ov::Tensor());
check_rt_info(model, true);
}

View File

@ -1176,4 +1176,4 @@ TEST_F(RTInfoDeserialization, v11_to_v10_without_rt_info) {
ASSERT_NE(nullptr, f_10);
check_version(f_10, 10);
}
}

View File

@ -0,0 +1,147 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gtest/gtest.h>
#include <string>
#include <thread>
#include "openvino/openvino.hpp"
class IRFRThreadingTests : public ::testing::Test {
public:
void run_parallel(std::function<void(void)> func,
const unsigned int iterations = 100,
const unsigned int threadsNum = 8) {
std::vector<std::thread> threads(threadsNum);
for (auto& thread : threads) {
thread = std::thread([&]() {
for (unsigned int i = 0; i < iterations; ++i) {
func();
}
});
}
for (auto& thread : threads) {
if (thread.joinable())
thread.join();
}
}
ov::Core core;
std::string ir_with_meta = R"V0G0N(
<net name="Network" version="11">
<layers>
<layer name="in1" type="Parameter" id="0" version="opset1">
<data element_type="f32" shape="1,3,22,22"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="activation" id="1" type="ReLU" version="opset1">
<input>
<port id="1" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</output>
</layer>
<layer name="output" type="Result" id="2" version="opset1">
<input>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>3</dim>
<dim>22</dim>
<dim>22</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
<edge from-layer="0" from-port="0" to-layer="1" to-port="1"/>
</edges>
<meta_data>
<MO_version value="TestVersion"/>
<Runtime_version value="TestVersion"/>
<cli_parameters>
<input_shape value="[1, 3, 22, 22]"/>
<transform value=""/>
<use_new_frontend value="False"/>
<cli_parameters>
<input_shape value="[1, 3, 22, 22]"/>
<transform value=""/>
<use_new_frontend value="False"/>
<MO_version value="TestVersion"/>
<Runtime_version value="TestVersion"/>
<cli_parameters>
<input_shape value="[1, 3, 22, 22]"/>
<transform value=""/>
<use_new_frontend value="False"/>
<cli_parameters>
<input_shape value="[1, 3, 22, 22]"/>
<transform value=""/>
<use_new_frontend value="False"/>
</cli_parameters>
</cli_parameters>
</cli_parameters>
</cli_parameters>
</meta_data>
</net>
)V0G0N";
void SetUp() override {}
};
TEST_F(IRFRThreadingTests, get_meta_data_in_different_threads) {
auto model = core.read_model(ir_with_meta, ov::Tensor());
run_parallel([&]() {
auto& rt_info = model->get_rt_info();
auto it = rt_info.find("MO_version");
ASSERT_NE(it, rt_info.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "TestVersion");
it = rt_info.find("Runtime_version");
ASSERT_NE(it, rt_info.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "TestVersion");
ov::AnyMap cli_map;
EXPECT_NO_THROW(cli_map = model->get_rt_info<ov::AnyMap>("conversion_parameters"));
it = cli_map.find("input_shape");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "[1, 3, 22, 22]");
it = cli_map.find("transform");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "");
it = cli_map.find("use_new_frontend");
ASSERT_NE(it, cli_map.end());
EXPECT_TRUE(it->second.is<std::string>());
EXPECT_EQ(it->second.as<std::string>(), "False");
});
}

View File

@ -149,9 +149,8 @@ std::shared_ptr<IExecutableNetworkInternal> IInferencePlugin::LoadNetwork(
function->get_rt_info() = orig_function->get_rt_info();
}
if (function && !IsNewAPI()) {
-auto& rt_info = function->get_rt_info();
-if (rt_info.find("version") == rt_info.end()) {
-rt_info["version"] = int64_t(10);
+if (!function->has_rt_info("version")) {
+function->set_rt_info(int64_t(10), "version");
// re-create `network` with new patched `function`
using namespace InferenceEngine;
@ -422,10 +421,8 @@ void SetExeNetworkInfo(const std::shared_ptr<IExecutableNetworkInternal>& exeNet
std::unordered_set<std::string> leaf_names;
bool add_operation_names = false;
-const auto& rt_info = function->get_rt_info();
-const auto it = rt_info.find("version");
-if (it != rt_info.end()) {
-const int64_t ir_version = it->second.as<int64_t>();
+if (function->has_rt_info("version")) {
+const int64_t ir_version = function->get_rt_info<int64_t>("version");
// here we decide whether we need to add operation_names as tensor names for
// getInputs / getOutputs. Since these functions are designed to be used in new API only
// always need to add operation names for IR v10

View File

@ -255,15 +255,14 @@ CNNNetwork convert_to_cnnnetwork(std::shared_ptr<ngraph::Function>& function,
bool newAPI,
bool frontendMode = false) {
auto& rt_info = function->get_rt_info();
-const auto it = rt_info.find("version");
-const bool is_ir = it != rt_info.end();
+const bool is_ir = function->has_rt_info("version");
// only for IR cases we need preprocessing or postprocessing steps
if (is_ir) {
using namespace ov::preprocess;
PrePostProcessor prepost(function);
-const int64_t ir_version = it->second.as<int64_t>();
+const int64_t ir_version = function->get_rt_info<int64_t>("version");
if (ir_version == 10 && newAPI) {
std::unordered_map<std::string, std::shared_ptr<ov::descriptor::Tensor>> leaf_names;