Add nGraph function serialization. (#2579)

* Add nGraph function serialization.

* Turn off exception throwing on failed visitation.

* CNNNetworkNGraphImpl serialize also supports fallback to v7 serialization.

* Add error message for legacy IR not implemented case.

* Store tests models in files.

* Add tests with multiple layers.

* Style aligned to IE rules.

* Add visit_attributes to ExecutionNode.

* Layer version XML attribute implementation.

* Fix opset attribute creation for ExecutionGraph. Refactoring.

* Add missing header.

* Move opset collecting to private scope.

* Add missing header.

* Add test with multiple outputs. Fix found issues: constant name, result
outputs.

* Move serialization to transformation library.

* Add versioning to serialization transformation.

* Add functional tests with ONNX importer path.

* Add nodes unique name checking and correction.

* Add checks for unsupported cases: dynamic shapes & GenericIE node

* General refactoring.

* Add comment describing type name translations.

* Add serialization deterministicity tests.

It's needed to ensure that subsequent calls to serialize() on the same function are giving the same results.

* Serialization in CNNNetworkNGraphImpl::serialize executed via pass::Manager.

Co-authored-by: Gleb Kazantaev <gleb.nnstu@gmail.com>

* NGRAPH_CHECK messages refactored.

* Performance and const correctness refactoring.

* Style formatting applied.

* Code simplification.

* Serialize transformation documentation refactoring.

* Changed compare_function() to throw on functions with multiple outputs.

Before this check was implemented with assert which means it was working
only in debug builds. Now it is working also in release build.

* Adjust tests to new compare_functions() behaviour.

* Replace cmakes add_definitions with more modern target_compile_definitions

Co-authored-by: Gleb Kazantaev <gleb.nnstu@gmail.com>
This commit is contained in:
Jozef Daniecki 2020-10-27 04:57:48 +01:00 committed by GitHub
parent 940eb43095
commit c6fc247f99
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 1640 additions and 229 deletions

View File

@ -26,8 +26,6 @@ set(IE_BASE_SOURCE_FILES
${CMAKE_CURRENT_SOURCE_DIR}/ie_rtti.cpp
${CMAKE_CURRENT_SOURCE_DIR}/precision_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/shape_infer/ie_built_in_holder.cpp
${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.hpp
${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.cpp
${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.hpp)

View File

@ -23,13 +23,13 @@
#include <transformations/utils/utils.hpp>
#include <transformations/smart_reshape/set_batch_size.hpp>
#include <transformations/smart_reshape/smart_reshape.hpp>
#include "transformations/serialize.hpp"
#include <legacy/transformations/convert_opset1_to_legacy/convert_one_hot_to_one_hot_ie.hpp>
#include <legacy/ie_ngraph_utils.hpp>
#include "exec_graph_info.hpp"
#include "ie_itt.hpp"
#include "network_serializer.hpp"
#include "generic_ie.hpp"
#include "shape_infer/ie_built_in_holder.hpp"
@ -418,43 +418,31 @@ CNNNetworkNGraphImpl::reshape(const std::map<std::string, std::vector<size_t>>&
}
}
StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, const std::string& binPath,
StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath,
const std::string& binPath,
ResponseDesc* resp) const noexcept {
auto network = cnnNetwork;
if (!network) {
// TODO: once Serialization::SerializeV10 supports true IR v10
// remove this conversion and WA for execution graph
try {
bool isExecutionGraph = true;
for (const auto & op : _ngraph_function->get_ops()) {
auto & rtInfo = op->get_rt_info();
if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) {
isExecutionGraph = false;
break;
}
}
if (isExecutionGraph) {
Serialization::SerializeV10(xmlPath, binPath, (InferenceEngine::ICNNNetwork&)*this);
return OK;
}
try {
if (getFunction()) {
ngraph::pass::Manager manager;
manager.register_pass<ngraph::pass::Serialize>(xmlPath, binPath);
manager.run_passes(_ngraph_function);
} else {
#ifdef ENABLE_V7_SERIALIZE
network = std::make_shared<details::CNNNetworkImpl>(*this);
#endif
} catch (const InferenceEngineException& e) {
return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
} catch (const std::exception& e) {
return DescriptionBuffer(UNEXPECTED, resp) << e.what();
} catch (...) {
return DescriptionBuffer(UNEXPECTED, resp);
}
}
#ifdef ENABLE_V7_SERIALIZE
return network->serialize(xmlPath, binPath, resp);
auto network = std::make_shared<details::CNNNetworkImpl>(*this);
return network->serialize(xmlPath, binPath, resp);
#else
return DescriptionBuffer(NOT_IMPLEMENTED, resp) << "The serialize for IR v10 is not implemented";
return DescriptionBuffer(NOT_IMPLEMENTED, resp)
<< "The serialization of legacy IR is not implemented";
#endif
}
} catch (const InferenceEngineException& e) {
return DescriptionBuffer(GENERAL_ERROR, resp) << e.what();
} catch (const std::exception& e) {
return DescriptionBuffer(UNEXPECTED, resp) << e.what();
} catch (...) {
return DescriptionBuffer(UNEXPECTED, resp);
}
return OK;
}
StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept {

View File

@ -1,168 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "network_serializer.hpp"
#include <map>
#include <deque>
#include <string>
#include <vector>
#include "exec_graph_info.hpp"
#include "xml_parse_utils.h"
#include <legacy/ie_ngraph_utils.hpp>
#include <ngraph/variant.hpp>
#include <ngraph/function.hpp>
namespace InferenceEngine {
namespace Serialization {
namespace {
// Builds an execution-graph IR xml document (<net>) from the ngraph::Function
// held by `network`. Nodes are expected to carry ExecGraphInfoSerialization
// runtime info; the mandatory LAYER_TYPE entry becomes the layer "type"
// attribute and the remaining string-typed entries become <data> attributes.
// Throws if the network has no ngraph::Function or a node lacks LAYER_TYPE.
void FillXmlDocWithExecutionNGraph(const InferenceEngine::ICNNNetwork& network,
                                   pugi::xml_document& doc) {
    std::shared_ptr<const ngraph::Function> function = network.getFunction();
    if (function == nullptr) {
        THROW_IE_EXCEPTION << network.getName() << " does not represent ngraph::Function";
    }

    std::vector<std::shared_ptr<ngraph::Node>> ordered = function->get_ordered_ops();
    pugi::xml_node netXml = doc.append_child("net");
    netXml.append_attribute("name").set_value(network.getName().c_str());
    pugi::xml_node layers = netXml.append_child("layers");
    // node -> layer id; ids follow the topological order of `ordered`
    std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> matching;

    for (size_t i = 0; i < ordered.size(); ++i) {
        matching[ordered[i]] = i;
        const std::shared_ptr<ngraph::Node> node = ordered[i];
        auto params = node->get_rt_info();

        auto layerTypeVariant = params.find(ExecGraphInfoSerialization::LAYER_TYPE);
        if (layerTypeVariant == params.end()) {
            THROW_IE_EXCEPTION << node->get_friendly_name() << " does not define "
                               << ExecGraphInfoSerialization::LAYER_TYPE << " attribute.";
        }
        using VariantString = ngraph::VariantImpl<std::string>;
        auto layerTypeValueStr = std::dynamic_pointer_cast<VariantString>(layerTypeVariant->second);
        IE_ASSERT(layerTypeValueStr != nullptr);
        // LAYER_TYPE is emitted as the "type" attribute, not inside <data>
        params.erase(layerTypeVariant);

        pugi::xml_node layer = layers.append_child("layer");
        layer.append_attribute("name").set_value(node->get_friendly_name().c_str());
        layer.append_attribute("type").set_value(layerTypeValueStr->get().c_str());
        layer.append_attribute("id").set_value(i);

        if (!params.empty()) {
            pugi::xml_node data = layer.append_child("data");

            for (const auto& it : params) {
                // only string-typed rt_info entries are representable as xml attributes
                if (auto strValue = std::dynamic_pointer_cast<VariantString>(it.second))
                    data.append_attribute(it.first.c_str()).set_value(strValue->get().c_str());
            }
        }

        if (node->get_input_size() > 0) {
            pugi::xml_node input = layer.append_child("input");

            for (size_t iport = 0; iport < node->get_input_size(); iport++) {
                const ngraph::Shape & dims = node->get_input_shape(iport);
                pugi::xml_node port = input.append_child("port");

                port.append_attribute("id").set_value(iport);
                for (auto dim : dims) {
                    port.append_child("dim").text().set(dim);
                }
            }
        }

        if (node->get_output_size() > 0 &&
            // ngraph::op::Result still have single output while we should not print it
            !std::dynamic_pointer_cast<ngraph::op::Result>(node)) {
            pugi::xml_node output = layer.append_child("output");

            for (size_t oport = 0; oport < node->get_output_size(); oport++) {
                pugi::xml_node port = output.append_child("port");
                Precision outputPrecision = details::convertPrecision(node->get_output_element_type(oport));

                // output port ids continue numbering after the input ports
                port.append_attribute("id").set_value(node->get_input_size() + oport);
                port.append_attribute("precision").set_value(outputPrecision.name());

                for (const auto dim : node->get_output_shape(oport)) {
                    port.append_child("dim").text().set(dim);
                }
            }
        }
    }

    pugi::xml_node edges = netXml.append_child("edges");

    for (const auto& ord : ordered) {
        const std::shared_ptr<ngraph::Node> parentNode = ord;

        if (parentNode->get_output_size() > 0) {
            auto itFrom = matching.find(parentNode);
            if (itFrom == matching.end()) {
                THROW_IE_EXCEPTION << "Internal error, cannot find " << parentNode->get_friendly_name()
                                   << " in matching container during serialization of IR";
            }

            for (size_t oport = 0; oport < parentNode->get_output_size(); oport++) {
                ngraph::Output<ngraph::Node> parentPort = parentNode->output(oport);

                for (const auto& childPort : parentPort.get_target_inputs()) {
                    ngraph::Node * childNode = childPort.get_node();
                    // find the exact input index of the child fed by this output;
                    // size_t (was `int`) matches get_input_size() and the other loops
                    for (size_t iport = 0; iport < childNode->get_input_size(); iport++) {
                        if (childNode->input_value(iport).get_node() == parentPort.get_node()) {
                            auto itTo = matching.find(childNode->shared_from_this());
                            if (itTo == matching.end()) {
                                THROW_IE_EXCEPTION << "Broken edge from layer "
                                                   << parentNode->get_friendly_name() << " to layer "
                                                   << childNode->get_friendly_name()
                                                   << " during serialization of IR";
                            }

                            pugi::xml_node edge = edges.append_child("edge");
                            edge.append_attribute("from-layer").set_value(itFrom->second);
                            edge.append_attribute("from-port").set_value(oport + parentNode->get_input_size());
                            edge.append_attribute("to-layer").set_value(itTo->second);
                            edge.append_attribute("to-port").set_value(iport);
                        }
                    }
                }
            }
        }
    }
}
} // namespace
// Serializes `network` into IR v10 xml at `xmlPath`.
// Only execution graphs are supported: every operation must carry the
// ExecGraphInfoSerialization::PERF_COUNTER runtime-info entry. Throws for
// networks without an ngraph::Function, for non-execution graphs, and when
// the xml file cannot be written.
void SerializeV10(const std::string& xmlPath, const std::string& binPath,
                  const InferenceEngine::ICNNNetwork& network) {
    auto function = network.getFunction();
    if (!function) {
        THROW_IE_EXCEPTION << "Serialization to IR v7 is removed from Inference Engine";
    }

    // A network is an execution graph iff every op carries a performance stat
    bool execGraphInfoSerialization = true;
    for (const auto& op : function->get_ops()) {
        const auto& rtInfo = op->get_rt_info();
        if (rtInfo.count(ExecGraphInfoSerialization::PERF_COUNTER) == 0) {
            execGraphInfoSerialization = false;
            break;
        }
    }
    if (!execGraphInfoSerialization) {
        THROW_IE_EXCEPTION << "Serialization to IR v10 is not implemented in Inference Engine";
    }

    pugi::xml_document doc;
    FillXmlDocWithExecutionNGraph(network, doc);

    if (!doc.save_file(xmlPath.c_str())) {
        THROW_IE_EXCEPTION << "File '" << xmlPath << "' was not serialized";
    }
}
} // namespace Serialization
} // namespace InferenceEngine

View File

@ -1,24 +0,0 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ie_icnn_network.hpp>
#include <string>
#include <vector>
namespace InferenceEngine {
namespace Serialization {
/**
* @brief Serializes a network into IE IR v10 XML file and binary weights file
* @param xmlPath Path to XML file
* @param binPath Path to BIN file
* @param network network to be serialized
*/
void SerializeV10(const std::string& xmlPath, const std::string& binPath,
const InferenceEngine::ICNNNetwork& network);
} // namespace Serialization
} // namespace InferenceEngine

View File

@ -128,6 +128,10 @@ public:
return cloned;
}
// ExecutionNode has no attributes of its own to visit; reporting success
// here lets serialization (which requires visit_attributes to return true)
// accept these nodes.
bool visit_attributes(ngraph::AttributeVisitor&) override {
    return true;
}
};
} // namespace ExecGraphInfoSerialization

View File

@ -25,7 +25,7 @@ ie_faster_build(${TARGET_NAME}
)
target_link_libraries(${TARGET_NAME} PUBLIC ${NGRAPH_LIBRARIES}
PRIVATE openvino::itt ngraph::builder)
PRIVATE openvino::itt ngraph::builder pugixml)
target_include_directories(${TARGET_NAME} PUBLIC ${PUBLIC_HEADERS_DIR}
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src")

View File

@ -0,0 +1,43 @@
// Copyright (C) 2018-2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#pragma once
#include <string>
#include "ngraph/pass/pass.hpp"
#include "transformations_visibility.hpp"
namespace ngraph {
namespace pass {
class TRANSFORMATIONS_API Serialize;
} // namespace pass
} // namespace ngraph
/**
* @ingroup ie_transformation_common_api
* @brief Serialize transformation converts ngraph::Function into IR files
* @attention
* - dynamic shapes are not supported
* - GenericIE operation type (experimental opset) is not supported
* - order of generated layers in xml file is ngraph specific (given by
* get_ordered_ops()); MO generates file with different order, but they are
* logically equivalent
*/
class ngraph::pass::Serialize : public ngraph::pass::FunctionPass {
public:
    // Target IR format; only IR v10 is implemented so far.
    enum class Version { IR_V10 };
    NGRAPH_RTTI_DECLARATION;

    // Writes the xml to m_xmlPath and the weights to m_binPath.
    // Returns false: the function itself is never modified.
    bool run_on_function(std::shared_ptr<ngraph::Function> f) override;

    // xmlPath/binPath - output file locations; version - IR format to emit.
    Serialize(const std::string& xmlPath, const std::string& binPath,
              Version version = Version::IR_V10)
        : m_xmlPath{xmlPath}, m_binPath{binPath}, m_version{version} {}

private:
    const std::string m_xmlPath;
    const std::string m_binPath;
    const Version m_version;
};

View File

@ -0,0 +1,369 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <array>
#include <fstream>
#include <unordered_map>
#include <unordered_set>
#include "ngraph/ops.hpp"
#include "ngraph/opsets/opset.hpp"
#include "pugixml.hpp"
#include "transformations/serialize.hpp"
using namespace ngraph;
NGRAPH_RTTI_DEFINITION(ngraph::pass::Serialize, "Serialize", 0);
namespace { // helpers
// Joins the elements of `vec` into one string, separated by `glue`
// (default ","). Returns an empty string for an empty vector.
template <typename T, typename A>
std::string joinVec(std::vector<T, A> const& vec,
                    std::string const& glue = std::string(",")) {
    std::stringstream joined;
    bool first = true;
    for (const auto& element : vec) {
        if (!first) {
            joined << glue;
        }
        joined << element;
        first = false;
    }
    return joined.str();
}
} // namespace
namespace { // implementation details
// One IR <edge/> entry: connects from_layer:from_port -> to_layer:to_port.
struct Edge {
    int from_layer = 0;
    int from_port = 0;
    int to_layer = 0;
    int to_port = 0;
};

// Placement of a Constant's payload within the generated .bin file.
struct ConstantAtributes {
    int size = 0;    // payload size in bytes
    int offset = 0;  // byte offset inside the bin file
};
// AttributeVisitor that appends every visited node attribute as an xml
// attribute of the <data> element supplied at construction. Vector-valued
// attributes are rendered as comma-separated lists.
class XmlVisitor : public ngraph::AttributeVisitor {
    pugi::xml_node m_data;  // the <data> element receiving the attributes

    // Renders a vector attribute as "v0,v1,...".
    template <typename T>
    std::string create_atribute_list(
        ngraph::ValueAccessor<std::vector<T>>& adapter) {
        return joinVec(adapter.get(), std::string(","));
    }

public:
    XmlVisitor(pugi::xml_node& data) : m_data(data) {}

    // Untyped adapters are currently skipped (see the disabled code below).
    void on_adapter(const std::string& name,
                    ngraph::ValueAccessor<void>& adapter) override {
#if 0 // TODO: remove when Constant will support VisitorAPI
        m_data.append_attribute(name.c_str());
#endif
    }
    void on_adapter(const std::string& name,
                    ngraph::ValueAccessor<bool>& adapter) override {
        m_data.append_attribute(name.c_str()).set_value(adapter.get());
    }
    void on_adapter(const std::string& name,
                    ngraph::ValueAccessor<std::string>& adapter) override {
        m_data.append_attribute(name.c_str()).set_value(adapter.get().c_str());
    }
    void on_adapter(const std::string& name,
                    ngraph::ValueAccessor<int64_t>& adapter) override {
        m_data.append_attribute(name.c_str()).set_value(adapter.get());
    }
    void on_adapter(const std::string& name,
                    ngraph::ValueAccessor<double>& adapter) override {
        m_data.append_attribute(name.c_str()).set_value(adapter.get());
    }
    void on_adapter(
        const std::string& name,
        ngraph::ValueAccessor<std::vector<int64_t>>& adapter) override {
        m_data.append_attribute(name.c_str())
            .set_value(create_atribute_list(adapter).c_str());
    }
    void on_adapter(
        const std::string& name,
        ngraph::ValueAccessor<std::vector<uint64_t>>& adapter) override {
        m_data.append_attribute(name.c_str())
            .set_value(create_atribute_list(adapter).c_str());
    }
    void on_adapter(
        const std::string& name,
        ngraph::ValueAccessor<std::vector<float>>& adapter) override {
        m_data.append_attribute(name.c_str())
            .set_value(create_atribute_list(adapter).c_str());
    }
    void on_adapter(
        const std::string& name,
        ngraph::ValueAccessor<std::vector<std::string>>& adapter) override {
        m_data.append_attribute(name.c_str())
            .set_value(create_atribute_list(adapter).c_str());
    }
};
// Assigns each node a layer id (0, 1, ...) following topological
// get_ordered_ops() order and returns the node -> id mapping.
const std::unordered_map<ngraph::Node*, int> create_layer_ids(
    const ngraph::Function& f) {
    std::unordered_map<ngraph::Node*, int> ids;
    int next_id = 0;
    for (const auto& op : f.get_ordered_ops()) {
        ids.emplace(op.get(), next_id);
        ++next_id;
    }
    return ids;
}
// Collects all layer-to-layer connections of `f` as Edge records, ordered by
// from_layer id. Parameter nodes have no inputs and are skipped as edge
// destinations.
const std::vector<Edge> create_edge_mapping(
    const std::unordered_map<ngraph::Node*, int>& layer_ids,
    const ngraph::Function& f) {
    std::vector<Edge> edges;
    for (const auto& node : f.get_ordered_ops()) {
        if (ngraph::op::is_parameter(node)) {
            continue;
        }

        for (const auto& i : node->inputs()) {
            auto source_output = i.get_source_output();
            auto source_node = source_output.get_node();
            auto current_node = i.get_node();

            NGRAPH_CHECK(layer_ids.find(source_node) != layer_ids.end(),
                         "Internal error");
            NGRAPH_CHECK(layer_ids.find(current_node) != layer_ids.end(),
                         "Internal error");

            Edge e{};
            e.from_layer = layer_ids.find(source_node)->second;
            // output port ids continue numbering after the input ports
            e.from_port =
                source_node->get_input_size() + source_output.get_index();
            e.to_layer = layer_ids.find(current_node)->second;
            e.to_port = i.get_index();
            edges.push_back(e);
        }
    }
    // stable_sort keeps the deterministic traversal order for edges that
    // share a from_layer; std::sort gives no such guarantee, which would
    // break the serialization-determinism contract
    std::stable_sort(begin(edges), end(edges),
                     [](const Edge& a, const Edge& b) -> bool {
                         return a.from_layer < b.from_layer;
                     });
    return edges;
}
// TODO: refactor to Vistor API when Constant will be supporting it
// TODO: refactor to Vistor API when Constant will be supporting it
// Appends the raw payload of Constant `c` to `bin` and returns its
// size/offset within the bin buffer. Requires a static output shape.
ConstantAtributes dump_constant_data(std::vector<uint8_t>& bin,
                                     const ngraph::op::Constant& c) {
    // index 0 is the Constant's single output (was the `0.` double literal)
    NGRAPH_CHECK(c.get_output_partial_shape(0).is_static(),
                 "Unsupported dynamic output shape in ", c);

    ConstantAtributes attr;
    const uint8_t* p = reinterpret_cast<const uint8_t*>(c.get_data_ptr());
    attr.size = ngraph::shape_size(c.get_shape()) * c.get_element_type().size();
    attr.offset = bin.size();
    bin.insert(end(bin), p, p + attr.size);
    return attr;
}
// Returns the name ("opset1".."opset5") of the oldest opset containing the
// node's type, or "experimental" when no standard opset contains it.
std::string get_opset_name(const ngraph::Node* n) {
    // the opsets are singletons - collect the references only once
    static const auto opsets =
        std::array<std::reference_wrapper<const ngraph::OpSet>, 5>{
            ngraph::get_opset1(), ngraph::get_opset2(), ngraph::get_opset3(),
            ngraph::get_opset4(), ngraph::get_opset5()};

    // return the oldest opset name where node type is present
    for (size_t idx = 0; idx < opsets.size(); idx++) {
        if (opsets[idx].get().contains_op_type(n)) {
            return "opset" + std::to_string(idx + 1);
        }
    }
    return "experimental";
}
// Here operation type names are translated from ngraph convention to IR
// convention. Most of them are the same, but there are exceptions, e.g
// Constant (ngraph name) and Const (IR name). If there will be more
// discrepancies discovered, translations needs to be added here.
std::string get_type_name(const ngraph::Node* n) {
    std::string name = n->get_type_name();
    // GenericIE has no well-defined IR representation
    NGRAPH_CHECK(name != "GenericIE", "Unsupported type in ", n);

    // built once; maps ngraph type names to their IR spellings
    static const std::unordered_map<std::string, std::string> translator = {
        {"Constant", "Const"}};

    // single lookup instead of count() followed by at()
    const auto it = translator.find(name);
    if (it != translator.end()) {
        name = it->second;
    }
    return name;
}
// Maps an output's ngraph element type to the IR precision spelling
// (e.g. f32 -> "FP32", u1 -> "BIN"). Fails with NGRAPH_CHECK for element
// types that have no IR precision name.
std::string get_output_precision_name(ngraph::Output<Node>& o) {
    auto elem_type = o.get_element_type();
    switch (elem_type) {
    case ::ngraph::element::Type_t::undefined:
        return "UNSPECIFIED";
    case ::ngraph::element::Type_t::f16:
        return "FP16";
    case ::ngraph::element::Type_t::f32:
        return "FP32";
    case ::ngraph::element::Type_t::bf16:
        return "BF16";
    case ::ngraph::element::Type_t::i8:
        return "I8";
    case ::ngraph::element::Type_t::i16:
        return "I16";
    case ::ngraph::element::Type_t::i32:
        return "I32";
    case ::ngraph::element::Type_t::i64:
        return "I64";
    case ::ngraph::element::Type_t::u8:
        return "U8";
    case ::ngraph::element::Type_t::u16:
        return "U16";
    case ::ngraph::element::Type_t::u32:
        return "U32";
    case ::ngraph::element::Type_t::u64:
        return "U64";
    case ::ngraph::element::Type_t::u1:
        return "BIN";
    case ::ngraph::element::Type_t::boolean:
        return "BOOL";
    default:
        NGRAPH_CHECK(false, "Unsupported precision in ", o);
        return "";
    }
}
// Returns the first name of the form base_name + N (N = suffix, suffix+1, ...)
// that is not present in unique_names. Iterative rather than recursive so a
// long run of taken suffixes cannot grow the call stack.
std::string generate_unique_name(
    const std::unordered_set<std::string>& unique_names, std::string base_name,
    int suffix) {
    std::string new_name = base_name + std::to_string(suffix);
    while (unique_names.find(new_name) != unique_names.end()) {
        ++suffix;
        new_name = base_name + std::to_string(suffix);
    }
    return new_name;
}
// TODO: remove when CNNNetwork will be supporting not-unique names
// Returns the node's friendly name, disambiguated with a numeric suffix when
// it collides with a name already handed out; records the result in
// unique_names.
std::string get_node_unique_name(std::unordered_set<std::string>& unique_names,
                                 const ngraph::Node* n) {
    std::string candidate = n->get_friendly_name();
    const bool already_taken = unique_names.count(candidate) != 0;
    if (already_taken) {
        candidate = generate_unique_name(unique_names, candidate, 0);
    }
    unique_names.insert(candidate);
    return candidate;
}
// Emits the IR v10 xml representation of `f` into `doc` and appends every
// Constant payload to `bin` (the future .bin weights file content).
// Layer ids and layer order follow topological get_ordered_ops() order.
void ngfunction_2_irv10(pugi::xml_document& doc, std::vector<uint8_t>& bin,
                        const ngraph::Function& f) {
    pugi::xml_node netXml = doc.append_child("net");
    netXml.append_attribute("name").set_value(f.get_friendly_name().c_str());
    netXml.append_attribute("version").set_value("10");
    pugi::xml_node layers = netXml.append_child("layers");
    const std::unordered_map<ngraph::Node*, int> layer_ids =
        create_layer_ids(f);
    std::unordered_set<std::string> unique_names;
    for (const auto& n : f.get_ordered_ops()) {
        ngraph::Node* node = n.get();

        NGRAPH_CHECK(layer_ids.find(node) != layer_ids.end(), "Internal error");
        // <layers>
        pugi::xml_node layer = layers.append_child("layer");
        layer.append_attribute("id").set_value(layer_ids.find(node)->second);
        layer.append_attribute("name").set_value(
            get_node_unique_name(unique_names, node).c_str());
        layer.append_attribute("type").set_value(get_type_name(node).c_str());
        // "version" holds the opset the type comes from (or "experimental")
        layer.append_attribute("version").set_value(
            get_opset_name(node).c_str());

        // <layers/data>
        pugi::xml_node data = layer.append_child("data");

        // <layers/data> general atributes
        XmlVisitor visitor{data};
        NGRAPH_CHECK(node->visit_attributes(visitor),
                     "Visitor API is not supported in ", node);

        // <layers/data> constant atributes (special case)
        if (auto constant = dynamic_cast<ngraph::op::Constant*>(node)) {
            ConstantAtributes attr = dump_constant_data(bin, *constant);
            data.append_attribute("offset").set_value(attr.offset);
            data.append_attribute("size").set_value(attr.size);
        }

        // port ids are unique within a layer: inputs first, then outputs
        int port_id = 0;
        // <layers/input>
        if (node->get_input_size() > 0) {
            pugi::xml_node input = layer.append_child("input");
            for (auto i : node->inputs()) {
                NGRAPH_CHECK(i.get_partial_shape().is_static(),
                             "Unsupported dynamic input shape in ", node);

                pugi::xml_node port = input.append_child("port");
                port.append_attribute("id").set_value(port_id++);
                for (auto d : i.get_shape()) {
                    pugi::xml_node dim = port.append_child("dim");
                    dim.append_child(pugi::xml_node_type::node_pcdata)
                        .set_value(std::to_string(d).c_str());
                }
            }
        }
        // <layers/output> -- skipped for Result (is_output) nodes
        if ((node->get_output_size() > 0) && !ngraph::op::is_output(node)) {
            pugi::xml_node output = layer.append_child("output");
            for (auto o : node->outputs()) {
                NGRAPH_CHECK(o.get_partial_shape().is_static(),
                             "Unsupported dynamic output shape in ", node);

                pugi::xml_node port = output.append_child("port");
                port.append_attribute("id").set_value(port_id++);
                port.append_attribute("precision")
                    .set_value(get_output_precision_name(o).c_str());
                for (auto d : o.get_shape()) {
                    pugi::xml_node dim = port.append_child("dim");
                    dim.append_child(pugi::xml_node_type::node_pcdata)
                        .set_value(std::to_string(d).c_str());
                }
            }
        }
    }
    // <edges>
    const std::vector<Edge> edge_mapping = create_edge_mapping(layer_ids, f);
    pugi::xml_node edges = netXml.append_child("edges");
    for (auto e : edge_mapping) {
        pugi::xml_node edge = edges.append_child("edge");
        edge.append_attribute("from-layer").set_value(e.from_layer);
        edge.append_attribute("from-port").set_value(e.from_port);
        edge.append_attribute("to-layer").set_value(e.to_layer);
        edge.append_attribute("to-port").set_value(e.to_port);
    }
}
} // namespace
// ! [function_pass:serialize_cpp]
// serialize.cpp
// Serializes the function to the configured xml/bin paths.
// Returns false: the pass never modifies the nGraph Function.
bool pass::Serialize::run_on_function(std::shared_ptr<ngraph::Function> f) {
    // prepare data
    pugi::xml_document xml_doc;
    std::vector<uint8_t> constants;
    switch (m_version) {
    case Version::IR_V10:
        ngfunction_2_irv10(xml_doc, constants, *f);
        break;
    default:
        NGRAPH_UNREACHABLE("Unsupported version");
        break;
    }

    // create xml file; fail loudly instead of silently writing nothing
    std::ofstream xml_file(m_xmlPath, std::ios::out);
    NGRAPH_CHECK(xml_file, "Can't open xml file for writing: ", m_xmlPath);
    xml_doc.save(xml_file);

    // create bin file; fail loudly instead of silently writing nothing
    std::ofstream bin_file(m_binPath, std::ios::out | std::ios::binary);
    NGRAPH_CHECK(bin_file, "Can't open bin file for writing: ", m_binPath);
    bin_file.write(reinterpret_cast<const char*>(constants.data()),
                   constants.size() * sizeof(constants[0]));

    // Return false because we didn't change nGraph Function
    return false;
}
// ! [function_pass:serialize_cpp]

View File

@ -215,3 +215,7 @@ else()
# FLAGS "/we4996 /W4 /WX"
# PLUGIN_API)
endif()
# ir serialization functional tests variables
target_compile_definitions(${TARGET_NAME} PRIVATE IR_SERIALIZATION_MODELS_PATH="${CMAKE_CURRENT_SOURCE_DIR}/ir_serialization/models/")

View File

@ -0,0 +1,122 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "gtest/gtest.h"
#include "ie_core.hpp"
#ifndef IR_SERIALIZATION_MODELS_PATH // should be already defined by cmake
#define IR_SERIALIZATION_MODELS_PATH ""
#endif
// Fixture for determinism tests: each test serializes the same network twice
// and compares the two xml/bin outputs byte for byte.
class SerializationDeterministicityTest : public ::testing::Test {
protected:
    // per-test output names so parallel/neighbouring tests don't clash
    std::string test_name =
        ::testing::UnitTest::GetInstance()->current_test_info()->name();
    std::string m_out_xml_path_1 = test_name + "1" + ".xml";
    std::string m_out_bin_path_1 = test_name + "1" + ".bin";
    std::string m_out_xml_path_2 = test_name + "2" + ".xml";
    std::string m_out_bin_path_2 = test_name + "2" + ".bin";

    // remove generated artifacts after every test
    void TearDown() override {
        std::remove(m_out_xml_path_1.c_str());
        std::remove(m_out_xml_path_2.c_str());
        std::remove(m_out_bin_path_1.c_str());
        std::remove(m_out_bin_path_2.c_str());
    }

    // Byte-wise comparison of two open streams; false when either stream is
    // unreadable or the contents/lengths differ.
    bool files_equal(std::ifstream& f1, std::ifstream& f2) {
        if (!f1.good()) return false;
        if (!f2.good()) return false;

        while (!f1.eof() && !f2.eof()) {
            if (f1.get() != f2.get()) {
                return false;
            }
        }

        // if only one stream reached EOF the files have different lengths
        if (f1.eof() != f2.eof()) {
            return false;
        }
        return true;
    }
};
// Two serializations of a basic ONNX-imported model must be byte-identical.
TEST_F(SerializationDeterministicityTest, BasicModel) {
    const std::string model = IR_SERIALIZATION_MODELS_PATH "add_abc.prototxt";

    InferenceEngine::Core ie;
    auto expected = ie.ReadNetwork(model);
    expected.serialize(m_out_xml_path_1, m_out_bin_path_1);
    expected.serialize(m_out_xml_path_2, m_out_bin_path_2);

    std::ifstream xml_1(m_out_xml_path_1, std::ios::in | std::ios::binary);
    std::ifstream bin_1(m_out_bin_path_1, std::ios::in | std::ios::binary);
    std::ifstream xml_2(m_out_xml_path_2, std::ios::in | std::ios::binary);
    std::ifstream bin_2(m_out_bin_path_2, std::ios::in | std::ios::binary);

    ASSERT_TRUE(files_equal(xml_1, xml_2));
    ASSERT_TRUE(files_equal(bin_1, bin_2));
}

// Same determinism check for an IR model whose Split produces two outputs.
TEST_F(SerializationDeterministicityTest, ModelWithMultipleOutputs) {
    const std::string model =
        IR_SERIALIZATION_MODELS_PATH "split_equal_parts_2d.xml";
    const std::string weights =
        IR_SERIALIZATION_MODELS_PATH "split_equal_parts_2d.bin";

    InferenceEngine::Core ie;
    auto expected = ie.ReadNetwork(model, weights);
    expected.serialize(m_out_xml_path_1, m_out_bin_path_1);
    expected.serialize(m_out_xml_path_2, m_out_bin_path_2);

    std::ifstream xml_1(m_out_xml_path_1, std::ios::in | std::ios::binary);
    std::ifstream bin_1(m_out_bin_path_1, std::ios::in | std::ios::binary);
    std::ifstream xml_2(m_out_xml_path_2, std::ios::in | std::ios::binary);
    std::ifstream bin_2(m_out_bin_path_2, std::ios::in | std::ios::binary);

    ASSERT_TRUE(files_equal(xml_1, xml_2));
    ASSERT_TRUE(files_equal(bin_1, bin_2));
}

// Same determinism check for an ONNX model with several chained layers.
TEST_F(SerializationDeterministicityTest, ModelWithMultipleLayers) {
    const std::string model =
        IR_SERIALIZATION_MODELS_PATH "addmul_abc.prototxt";

    InferenceEngine::Core ie;
    auto expected = ie.ReadNetwork(model);
    expected.serialize(m_out_xml_path_1, m_out_bin_path_1);
    expected.serialize(m_out_xml_path_2, m_out_bin_path_2);

    std::ifstream xml_1(m_out_xml_path_1, std::ios::in | std::ios::binary);
    std::ifstream bin_1(m_out_bin_path_1, std::ios::in | std::ios::binary);
    std::ifstream xml_2(m_out_xml_path_2, std::ios::in | std::ios::binary);
    std::ifstream bin_2(m_out_bin_path_2, std::ios::in | std::ios::binary);

    ASSERT_TRUE(files_equal(xml_1, xml_2));
    ASSERT_TRUE(files_equal(bin_1, bin_2));
}

// Same determinism check for an IR model carrying Constant initializers,
// which also exercises the bin-file writing path.
TEST_F(SerializationDeterministicityTest, ModelWithConstants) {
    const std::string model =
        IR_SERIALIZATION_MODELS_PATH "add_abc_initializers.xml";
    const std::string weights =
        IR_SERIALIZATION_MODELS_PATH "add_abc_initializers.bin";

    InferenceEngine::Core ie;
    auto expected = ie.ReadNetwork(model, weights);
    expected.serialize(m_out_xml_path_1, m_out_bin_path_1);
    expected.serialize(m_out_xml_path_2, m_out_bin_path_2);

    std::ifstream xml_1(m_out_xml_path_1, std::ios::in | std::ios::binary);
    std::ifstream bin_1(m_out_bin_path_1, std::ios::in | std::ios::binary);
    std::ifstream xml_2(m_out_xml_path_2, std::ios::in | std::ios::binary);
    std::ifstream bin_2(m_out_bin_path_2, std::ios::in | std::ios::binary);

    ASSERT_TRUE(files_equal(xml_1, xml_2));
    ASSERT_TRUE(files_equal(bin_1, bin_2));
}

View File

@ -0,0 +1,74 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "A"
input: "B"
output: "X"
name: "add_node1"
op_type: "Add"
}
node {
input: "X"
input: "C"
output: "Y"
name: "add_node2"
op_type: "Add"
}
name: "test_graph"
input {
name: "A"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "B"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "C"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "Y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 4
}

View File

@ -0,0 +1,112 @@
<?xml version="1.0" ?>
<net name="add_abc" version="10">
<layers>
<layer id="0" name="A" type="Parameter" version="opset1">
<data element_type="f32" shape="1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1" name="B" type="Parameter" version="opset1">
<data element_type="f32" shape="1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2" name="add_node1" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="3" name="C" type="Parameter" version="opset1">
<data element_type="f32" shape="1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="4" name="Y" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="5" name="Y/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
<edge from-layer="3" from-port="0" to-layer="4" to-port="1"/>
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
</edges>
<meta_data>
<MO_version value="unknown version"/>
<cli_parameters>
<caffe_parser_path value="DIR"/>
<data_type value="float"/>
<disable_nhwc_to_nchw value="False"/>
<disable_omitting_optional value="False"/>
<disable_resnet_optimization value="False"/>
<disable_weights_compression value="False"/>
<enable_concat_optimization value="False"/>
<enable_flattening_nested_params value="False"/>
<enable_ssd_gluoncv value="False"/>
<extensions value="DIR"/>
<framework value="onnx"/>
<freeze_placeholder_with_value value="{}"/>
<generate_deprecated_IR_V7 value="False"/>
<input_model value="DIR/add_abc.onnx"/>
<input_model_is_text value="False"/>
<k value="DIR/CustomLayersMapping.xml"/>
<keep_shape_ops value="True"/>
<legacy_mxnet_model value="False"/>
<log_level value="ERROR"/>
<mean_scale_values value="{}"/>
<mean_values value="()"/>
<model_name value="add_abc"/>
<output_dir value="DIR"/>
<placeholder_data_types value="{}"/>
<progress value="False"/>
<remove_memory value="False"/>
<remove_output_softmax value="False"/>
<reverse_input_channels value="False"/>
<save_params_from_nd value="False"/>
<scale_values value="()"/>
<silent value="False"/>
<static_shape value="False"/>
<stream_output value="False"/>
<unset unset_cli_parameters="batch, counts, disable_fusing, disable_gfusing, finegrain_fusing, input, input_checkpoint, input_meta_graph, input_proto, input_shape, input_symbol, mean_file, mean_file_offsets, move_to_preprocess, nd_prefix_name, output, placeholder_shapes, pretrained_model_name, saved_model_dir, saved_model_tags, scale, tensorboard_logdir, tensorflow_custom_layer_libraries, tensorflow_custom_operations_config_update, tensorflow_object_detection_api_pipeline_config, tensorflow_use_custom_operations_config, transformations_config"/>
</cli_parameters>
</meta_data>
</net>

View File

@ -0,0 +1,95 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
output: "B"
op_type: "Constant"
attribute {
name: "value"
t {
dims: 2
dims: 2
data_type: 1
float_data: 1
float_data: 2
float_data: 3
float_data: 4
name: "const_tensor"
}
type: TENSOR
}
}
node {
input: "A"
input: "B"
output: "X"
name: "add_node1"
op_type: "Add"
}
node {
input: "X"
input: "C"
output: "Y"
name: "add_node2"
op_type: "Add"
}
name: "test_graph"
initializer {
dims: 2
dims: 2
data_type: 1
name: "A"
raw_data: "\000\000\200?\000\000\000@\000\000@@\000\000\200@"
}
input {
name: "A"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "C"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
output {
name: "Y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 4
}

View File

@ -0,0 +1,93 @@
<?xml version="1.0" ?>
<net name="add_abc_const" version="10">
<layers>
<layer id="0" name="add_node1/Output_0/Data__const" type="Const" version="opset1">
<data element_type="f32" offset="0" shape="2,2" size="16"/>
<output>
<port id="1" precision="FP32">
<dim>2</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="1" name="C" type="Parameter" version="opset1">
<data element_type="f32" shape="2,2"/>
<output>
<port id="0" precision="FP32">
<dim>2</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="2" name="Y" type="Add" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>2</dim>
</port>
<port id="1">
<dim>2</dim>
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>2</dim>
</port>
</output>
</layer>
<layer id="3" name="Y/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>2</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="1" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="3" to-port="0"/>
</edges>
<meta_data>
<MO_version value="unknown version"/>
<cli_parameters>
<caffe_parser_path value="DIR"/>
<data_type value="float"/>
<disable_nhwc_to_nchw value="False"/>
<disable_omitting_optional value="False"/>
<disable_resnet_optimization value="False"/>
<disable_weights_compression value="False"/>
<enable_concat_optimization value="False"/>
<enable_flattening_nested_params value="False"/>
<enable_ssd_gluoncv value="False"/>
<extensions value="DIR"/>
<framework value="onnx"/>
<freeze_placeholder_with_value value="{}"/>
<generate_deprecated_IR_V7 value="False"/>
<input_model value="DIR/add_abc_const.onnx"/>
<input_model_is_text value="False"/>
<k value="DIR/CustomLayersMapping.xml"/>
<keep_shape_ops value="True"/>
<legacy_mxnet_model value="False"/>
<log_level value="ERROR"/>
<mean_scale_values value="{}"/>
<mean_values value="()"/>
<model_name value="add_abc_const"/>
<output_dir value="DIR"/>
<placeholder_data_types value="{}"/>
<progress value="False"/>
<remove_memory value="False"/>
<remove_output_softmax value="False"/>
<reverse_input_channels value="False"/>
<save_params_from_nd value="False"/>
<scale_values value="()"/>
<silent value="False"/>
<static_shape value="False"/>
<stream_output value="False"/>
<unset unset_cli_parameters="batch, counts, disable_fusing, disable_gfusing, finegrain_fusing, input, input_checkpoint, input_meta_graph, input_proto, input_shape, input_symbol, mean_file, mean_file_offsets, move_to_preprocess, nd_prefix_name, output, placeholder_shapes, pretrained_model_name, saved_model_dir, saved_model_tags, scale, tensorboard_logdir, tensorflow_custom_layer_libraries, tensorflow_custom_operations_config_update, tensorflow_object_detection_api_pipeline_config, tensorflow_use_custom_operations_config, transformations_config"/>
</cli_parameters>
</meta_data>
</net>

View File

@ -0,0 +1,100 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "A"
input: "B"
output: "X1"
name: "add_node1"
op_type: "Add"
}
node {
input: "A"
input: "B"
output: "X2"
name: "add_node2"
op_type: "Mul"
}
node {
input: "X1"
input: "X2"
output: "Z1"
name: "add_node3"
op_type: "Add"
}
node {
input: "Z1"
input: "C"
output: "Z2"
name: "add_node4"
op_type: "Add"
}
node {
input: "Z2"
input: "C"
output: "Y"
name: "add_node5"
op_type: "Add"
}
name: "test_graph"
input {
name: "A"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "B"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "C"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "Y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 4
}

View File

@ -0,0 +1,163 @@
<?xml version="1.0" ?>
<net name="addmul_abc" version="10">
<layers>
<layer id="0" name="A" type="Parameter" version="opset1">
<data element_type="f32" shape="1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="1" name="B" type="Parameter" version="opset1">
<data element_type="f32" shape="1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="2" name="add_node1" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="3" name="add_node2" type="Multiply" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="4" name="add_node3" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="5" name="C" type="Parameter" version="opset1">
<data element_type="f32" shape="1"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="6" name="add_node4" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="7" name="Y" type="Add" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
<port id="1">
<dim>1</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
</port>
</output>
</layer>
<layer id="8" name="Y/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
<edge from-layer="3" from-port="2" to-layer="4" to-port="1"/>
<edge from-layer="4" from-port="2" to-layer="6" to-port="0"/>
<edge from-layer="5" from-port="0" to-layer="6" to-port="1"/>
<edge from-layer="6" from-port="2" to-layer="7" to-port="0"/>
<edge from-layer="5" from-port="0" to-layer="7" to-port="1"/>
<edge from-layer="7" from-port="2" to-layer="8" to-port="0"/>
</edges>
<meta_data>
<MO_version value="unknown version"/>
<cli_parameters>
<caffe_parser_path value="DIR"/>
<data_type value="float"/>
<disable_nhwc_to_nchw value="False"/>
<disable_omitting_optional value="False"/>
<disable_resnet_optimization value="False"/>
<disable_weights_compression value="False"/>
<enable_concat_optimization value="False"/>
<enable_flattening_nested_params value="False"/>
<enable_ssd_gluoncv value="False"/>
<extensions value="DIR"/>
<framework value="onnx"/>
<freeze_placeholder_with_value value="{}"/>
<generate_deprecated_IR_V7 value="False"/>
<input_model value="DIR/addmul_abc.onnx"/>
<input_model_is_text value="False"/>
<k value="DIR/CustomLayersMapping.xml"/>
<keep_shape_ops value="True"/>
<legacy_mxnet_model value="False"/>
<log_level value="ERROR"/>
<mean_scale_values value="{}"/>
<mean_values value="()"/>
<model_name value="addmul_abc"/>
<output_dir value="DIR"/>
<placeholder_data_types value="{}"/>
<progress value="False"/>
<remove_memory value="False"/>
<remove_output_softmax value="False"/>
<reverse_input_channels value="False"/>
<save_params_from_nd value="False"/>
<scale_values value="()"/>
<silent value="False"/>
<static_shape value="False"/>
<stream_output value="False"/>
<unset unset_cli_parameters="batch, counts, disable_fusing, disable_gfusing, finegrain_fusing, input, input_checkpoint, input_meta_graph, input_proto, input_shape, input_symbol, mean_file, mean_file_offsets, move_to_preprocess, nd_prefix_name, output, placeholder_shapes, pretrained_model_name, saved_model_dir, saved_model_tags, scale, tensorboard_logdir, tensorflow_custom_layer_libraries, tensorflow_custom_operations_config_update, tensorflow_object_detection_api_pipeline_config, tensorflow_use_custom_operations_config, transformations_config"/>
</cli_parameters>
</meta_data>
</net>

View File

@ -0,0 +1,67 @@
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "input"
output: "output_1"
output: "output_2"
op_type: "Split"
attribute {
name: "axis"
i: 1
type: INT
}
}
name: "test_split_equal_parts_2d"
input {
name: "input"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 6
}
}
}
}
}
output {
name: "output_1"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
}
}
}
}
output {
name: "output_2"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 3
}
}
}
}
}
}
opset_import {
version: 8
}

View File

@ -0,0 +1,147 @@
<?xml version="1.0" ?>
<net name="split_equal_parts_2d" version="10">
<layers>
<layer id="0" name="input" type="Parameter" version="opset1">
<data element_type="f32" shape="2,6"/>
<output>
<port id="0" precision="FP32">
<dim>2</dim>
<dim>6</dim>
</port>
</output>
</layer>
<layer id="1" name="output_14/Split_input_port_1/value/Output_0/Data__const" type="Const" version="opset1">
<data element_type="i64" offset="0" shape="" size="8"/>
<output>
<port id="1" precision="I64"/>
</output>
</layer>
<layer id="2" name="output_14/Split" type="Split" version="opset1">
<data num_splits="2"/>
<input>
<port id="0">
<dim>2</dim>
<dim>6</dim>
</port>
<port id="1"/>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>3</dim>
</port>
<port id="3" precision="FP32">
<dim>2</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="3" name="67_input_port_1/value69_const" type="Const" version="opset1">
<data element_type="f32" offset="8" shape="" size="4"/>
<output>
<port id="1" precision="FP32"/>
</output>
</layer>
<layer id="4" name="output_1" type="Add" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>3</dim>
</port>
<port id="1"/>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="5" name="output_1/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>3</dim>
</port>
</input>
</layer>
<layer id="6" name="71_input_port_1/value73_const" type="Const" version="opset1">
<data element_type="f32" offset="8" shape="" size="4"/>
<output>
<port id="1" precision="FP32"/>
</output>
</layer>
<layer id="7" name="output_2" type="Add" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>3</dim>
</port>
<port id="1"/>
</input>
<output>
<port id="2" precision="FP32">
<dim>2</dim>
<dim>3</dim>
</port>
</output>
</layer>
<layer id="8" name="output_2/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>2</dim>
<dim>3</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
<edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
<edge from-layer="2" from-port="3" to-layer="7" to-port="0"/>
<edge from-layer="6" from-port="1" to-layer="7" to-port="1"/>
<edge from-layer="7" from-port="2" to-layer="8" to-port="0"/>
</edges>
<meta_data>
<MO_version value="unknown version"/>
<cli_parameters>
<caffe_parser_path value="DIR"/>
<data_type value="float"/>
<disable_nhwc_to_nchw value="False"/>
<disable_omitting_optional value="False"/>
<disable_resnet_optimization value="False"/>
<disable_weights_compression value="False"/>
<enable_concat_optimization value="False"/>
<enable_flattening_nested_params value="False"/>
<enable_ssd_gluoncv value="False"/>
<extensions value="DIR"/>
<framework value="onnx"/>
<freeze_placeholder_with_value value="{}"/>
<generate_deprecated_IR_V7 value="False"/>
<input_model value="DIR/split_equal_parts_2d.onnx"/>
<input_model_is_text value="False"/>
<k value="DIR/CustomLayersMapping.xml"/>
<keep_shape_ops value="True"/>
<legacy_mxnet_model value="False"/>
<log_level value="ERROR"/>
<mean_scale_values value="{}"/>
<mean_values value="()"/>
<model_name value="split_equal_parts_2d"/>
<output_dir value="DIR"/>
<placeholder_data_types value="{}"/>
<progress value="False"/>
<remove_memory value="False"/>
<remove_output_softmax value="False"/>
<reverse_input_channels value="False"/>
<save_params_from_nd value="False"/>
<scale_values value="()"/>
<silent value="False"/>
<static_shape value="False"/>
<stream_output value="False"/>
<unset unset_cli_parameters="batch, counts, disable_fusing, disable_gfusing, finegrain_fusing, input, input_checkpoint, input_meta_graph, input_proto, input_shape, input_symbol, mean_file, mean_file_offsets, move_to_preprocess, nd_prefix_name, output, placeholder_shapes, pretrained_model_name, saved_model_dir, saved_model_tags, scale, tensorboard_logdir, tensorflow_custom_layer_libraries, tensorflow_custom_operations_config_update, tensorflow_object_detection_api_pipeline_config, tensorflow_use_custom_operations_config, transformations_config"/>
</cli_parameters>
</meta_data>
</net>

View File

@ -0,0 +1,167 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "gtest/gtest.h"
#include "ie_core.hpp"
#ifndef IR_SERIALIZATION_MODELS_PATH // should be already defined by cmake
#define IR_SERIALIZATION_MODELS_PATH ""
#endif
// Fixture for serialization round-trip tests. Output file names are derived
// from the currently running test's name so concurrent/subsequent tests do
// not overwrite each other's serialized artifacts.
class SerializationTest : public ::testing::Test {
protected:
    // Name of the test currently being run, used to build unique output paths.
    std::string test_name =
        ::testing::UnitTest::GetInstance()->current_test_info()->name();
    // Destination paths handed to CNNNetwork::serialize by each test body.
    std::string m_out_xml_path = test_name + ".xml";
    std::string m_out_bin_path = test_name + ".bin";

    // Remove the serialized artifacts after every test so runs stay clean.
    void TearDown() override {
        std::remove(m_out_xml_path.c_str());
        std::remove(m_out_bin_path.c_str());
    }
};
TEST_F(SerializationTest, BasicModel_MO) {
const std::string model = IR_SERIALIZATION_MODELS_PATH "add_abc.xml";
const std::string weights = IR_SERIALIZATION_MODELS_PATH "add_abc.bin";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model, weights);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_TRUE(success) << message;
}
TEST_F(SerializationTest, BasicModel_ONNXImporter) {
const std::string model = IR_SERIALIZATION_MODELS_PATH "add_abc.prototxt";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_TRUE(success) << message;
}
TEST_F(SerializationTest, ModelWithMultipleOutputs_MO) {
const std::string model =
IR_SERIALIZATION_MODELS_PATH "split_equal_parts_2d.xml";
const std::string weights =
IR_SERIALIZATION_MODELS_PATH "split_equal_parts_2d.bin";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model, weights);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
// Compare function does not support models with multiple outputs
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_FALSE(success) << message;
}
TEST_F(SerializationTest, ModelWithMultipleOutputs_ONNXImporter) {
const std::string model =
IR_SERIALIZATION_MODELS_PATH "split_equal_parts_2d.prototxt";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
// Compare function does not support models with multiple outputs
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_FALSE(success) << message;
}
TEST_F(SerializationTest, ModelWithMultipleLayers_MO) {
const std::string model = IR_SERIALIZATION_MODELS_PATH "addmul_abc.xml";
const std::string weights = IR_SERIALIZATION_MODELS_PATH "addmul_abc.bin";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model, weights);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_TRUE(success) << message;
}
TEST_F(SerializationTest, ModelWithMultipleLayers_ONNXImporter) {
const std::string model =
IR_SERIALIZATION_MODELS_PATH "addmul_abc.prototxt";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_TRUE(success) << message;
}
TEST_F(SerializationTest, ModelWithConstants_MO) {
const std::string model =
IR_SERIALIZATION_MODELS_PATH "add_abc_initializers.xml";
const std::string weights =
IR_SERIALIZATION_MODELS_PATH "add_abc_initializers.bin";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model, weights);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_TRUE(success) << message;
}
TEST_F(SerializationTest, ModelWithConstants_ONNXImporter) {
const std::string model =
IR_SERIALIZATION_MODELS_PATH "add_abc_initializers.prototxt";
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(model);
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
bool success;
std::string message;
std::tie(success, message) =
compare_functions(result.getFunction(), expected.getFunction());
ASSERT_TRUE(success) << message;
}

View File

@ -0,0 +1,57 @@
// Copyright (C) 2020 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include "gtest/gtest.h"
#include "ie_core.hpp"
#include "ngraph/ngraph.hpp"
#include "transformations/serialize.hpp"
#ifndef IR_SERIALIZATION_MODELS_PATH // should be already defined by cmake
#define IR_SERIALIZATION_MODELS_PATH ""
#endif
// Fixture for tests exercising the ngraph::pass::Serialize transformation.
// Loads a reference nGraph function once per test and provides unique,
// per-test output paths that are cleaned up afterwards.
class SerializationTransformationTest : public ::testing::Test {
protected:
    // Name of the test currently being run, used to build unique output paths.
    std::string test_name =
        ::testing::UnitTest::GetInstance()->current_test_info()->name();
    // Destination paths passed to the Serialize transformation.
    std::string m_out_xml_path = test_name + ".xml";
    std::string m_out_bin_path = test_name + ".bin";
    // Function under test, read from the reference add_abc IR model.
    std::shared_ptr<ngraph::Function> m_function;

    void SetUp() override {
        const std::string model = IR_SERIALIZATION_MODELS_PATH "add_abc.xml";
        const std::string weights = IR_SERIALIZATION_MODELS_PATH "add_abc.bin";

        InferenceEngine::Core ie;
        m_function = ie.ReadNetwork(model, weights).getFunction();
    }

    // Remove the serialized artifacts after every test so runs stay clean.
    void TearDown() override {
        std::remove(m_out_xml_path.c_str());
        std::remove(m_out_bin_path.c_str());
    }
};
TEST_F(SerializationTransformationTest, DirectInstantiation) {
    // Invoke the Serialize pass directly on the function, bypassing any
    // pass manager.
    ngraph::pass::Serialize serializer{m_out_xml_path, m_out_bin_path};
    serializer.run_on_function(m_function);

    // Both the xml and the bin artifacts must have been produced.
    std::ifstream xml_file(m_out_xml_path);
    std::ifstream bin_file(m_out_bin_path);
    ASSERT_TRUE(xml_file.good());
    ASSERT_TRUE(bin_file.good());
}
TEST_F(SerializationTransformationTest, PassManagerInstantiation) {
    // Register the Serialize pass in a pass manager and run the whole
    // pipeline over the function.
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::Serialize>(m_out_xml_path,
                                                        m_out_bin_path);
    pass_manager.run_passes(m_function);

    // Both the xml and the bin artifacts must have been produced.
    std::ifstream xml_file(m_out_xml_path);
    std::ifstream bin_file(m_out_bin_path);
    ASSERT_TRUE(xml_file.good());
    ASSERT_TRUE(bin_file.good());
}