Visitor API TI serialization (#3777)

* Add on_adapter(Function) for serialization.

* Add port_map and back_edges serialization.

* Add 2 unit tests for TI serialization.

* Convert lambda expression into function pointer.

* Add single layer test for tensor iterator.

* Add limitation for file name length during serialization.

* Add file name length limitation for Serialize().

* Add WA for LSTMCell v0 in serialize class, new test class for TI serialization with dynamic weights, add bin path to SerializationParams, replace call to ngfunction_2_irv10 with visitor.on_attribute().

* Remove hacks for TI from ngfunction_2_irv10(), validate buffers in port_map.

* Changed year in new added test files.

* Add check for version of LSTMv0 WA, add assert for model read from file.

* Remove append_copy for xml Function, changed comparison for LSTMv0 WA.

* Update second WA for LSTMCell v0 with version check.

* Remove find_child when searching for port_map and back_edges.
This commit is contained in:
Szymon Durawa 2021-01-19 13:26:29 +01:00 committed by GitHub
parent f7e0d90292
commit 4f5230bb03
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 886 additions and 29 deletions

View File

@ -57,6 +57,11 @@ std::string translate_type_name(const std::string& name) {
return name;
}
void ngfunction_2_irv10(pugi::xml_node& node,
std::ostream& bin_file,
const ngraph::Function& f,
const std::map<std::string, ngraph::OpSet>& custom_opsets);
// Some of the operators were added to wrong opsets. This is a mapping
// that allows such operators to be serialized with proper opsets.
// If new operators are discovered that have the same problem, the mapping
@ -76,6 +81,7 @@ class XmlSerializer : public ngraph::AttributeVisitor {
pugi::xml_node& m_xml_node;
std::ostream& m_bin_data;
std::string& m_node_type_name;
const std::map<std::string, ngraph::OpSet>& m_custom_opsets;
template <typename T>
std::string create_atribute_list(
@ -86,16 +92,109 @@ class XmlSerializer : public ngraph::AttributeVisitor {
public:
XmlSerializer(pugi::xml_node& data,
std::ostream& bin_data,
std::string& node_type_name)
std::string& node_type_name,
const std::map<std::string, ngraph::OpSet>& custom_opsets)
: m_xml_node(data)
, m_bin_data(bin_data)
, m_node_type_name(node_type_name) {
, m_node_type_name(node_type_name)
, m_custom_opsets(custom_opsets) {
}
// Collects the "id" attributes of all body layers whose "type" attribute
// equals map_type (e.g. "Parameter" or "Result"). The body serializes its
// ops in reversed order, so the collected ids are reversed back before
// returning, giving ids indexed by the op's original position in the body.
std::vector<std::string> map_type_from_body(const pugi::xml_node& xml_node,
    const std::string& map_type) {
    std::vector<std::string> layer_ids;
    const pugi::xml_node body_layers = xml_node.child("body").child("layers");
    for (const pugi::xml_node& layer : body_layers) {
        if (map_type == layer.attribute("type").value()) {
            layer_ids.push_back(layer.attribute("id").value());
        }
    }
    // ops for serialized body function are provided in reversed order
    std::reverse(layer_ids.begin(), layer_ids.end());
    return layer_ids;
}
// Fallback visitor hook for untyped adapters. Only the TensorIterator/Loop
// sub-graph input/output descriptions are handled here (detected via
// as_type<> below); any other untyped adapter is deliberately ignored.
void on_adapter(const std::string& name,
ngraph::ValueAccessor<void>& adapter) override {
(void)name;
(void)adapter;
// A serialized "body" child on the parent layer signals a sub-graph op
// (TI/Loop) whose port_map/back_edges still need to be emitted.
if (m_xml_node.parent().child("body")) {
// parameters and results from body are required for port_map attributes serialization
std::vector<std::string> parameter_mapping = map_type_from_body(m_xml_node.parent(), "Parameter");
std::vector<std::string> result_mapping = map_type_from_body(m_xml_node.parent(), "Result");
NGRAPH_CHECK(!parameter_mapping.empty() || !result_mapping.empty(), "No parameters or results found in body Function.");
// TI, Loop do not have attributes as regular ops, it is necessary to append "port_map" and
// "back_edges" to the layer above (m_xml_node.parent()) as in ngfunction_2_irv10() the layer (here "m_xml_node")
// with empty attributes is removed.
if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<std::shared_ptr
<ngraph::op::util::SubGraphOp::InputDescription>>>>(&adapter)) {
// Emit <port_map>/<input> entries; create <port_map> lazily as the
// parent's first child so it precedes <body> in the output.
pugi::xml_node port_map = m_xml_node.parent().child("port_map");
if (!m_xml_node.parent().child("port_map")) {
port_map = m_xml_node.parent().insert_child_before("port_map", m_xml_node.parent().first_child());
}
for (const auto& input_description : a->get()) {
pugi::xml_node input = port_map.append_child("input");
input.append_attribute("external_port_id").set_value(input_description->m_input_index);
input.append_attribute("internal_layer_id").set_value(parameter_mapping[input_description->m_body_parameter_index].c_str());
if (auto slice_input = as_type_ptr<ngraph::op::util::SubGraphOp::SliceInputDescription>(input_description)) {
// Sliced input: always emit "axis"; the remaining slice
// parameters are emitted only when they differ from defaults.
input.prepend_attribute("axis").set_value(slice_input->m_axis);
if (slice_input->m_start) {
input.append_attribute("start").set_value(slice_input->m_start);
}
if (slice_input->m_end != -1) {
input.append_attribute("end").set_value(slice_input->m_end);
}
if (slice_input->m_stride != 1) {
input.append_attribute("stride").set_value(slice_input->m_stride);
}
if (slice_input->m_part_size != 1) {
input.append_attribute("part_size").set_value(slice_input->m_part_size);
}
} else if (auto merged_input = as_type_ptr<ngraph::op::util::SubGraphOp::MergedInputDescription>(input_description)) {
// Merged input: additionally record the Result -> Parameter
// back edge; create <back_edges> lazily right after <port_map>.
pugi::xml_node back_edges = m_xml_node.parent().child("back_edges");
if (!back_edges) {
back_edges = m_xml_node.parent().insert_child_after("back_edges", port_map);
}
pugi::xml_node edge = back_edges.append_child("edge");
edge.append_attribute("from-layer").set_value(result_mapping[merged_input->m_body_value_index].c_str());
edge.append_attribute("to-layer").set_value(parameter_mapping[merged_input->m_body_parameter_index].c_str());
}
}
} else if (const auto& a = ngraph::as_type<ngraph::AttributeAdapter<std::vector<std::shared_ptr
<ngraph::op::util::SubGraphOp::OutputDescription>>>>(&adapter)) {
// Emit <port_map>/<output> entries; <port_map> may already exist if
// the input descriptions were visited first.
pugi::xml_node port_map = m_xml_node.parent().find_child([](pugi::xml_node node) {return strcmp(node.name(), "port_map") == 0;});
if (!port_map) {
port_map = m_xml_node.parent().insert_child_before("port_map", m_xml_node.parent().first_child());
}
for (const auto& output_description : a->get()) {
pugi::xml_node output = port_map.append_child("output");
// Output external ids continue numbering after all external inputs.
output.append_attribute("external_port_id").set_value(parameter_mapping.size() + output_description->m_output_index);
output.append_attribute("internal_layer_id").set_value(result_mapping[output_description->m_body_value_index].c_str());
if (auto concat_output = as_type_ptr<ngraph::op::util::SubGraphOp::ConcatOutputDescription>(output_description)) {
// Concatenated output: same default-suppression scheme as inputs.
output.prepend_attribute("axis").set_value(concat_output->m_axis);
if (concat_output->m_start) {
output.append_attribute("start").set_value(concat_output->m_start);
}
if (concat_output->m_end != -1) {
output.append_attribute("end").set_value(concat_output->m_end);
}
if (concat_output->m_stride != 1) {
output.append_attribute("stride").set_value(concat_output->m_stride);
}
if (concat_output->m_part_size != 1) {
output.append_attribute("part_size").set_value(concat_output->m_part_size);
}
}
}
}
}
}
void on_adapter(const std::string& name,
@ -165,6 +264,23 @@ public:
m_xml_node.append_attribute(name.c_str())
.set_value(create_atribute_list(adapter).c_str());
}
void on_adapter(
const std::string& name,
ngraph::ValueAccessor<std::shared_ptr<Function>>& adapter) override {
if (name == "body") {
// TI, Loop do not have attributtes as regular ops, it is necessary to append "body"
// to layer above (m_xml_node.parent()) as in ngfunction_2_irv10() layer (m_xml_node) with empty attributes
// is removed.
pugi::xml_node xml_body = m_xml_node.parent().append_child(name.c_str());
ngfunction_2_irv10(xml_body, m_bin_data, *adapter.get(), m_custom_opsets);
xml_body.first_child().remove_attribute("name");
xml_body.first_child().remove_attribute("version");
} else if (name == "net") {
ngfunction_2_irv10(m_xml_node, m_bin_data, *adapter.get(), m_custom_opsets);
} else {
NGRAPH_CHECK(false, "Unsupported Function name.");
}
}
};
void visit_exec_graph_node(pugi::xml_node& data, std::string& node_type_name,
@ -393,13 +509,12 @@ bool resolve_dynamic_shapes(const ngraph::Function& f) {
return true;
}
void ngfunction_2_irv10(pugi::xml_document& doc,
void ngfunction_2_irv10(pugi::xml_node& netXml,
std::ostream& bin_file,
const ngraph::Function& f,
const std::map<std::string, ngraph::OpSet>& custom_opsets) {
const bool exec_graph = is_exec_graph(f);
pugi::xml_node netXml = doc.append_child("net");
netXml.append_attribute("name").set_value(f.get_friendly_name().c_str());
netXml.append_attribute("version").set_value("10");
pugi::xml_node layers = netXml.append_child("layers");
@ -424,24 +539,25 @@ void ngfunction_2_irv10(pugi::xml_document& doc,
layer.append_attribute("version").set_value(
get_opset_name(node, custom_opsets).c_str());
}
// <layers/data>
pugi::xml_node data = layer.append_child("data");
std::string node_type_name{node->get_type_name()};
// <layers/data> general attributes
std::string node_type_name{node->get_type_name()};
if (exec_graph) {
visit_exec_graph_node(data, node_type_name, node);
} else {
XmlSerializer visitor(data, bin_file, node_type_name);
XmlSerializer visitor(data, bin_file, node_type_name, custom_opsets);
NGRAPH_CHECK(node->visit_attributes(visitor),
"Visitor API is not supported in ", node);
}
layer_type_attribute.set_value(
translate_type_name(node_type_name).c_str());
const auto data_attr_size =
std::distance(data.attributes().begin(), data.attributes().end());
if (data_attr_size == 0) {
const bool data_attr_size =
data.attributes().begin() == data.attributes().end();
if (data_attr_size) {
layer.remove_child(data);
}
@ -453,6 +569,15 @@ void ngfunction_2_irv10(pugi::xml_document& doc,
NGRAPH_CHECK(i.get_partial_shape().is_static(),
"Unsupported dynamic input shape in ", node);
// WA for LSTMCellv0, peephole input shall not be serialized
if (i.get_index() == 6) {
auto type_info = node->get_type_info();
if (!strcmp(type_info.name, "LSTMCell") && type_info.version == 0) {
port_id++;
continue;
}
}
pugi::xml_node port = input.append_child("port");
port.append_attribute("id").set_value(port_id++);
for (auto d : i.get_shape()) {
@ -461,6 +586,10 @@ void ngfunction_2_irv10(pugi::xml_document& doc,
.set_value(std::to_string(d).c_str());
}
}
if (node_type_name == "TensorIterator") {
layer.prepend_move(input);
}
}
// <layers/output>
if ((node->get_output_size() > 0) && !ngraph::op::is_output(node)) {
@ -479,12 +608,22 @@ void ngfunction_2_irv10(pugi::xml_document& doc,
.set_value(std::to_string(d).c_str());
}
}
if (node_type_name == "TensorIterator") {
layer.insert_move_after(output, layer.first_child());
}
}
}
// <edges>
const std::vector<Edge> edge_mapping = create_edge_mapping(layer_ids, f);
pugi::xml_node edges = netXml.append_child("edges");
for (auto e : edge_mapping) {
// WA for LSTMCellv0, peephole input shall not be serialized
if (e.to_port == 6) {
auto type_info = f.get_ordered_ops()[e.to_layer]->get_type_info();
if (!strcmp(type_info.name, "LSTMCell") && type_info.version == 0) {
continue;
}
}
pugi::xml_node edge = edges.append_child("edge");
edge.append_attribute("from-layer").set_value(e.from_layer);
edge.append_attribute("from-port").set_value(e.from_port);
@ -496,7 +635,6 @@ void ngfunction_2_irv10(pugi::xml_document& doc,
f.validate_nodes_and_infer_types();
}
}
} // namespace
// ! [function_pass:serialize_cpp]
@ -509,7 +647,12 @@ bool pass::Serialize::run_on_function(std::shared_ptr<ngraph::Function> f) {
NGRAPH_CHECK(bin_file, "Can't open bin file: \"" + m_binPath + "\"");
switch (m_version) {
case Version::IR_V10:
ngfunction_2_irv10(xml_doc, bin_file, *f, m_custom_opsets);
{
std::string name = "net";
pugi::xml_node net_node = xml_doc.append_child(name.c_str());
XmlSerializer visitor(net_node, bin_file, name, m_custom_opsets);
visitor.on_attribute(name, f);
}
break;
default:
NGRAPH_UNREACHABLE("Unsupported version");

View File

@ -0,0 +1,272 @@
<?xml version="1.0"?>
<net name="Transpose" version="10">
<layers>
<layer id="0" name="data1" type="Parameter" version="opset1">
<data element_type="f32" shape="1,25,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>25</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="1" name="data2" type="Parameter" version="opset1">
<data element_type="f32" shape="1,256"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="2" name="data3" type="Parameter" version="opset1">
<data element_type="f32" shape="1,256"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="3" name="TensorIterator" type="TensorIterator" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>25</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>256</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>256</dim>
</port>
</input>
<output>
<port id="3" precision="FP32">
<dim>1</dim>
<dim>25</dim>
<dim>256</dim>
</port>
</output>
<port_map>
<input axis="1" end="0" external_port_id="0" internal_layer_id="0" start="-1" stride="-1"/>
<input external_port_id="1" internal_layer_id="3"/>
<input external_port_id="2" internal_layer_id="4"/>
<output axis="1" end="0" external_port_id="3" internal_layer_id="13" start="-1" stride="-1"/>
</port_map>
<back_edges>
<edge from-layer="10" to-layer="3"/>
<edge from-layer="9" to-layer="4"/>
</back_edges>
<body>
<layers>
<layer id="0" name="32" type="Parameter" version="opset1">
<data element_type="f32" shape="1,1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="1" name="25_const" type="Const" version="opset1">
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="2" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/TensorArrayReadV3/Output_0/Data_/InputSqueeze" type="Reshape" version="opset1">
<data special_zero="True"/>
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="3" name="34" type="Parameter" version="opset1">
<data element_type="f32" shape="1,256"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="4" name="36" type="Parameter" version="opset1">
<data element_type="f32" shape="1,256"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="5" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Split269_const" type="Const" version="opset1">
<data element_type="f32" offset="16" shape="1024,512" size="2097152"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="6" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Split270_const" type="Const" version="opset1">
<data element_type="f32" offset="2097168" shape="1024,256" size="1048576"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="7" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/BiasAdd/Enter/Output_0/Data__const" type="Const" version="opset1">
<data element_type="f32" offset="3145744" shape="1024" size="4096"/>
<output>
<port id="1" precision="FP32">
<dim>1024</dim>
</port>
</output>
</layer>
<layer id="8" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell" type="LSTMCell" version="opset1">
<data hidden_size="256"/>
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>256</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>256</dim>
</port>
<port id="3">
<dim>1024</dim>
<dim>512</dim>
</port>
<port id="4">
<dim>1024</dim>
<dim>256</dim>
</port>
<port id="5">
<dim>1024</dim>
</port>
</input>
<output>
<port id="6" precision="FP32">
<dim>1</dim>
<dim>256</dim>
</port>
<port id="7" precision="FP32">
<dim>1</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="9" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Output_1/Data_/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>256</dim>
</port>
</input>
</layer>
<layer id="10" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Output_0/Data_/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>256</dim>
</port>
</input>
</layer>
<layer id="11" name="28_const" type="Const" version="opset1">
<data element_type="i64" offset="3149840" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="12" name="shadow/LSTMLayers/stack_bidirectional_rnn/cell_1/bidirectional_rnn/bw/bw/while/bw/basic_lstm_cell/concat/LSTMCell/Output_0/Data_/OutputUnsqueeze" type="Reshape" version="opset1">
<data special_zero="True"/>
<input>
<port id="0">
<dim>1</dim>
<dim>256</dim>
</port>
<port id="1">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>256</dim>
</port>
</output>
</layer>
<layer id="13" name="30/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>256</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="8" to-port="0"/>
<edge from-layer="3" from-port="0" to-layer="8" to-port="1"/>
<edge from-layer="4" from-port="0" to-layer="8" to-port="2"/>
<edge from-layer="5" from-port="1" to-layer="8" to-port="3"/>
<edge from-layer="6" from-port="1" to-layer="8" to-port="4"/>
<edge from-layer="7" from-port="1" to-layer="8" to-port="5"/>
<edge from-layer="8" from-port="7" to-layer="9" to-port="0"/>
<edge from-layer="8" from-port="6" to-layer="10" to-port="0"/>
<edge from-layer="8" from-port="6" to-layer="12" to-port="0"/>
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
<edge from-layer="12" from-port="2" to-layer="13" to-port="0"/>
</edges>
</body>
</layer>
<layer id="4" name="result" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>25</dim>
<dim>256</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
<edge from-layer="3" from-port="3" to-layer="4" to-port="0"/>
</edges>
</net>

View File

@ -0,0 +1,300 @@
<?xml version="1.0"?>
<net name="Resnet" version="10">
<layers>
<layer id="0" name="data1" type="Parameter" version="opset1">
<data element_type="f32" shape="16,1,512"/>
<output>
<port id="0" precision="FP32">
<dim>16</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="1" name="data2" type="Parameter" version="opset1">
<data element_type="f32" shape="1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="2" name="data3" type="Parameter" version="opset1">
<data element_type="f32" shape="1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="3" name="471/TensorIterator" type="TensorIterator" version="opset1">
<input>
<port id="0">
<dim>16</dim>
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>512</dim>
</port>
</input>
<output>
<port id="3" precision="FP32">
<dim>16</dim>
<dim>1</dim>
<dim>512</dim>
</port>
<port id="4" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="5" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
<port_map>
<input axis="0" external_port_id="0" internal_layer_id="0" part_size="1" stride="1"/>
<input external_port_id="1" internal_layer_id="3"/>
<input external_port_id="2" internal_layer_id="4"/>
<output axis="0" external_port_id="3" internal_layer_id="13" part_size="1" stride="1"/>
<output external_port_id="4" internal_layer_id="9"/>
<output external_port_id="5" internal_layer_id="10"/>
</port_map>
<back_edges>
<edge from-layer="9" to-layer="3"/>
<edge from-layer="10" to-layer="4"/>
</back_edges>
<body>
<layers>
<layer id="0" name="20" type="Parameter" version="opset1">
<data element_type="f32" shape="1,1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="1" name="7_const" type="Const" version="opset1">
<data element_type="i64" offset="0" shape="2" size="16"/>
<output>
<port id="1" precision="I64">
<dim>2</dim>
</port>
</output>
</layer>
<layer id="2" name="471/input_squeeze" type="Reshape" version="opset1">
<data special_zero="True"/>
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>2</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="3" name="22" type="Parameter" version="opset1">
<data element_type="f32" shape="1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="4" name="24" type="Parameter" version="opset1">
<data element_type="f32" shape="1,512"/>
<output>
<port id="0" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="5" name="471/LSTMCell/Split149_const" type="Const" version="opset1">
<data element_type="f32" offset="16" shape="2048,512" size="4194304"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="6" name="471/LSTMCell/Split150_const" type="Const" version="opset1">
<data element_type="f32" offset="4194320" shape="2048,512" size="4194304"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="7" name="471/inport/2_const" type="Const" version="opset1">
<data element_type="f32" offset="8388624" shape="2048" size="8192"/>
<output>
<port id="1" precision="FP32">
<dim>2048</dim>
</port>
</output>
</layer>
<layer id="8" name="471/LSTMCell" type="LSTMCell" version="opset1">
<data hidden_size="512"/>
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="2">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="3">
<dim>2048</dim>
<dim>512</dim>
</port>
<port id="4">
<dim>2048</dim>
<dim>512</dim>
</port>
<port id="5">
<dim>2048</dim>
</port>
</input>
<output>
<port id="6" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="7" precision="FP32">
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="9" name="471/outport/0/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
<layer id="10" name="471/outport/1/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
<layer id="11" name="15_const" type="Const" version="opset1">
<data element_type="i64" offset="8396816" shape="3" size="24"/>
<output>
<port id="1" precision="I64">
<dim>3</dim>
</port>
</output>
</layer>
<layer id="12" name="471output_unsqueeze" type="Reshape" version="opset1">
<data special_zero="True"/>
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
<port id="1">
<dim>3</dim>
</port>
</input>
<output>
<port id="2" precision="FP32">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</output>
</layer>
<layer id="13" name="18/sink_port_0" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
<edge from-layer="2" from-port="2" to-layer="8" to-port="0"/>
<edge from-layer="3" from-port="0" to-layer="8" to-port="1"/>
<edge from-layer="4" from-port="0" to-layer="8" to-port="2"/>
<edge from-layer="5" from-port="1" to-layer="8" to-port="3"/>
<edge from-layer="6" from-port="1" to-layer="8" to-port="4"/>
<edge from-layer="7" from-port="1" to-layer="8" to-port="5"/>
<edge from-layer="8" from-port="6" to-layer="9" to-port="0"/>
<edge from-layer="8" from-port="7" to-layer="10" to-port="0"/>
<edge from-layer="8" from-port="6" to-layer="12" to-port="0"/>
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
<edge from-layer="12" from-port="2" to-layer="13" to-port="0"/>
</edges>
</body>
</layer>
<layer id="4" name="result" type="Result" version="opset1">
<input>
<port id="0">
<dim>16</dim>
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
<layer id="5" name="result_2" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
<layer id="6" name="result_3" type="Result" version="opset1">
<input>
<port id="0">
<dim>1</dim>
<dim>512</dim>
</port>
</input>
</layer>
</layers>
<edges>
<edge from-layer="0" from-port="0" to-layer="3" to-port="0"/>
<edge from-layer="1" from-port="0" to-layer="3" to-port="1"/>
<edge from-layer="2" from-port="0" to-layer="3" to-port="2"/>
<edge from-layer="3" from-port="3" to-layer="4" to-port="0"/>
<edge from-layer="3" from-port="4" to-layer="5" to-port="0"/>
<edge from-layer="3" from-port="5" to-layer="6" to-port="0"/>
</edges>
</net>

View File

@ -12,17 +12,21 @@
#define IR_SERIALIZATION_MODELS_PATH ""
#endif
typedef std::tuple<std::string> SerializationParams;
typedef std::tuple<std::string, std::string> SerializationParams;
class SerializationTest: public CommonTestUtils::TestsCommon,
public testing::WithParamInterface<SerializationParams> {
public:
std::string m_model_path;
std::string m_binary_path;
std::string m_out_xml_path;
std::string m_out_bin_path;
void SetUp() override {
m_model_path = IR_SERIALIZATION_MODELS_PATH + std::get<0>(GetParam());
if (!std::get<1>(GetParam()).empty()) {
m_binary_path = IR_SERIALIZATION_MODELS_PATH + std::get<1>(GetParam());
}
const std::string test_name = GetTestName() + "_" + GetTimestamp();
m_out_xml_path = test_name + ".xml";
@ -37,7 +41,13 @@ public:
TEST_P(SerializationTest, CompareFunctions) {
InferenceEngine::Core ie;
auto expected = ie.ReadNetwork(m_model_path);
InferenceEngine::CNNNetwork expected;
if (!m_binary_path.empty()) {
expected = ie.ReadNetwork(m_model_path, m_binary_path);
} else {
expected = ie.ReadNetwork(m_model_path);
}
expected.serialize(m_out_xml_path, m_out_bin_path);
auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);
@ -48,19 +58,19 @@ TEST_P(SerializationTest, CompareFunctions) {
}
INSTANTIATE_TEST_CASE_P(IRSerialization, SerializationTest,
testing::Values(std::make_tuple("add_abc.xml"),
std::make_tuple("add_abc_f64.xml"),
std::make_tuple("split_equal_parts_2d.xml"),
std::make_tuple("addmul_abc.xml"),
std::make_tuple("add_abc_initializers.xml"),
std::make_tuple("experimental_detectron_roi_feature_extractor.xml"),
std::make_tuple("experimental_detectron_detection_output.xml"),
std::make_tuple("experimental_detectron_detection_output_opset6.xml"),
std::make_tuple("nms5.xml"),
std::make_tuple("shape_of.xml")));
testing::Values(std::make_tuple("add_abc.xml", "add_abc.bin"),
std::make_tuple("add_abc_f64.xml", ""),
std::make_tuple("split_equal_parts_2d.xml", "split_equal_parts_2d.bin"),
std::make_tuple("addmul_abc.xml", "addmul_abc.bin"),
std::make_tuple("add_abc_initializers.xml", "add_abc_initializers.bin"),
std::make_tuple("experimental_detectron_roi_feature_extractor.xml", ""),
std::make_tuple("experimental_detectron_detection_output.xml", ""),
std::make_tuple("experimental_detectron_detection_output_opset6.xml", ""),
std::make_tuple("nms5.xml", "nms5.bin"),
std::make_tuple("shape_of.xml", "")));
INSTANTIATE_TEST_CASE_P(ONNXSerialization, SerializationTest,
testing::Values(std::make_tuple("add_abc.prototxt"),
std::make_tuple("split_equal_parts_2d.prototxt"),
std::make_tuple("addmul_abc.prototxt"),
std::make_tuple("add_abc_initializers.prototxt")));
testing::Values(std::make_tuple("add_abc.prototxt", ""),
std::make_tuple("split_equal_parts_2d.prototxt", ""),
std::make_tuple("addmul_abc.prototxt", ""),
std::make_tuple("add_abc_initializers.prototxt", "")));

View File

@ -0,0 +1,86 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <fstream>
#include "common_test_utils/ngraph_test_utils.hpp"
#include "gtest/gtest.h"
#include "ie_core.hpp"
#include "ie_blob.h"
#include "common_test_utils/data_utils.hpp"
#ifndef IR_SERIALIZATION_MODELS_PATH // should be already defined by cmake
#define IR_SERIALIZATION_MODELS_PATH ""
#endif
// Fixture for TensorIterator serialization round-trip tests: a network is
// read from an IR string plus generated weights, serialized to disk, read
// back, and the two functions are compared.
class SerializationTensorIteratorTest : public ::testing::Test {
protected:
    // Per-test output artifact paths, derived from the running test's name.
    std::string test_name =
        ::testing::UnitTest::GetInstance()->current_test_info()->name();
    std::string m_out_xml_path = test_name + ".xml";
    std::string m_out_bin_path = test_name + ".bin";

    void TearDown() override {
        std::remove(m_out_xml_path.c_str());
        // Bug fix: the original removed m_out_xml_path twice, so the .bin
        // artifact was never deleted and leaked after every test run.
        std::remove(m_out_bin_path.c_str());
    }

    // Reads model_path with the supplied weights blob, serializes the network
    // to m_out_xml_path/m_out_bin_path, re-reads it and asserts the resulting
    // function compares equal to the original.
    void serialize_and_compare(const std::string& model_path, InferenceEngine::Blob::Ptr weights) {
        std::stringstream buffer;
        InferenceEngine::Core ie;

        std::ifstream model(model_path);
        ASSERT_TRUE(model);
        buffer << model.rdbuf();

        auto expected = ie.ReadNetwork(buffer.str(), weights);
        expected.serialize(m_out_xml_path, m_out_bin_path);
        auto result = ie.ReadNetwork(m_out_xml_path, m_out_bin_path);

        bool success;
        std::string message;
        std::tie(success, message) = compare_functions(result.getFunction(), expected.getFunction(), true);
        ASSERT_TRUE(success) << message;
    }
};
// Round-trips ti_resnet.xml (TensorIterator with an LSTMCell body iterating
// forward over axis 0) through serialization.
TEST_F(SerializationTensorIteratorTest, TiResnet) {
const std::string model_path = IR_SERIALIZATION_MODELS_PATH "ti_resnet.xml";
// Total size in bytes of the weights blob referenced by the IR's Const
// layers (offsets + sizes) — TODO confirm against ti_resnet.xml.
size_t weights_size = 8396840;

auto weights = InferenceEngine::make_shared_blob<uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {weights_size}, InferenceEngine::Layout::C));
weights->allocate();
// Fill the whole blob with generated float data first...
CommonTestUtils::fill_data(weights->buffer().as<float *>(), weights->size() / sizeof(float));

// ...then overwrite the two i64 shape constants (Reshape targets) with
// valid shapes: {1, 512} at byte offset 0 and {1, 1, 512} at byte offset
// 8396816 (int64 index 8396816 / 8 = 1049602).
auto *data = weights->buffer().as<int64_t *>();
data[0] = 1;
data[1] = 512;
data[1049602] = 1;
data[1049603] = 1;
data[1049604] = 512;

serialize_and_compare(model_path, weights);
}
// Round-trips ti_negative_stride.xml (TensorIterator slicing its input in
// reverse, stride -1 over axis 1) through serialization.
TEST_F(SerializationTensorIteratorTest, TiNegativeStride) {
const std::string model_path = IR_SERIALIZATION_MODELS_PATH "ti_negative_stride.xml";
// Total size in bytes of the weights blob referenced by the IR's Const
// layers (offsets + sizes) — TODO confirm against ti_negative_stride.xml.
size_t weights_size = 3149864;

auto weights = InferenceEngine::make_shared_blob<uint8_t>(
InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, {weights_size}, InferenceEngine::Layout::C));
weights->allocate();
// Fill the whole blob with generated float data first...
CommonTestUtils::fill_data(weights->buffer().as<float *>(), weights->size() / sizeof(float));

// ...then overwrite the two i64 shape constants (Reshape targets) with
// valid shapes: {1, 512} at byte offset 0 and {1, 1, 256} at byte offset
// 3149840 (int64 index 3149840 / 8 = 393730).
auto *data = weights->buffer().as<int64_t *>();
data[0] = 1;
data[1] = 512;
data[393730] = 1;
data[393731] = 1;
data[393732] = 256;

serialize_and_compare(model_path, weights);
}

View File

@ -0,0 +1,43 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
#include "common_test_utils/test_constants.hpp"
#include "shared_test_classes/single_layer/tensor_iterator.hpp"
using namespace LayerTestsDefinitions;
namespace {
// Serialization smoke test over generated TensorIterator networks: each
// parameter combination builds a TI network and calls Serialize(), which
// is expected to round-trip and compare it (implementation lives in
// LayerTestsCommon::Serialize — not visible here).
TEST_P(TensorIteratorTest, Serialize) {
Serialize();
}

// Test-matrix axes for the generated TensorIterator bodies below.
const std::vector<InferenceEngine::Precision> netPrecisions = {
InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP16};
// Recurrent cell type placed inside the TI body.
const std::vector<ngraph::helpers::TensorIteratorBody> body = {
ngraph::helpers::TensorIteratorBody::GRU, ngraph::helpers::TensorIteratorBody::LSTM, ngraph::helpers::TensorIteratorBody::RNN};
// Whether the TI op is decomposed before serialization.
const std::vector<bool> decompose = {true, false};
const std::vector<size_t> sequenceLength = {2};
const std::vector<size_t> batch = {1, 10};
const std::vector<size_t> hiddenSize = {128};
const std::vector<size_t> sequenceAxis = {1};
const std::vector<float> clip = {0.f};
const std::vector<ngraph::op::RecurrentSequenceDirection> direction = {
ngraph::op::RecurrentSequenceDirection::FORWARD, ngraph::op::RecurrentSequenceDirection::REVERSE};

INSTANTIATE_TEST_CASE_P(smoke_TensorIterator, TensorIteratorTest,
::testing::Combine(
::testing::ValuesIn(decompose),
::testing::ValuesIn(sequenceLength),
::testing::ValuesIn(batch),
::testing::ValuesIn(hiddenSize),
::testing::ValuesIn(sequenceAxis),
::testing::ValuesIn(clip),
::testing::ValuesIn(body),
::testing::ValuesIn(direction),
::testing::ValuesIn(netPrecisions),
::testing::Values(CommonTestUtils::DEVICE_CPU)),
TensorIteratorTest::getTestCaseName);
} // namespace

View File

@ -31,6 +31,9 @@
namespace LayerTestsUtils {
// filename length limitation due to Windows constraints (max 256 characters)
constexpr std::size_t maxFileNameLength = 140;
class Summary;
class SummaryDestroyer {

View File

@ -195,7 +195,7 @@ void LayerTestsCommon::Run() {
void LayerTestsCommon::Serialize() {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
std::string output_name = GetTestName() + "_" + GetTimestamp();
std::string output_name = GetTestName().substr(0, maxFileNameLength) + "_" + GetTimestamp();
std::string out_xml_path = output_name + ".xml";
std::string out_bin_path = output_name + ".bin";