From 07bedc5d6f7b125d1bd3ca4247e2d1408f980071 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 23 Jul 2020 16:23:19 +0300 Subject: [PATCH] Network serializer for v7 is removed (#1414) * Network serializer for v7 is removed * Fixed compilation * Fixed Windows build * WA for GPU * Create function 2 times * Fixed compilation * Added return --- .../src/template_executable_network.cpp | 1 - .../hetero_executable_network.cpp | 7 +- .../src/inference_engine/CMakeLists.txt | 3 + .../cnn_network_ngraph_impl.cpp | 11 +- .../inference_engine/network_serializer.cpp | 167 ++++ .../inference_engine/network_serializer.hpp | 24 + .../legacy_api/include/network_serializer.h | 54 -- .../src/legacy_api/src/cnn_network_impl.cpp | 27 +- .../src/legacy_api/src/network_serializer.cpp | 730 ------------------ .../legacy_api/src/network_serializer_v7.cpp | 210 +++++ .../legacy_api/src/network_serializer_v7.hpp | 31 + .../src/concat_multi_channels.cpp | 1 - .../inference_engine/local_test.cpp | 2 - .../network_serializer_test.cpp | 99 --- .../cpu/single_layer_tests/cpu_test_utils.hpp | 1 - .../shared/include/behavior/test_plugin.hpp | 40 +- .../network_serializer.cpp | 95 +++ .../num_inputs_fusing_bin_conv.cpp | 4 +- .../unique_node_names.cpp | 4 +- 19 files changed, 573 insertions(+), 938 deletions(-) create mode 100644 inference-engine/src/inference_engine/network_serializer.cpp create mode 100644 inference-engine/src/inference_engine/network_serializer.hpp delete mode 100644 inference-engine/src/legacy_api/include/network_serializer.h delete mode 100644 inference-engine/src/legacy_api/src/network_serializer.cpp create mode 100644 inference-engine/src/legacy_api/src/network_serializer_v7.cpp create mode 100644 inference-engine/src/legacy_api/src/network_serializer_v7.hpp delete mode 100644 inference-engine/tests/functional/inference_engine/network_serializer_test.cpp create mode 100644 inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/network_serializer.cpp diff --git a/docs/template_plugin/src/template_executable_network.cpp b/docs/template_plugin/src/template_executable_network.cpp index 041b6fd0af8..d0acc263c84 100644 --- a/docs/template_plugin/src/template_executable_network.cpp +++ b/docs/template_plugin/src/template_executable_network.cpp @@ -13,7 +13,6 @@ #include #include #include -#include #include #include
diff --git a/inference-engine/src/hetero_plugin/hetero_executable_network.cpp b/inference-engine/src/hetero_plugin/hetero_executable_network.cpp index 2d97aba58a6..cddb9d8244a 100644 --- a/inference-engine/src/hetero_plugin/hetero_executable_network.cpp +++ b/inference-engine/src/hetero_plugin/hetero_executable_network.cpp @@ -25,7 +25,6 @@ #include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" #include "hetero/hetero_plugin_config.hpp" #include "hetero_plugin.hpp" -#include "network_serializer.h" #include #include @@ -908,6 +907,11 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) { subnetwork._network.Export(heteroModel); } catch (InferenceEngine::details::InferenceEngineException& ie_ex) { if (std::string::npos != std::string{ie_ex.what()}.find(NOT_IMPLEMENTED_str)) { + // TODO: enable once serialization to IR v10 is implemented +#if 1 + THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str + << "Device " << subnetwork._device << " does not implement Export method"; +#else pugi::xml_document doc; auto subnet = subnetwork._clonedNetwork; if (subnet.getFunction()) { @@ -918,6 +922,7 @@ void HeteroExecutableNetwork::ExportImpl(std::ostream& heteroModel) { heteroModel << std::endl; heteroModel.write(reinterpret_cast<char*>(&dataSize), sizeof(dataSize)); InferenceEngine::Serialization::SerializeBlobs(heteroModel, subnet); +#endif } else { throw; } diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt index 7bb09be4c73..bd5962e3576 100644 --- a/inference-engine/src/inference_engine/CMakeLists.txt +++ b/inference-engine/src/inference_engine/CMakeLists.txt @@ -26,6 +26,8 @@ set(IE_BASE_SOURCE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/ie_parameter.cpp ${CMAKE_CURRENT_SOURCE_DIR}/ie_rtti.cpp ${CMAKE_CURRENT_SOURCE_DIR}/precision_utils.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/network_serializer.hpp ${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.cpp ${CMAKE_CURRENT_SOURCE_DIR}/system_allocator.hpp) list(REMOVE_ITEM LIBRARY_SRC ${IE_BASE_SOURCE_FILES}) @@ -108,6 +110,7 @@ target_include_directories(${TARGET_NAME}_common_obj PRIVATE $) target_include_directories(${TARGET_NAME}_common_obj SYSTEM PRIVATE + $ $) # Create object library diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index 8be08fec799..f93ed91f5d6 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -28,7 +28,7 @@ #include "ie_util_internal.hpp" #include "ie_ngraph_utils.hpp" #include "ie_profiling.hpp" -#include "network_serializer.h" +#include "network_serializer.hpp" #include "generic_ie.hpp" #include @@ -380,8 +380,7 @@ CNNNetworkNGraphImpl::reshape(const std::map>& StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept { - auto network = cnnNetwork; - if (!network) { + if (!cnnNetwork) { // TODO: once Serialization::Serialize supports true IR v10 // remove this conversion and WA for execution graph try { @@ -404,11 +403,9 @@ StatusCode CNNNetworkNGraphImpl::serialize(const std::string& xmlPath, const std } catch (...)
{ return DescriptionBuffer(UNEXPECTED, resp); } - - network = std::make_shared<details::CNNNetworkImpl>(*this); } - if (!network) return GENERAL_ERROR; - return network->serialize(xmlPath, binPath, resp); + + return DescriptionBuffer(NOT_IMPLEMENTED, resp) << "Serialization to IR v10 is not implemented"; } StatusCode CNNNetworkNGraphImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept { diff --git a/inference-engine/src/inference_engine/network_serializer.cpp b/inference-engine/src/inference_engine/network_serializer.cpp new file mode 100644 index 00000000000..97f57da63a2 --- /dev/null +++ b/inference-engine/src/inference_engine/network_serializer.cpp @@ -0,0 +1,167 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "network_serializer.hpp" + +#include +#include +#include +#include + +#include "exec_graph_info.hpp" +#include "xml_parse_utils.h" +#include "ie_ngraph_utils.hpp" +#include + +namespace InferenceEngine { +namespace Serialization { + +namespace { + +void FillXmlDocWithExecutionNGraph(const InferenceEngine::ICNNNetwork& network, + pugi::xml_document& doc) { + std::shared_ptr<ngraph::Function> function = network.getFunction(); + if (function == nullptr) { + THROW_IE_EXCEPTION << network.getName() << " does not represent ngraph::Function"; + } + + std::vector<std::shared_ptr<ngraph::Node>> ordered = function->get_ordered_ops(); + pugi::xml_node netXml = doc.append_child("net"); + netXml.append_attribute("name").set_value(network.getName().c_str()); + + pugi::xml_node layers = netXml.append_child("layers"); + std::unordered_map<std::shared_ptr<ngraph::Node>, size_t> matching; + + for (size_t i = 0; i < ordered.size(); ++i) { + matching[ordered[i]] = i; + const std::shared_ptr<ngraph::Node> node = ordered[i]; + auto params = node->get_rt_info(); + + auto layerTypeVariant = params.find(ExecGraphInfoSerialization::LAYER_TYPE); + if (layerTypeVariant == params.end()) { + THROW_IE_EXCEPTION << node->get_friendly_name() << " does not define " + << ExecGraphInfoSerialization::LAYER_TYPE << " attribute."; + } + using VariantString = ngraph::VariantImpl<std::string>; + auto layerTypeValueStr = std::dynamic_pointer_cast<VariantString>(layerTypeVariant->second); + IE_ASSERT(layerTypeValueStr != nullptr); + params.erase(layerTypeVariant); + + pugi::xml_node layer = layers.append_child("layer"); + layer.append_attribute("name").set_value(node->get_friendly_name().c_str()); + layer.append_attribute("type").set_value(layerTypeValueStr->get().c_str()); + layer.append_attribute("id").set_value(i); + + if (!params.empty()) { + pugi::xml_node data = layer.append_child("data"); + + for (const auto& it : params) { + if (auto strValue = std::dynamic_pointer_cast<VariantString>(it.second)) + data.append_attribute(it.first.c_str()).set_value(strValue->get().c_str()); + } + } + + if (node->get_input_size() > 0) { + pugi::xml_node input = layer.append_child("input"); + + for (size_t iport = 0; iport < node->get_input_size(); iport++) { + const ngraph::Shape & dims = node->get_input_shape(iport); + pugi::xml_node port = input.append_child("port"); + + port.append_attribute("id").set_value(iport); + for (auto dim : dims) { + port.append_child("dim").text().set(dim); + } + } + } + if (node->get_output_size() > 0 && + // ngraph::op::Result still has a single output, which we should not print + !std::dynamic_pointer_cast<ngraph::op::Result>(node)) { + pugi::xml_node output = layer.append_child("output"); + + for (size_t oport = 0; oport < node->get_output_size(); oport++) { + pugi::xml_node port = output.append_child("port"); + Precision outputPrecision = details::convertPrecision(node->get_output_element_type(oport));
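+                    // Output port ids continue after the input port ids, and each output port is annotated with the IE precision name derived from the ngraph element type above.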
+ + port.append_attribute("id").set_value(node->get_input_size() + oport); + port.append_attribute("precision").set_value(outputPrecision.name()); + + for (const auto dim : node->get_output_shape(oport)) { + port.append_child("dim").text().set(dim); + } + } + } + } + + pugi::xml_node edges = netXml.append_child("edges"); + + for (const auto& ord : ordered) { + const std::shared_ptr<ngraph::Node> parentNode = ord; + + if (parentNode->get_output_size() > 0) { + auto itFrom = matching.find(parentNode); + if (itFrom == matching.end()) { + THROW_IE_EXCEPTION << "Internal error, cannot find " << parentNode->get_friendly_name() + << " in matching container during serialization of IR"; + } + for (size_t oport = 0; oport < parentNode->get_output_size(); oport++) { + ngraph::Output<ngraph::Node> parentPort = parentNode->output(oport); + for (const auto& childPort : parentPort.get_target_inputs()) { + ngraph::Node * childNode = childPort.get_node(); + for (size_t iport = 0; iport < childNode->get_input_size(); iport++) { + if (childNode->input_value(iport).get_node() == parentPort.get_node()) { + auto itTo = matching.find(childNode->shared_from_this()); + if (itTo == matching.end()) { + THROW_IE_EXCEPTION << "Broken edge from layer " + << parentNode->get_friendly_name() << " to layer " + << childNode->get_friendly_name() + << " during serialization of IR"; + } + pugi::xml_node edge = edges.append_child("edge"); + edge.append_attribute("from-layer").set_value(itFrom->second); + edge.append_attribute("from-port").set_value(oport + parentNode->get_input_size()); + + edge.append_attribute("to-layer").set_value(itTo->second); + edge.append_attribute("to-port").set_value(iport); + } + } + } + } + } + } +} + +} // namespace + +void Serialize(const std::string& xmlPath, const std::string& binPath, + const InferenceEngine::ICNNNetwork& network) { + if (auto function = network.getFunction()) { + // A flag for serializing executable graph information (not complete IR) + bool execGraphInfoSerialization = true; + + // go over all operations and check whether performance stat is set + for (const auto & op : function->get_ops()) { + auto & rtInfo = op->get_rt_info(); + if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) { + execGraphInfoSerialization = false; + break; + } + } + + if (execGraphInfoSerialization) { + pugi::xml_document doc; + FillXmlDocWithExecutionNGraph(network, doc); + + if (!doc.save_file(xmlPath.c_str())) { + THROW_IE_EXCEPTION << "File '" << xmlPath << "' was not serialized"; + } + } else { + THROW_IE_EXCEPTION << "Serialization to IR v10 is not implemented in Inference Engine"; + } + } else { + THROW_IE_EXCEPTION << "Serialization to IR v7 is removed from Inference Engine"; + } +} +} // namespace Serialization +} // namespace InferenceEngine diff --git a/inference-engine/src/inference_engine/network_serializer.hpp b/inference-engine/src/inference_engine/network_serializer.hpp new file mode 100644 index 00000000000..87434a9aa94 --- /dev/null +++ b/inference-engine/src/inference_engine/network_serializer.hpp @@ -0,0 +1,24 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include + +namespace InferenceEngine { +namespace Serialization { + +/** + * @brief Serializes a network into IE IR v10 XML file and binary weights file + * @param xmlPath Path to XML file + * @param binPath Path to BIN file + * @param network network to be serialized + */ +INFERENCE_ENGINE_API_CPP(void) Serialize(const std::string& xmlPath, const
std::string& binPath, + const InferenceEngine::ICNNNetwork& network); + +} // namespace Serialization +} // namespace InferenceEngine diff --git a/inference-engine/src/legacy_api/include/network_serializer.h b/inference-engine/src/legacy_api/include/network_serializer.h deleted file mode 100644 index 0386abfb12c..00000000000 --- a/inference-engine/src/legacy_api/include/network_serializer.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include - -#include -#include - -namespace pugi { -class xml_document; -} - -namespace InferenceEngine { -namespace Serialization { -/** - * @brief Serialize network into IE IR XML file and binary weights file - * @param xmlPath Path to XML file - * @param binPath Path to BIN file - * @param network network to be serialized - */ -INFERENCE_ENGINE_API_CPP(void) Serialize(const std::string& xmlPath, const std::string& binPath, - const InferenceEngine::ICNNNetwork& network); - -/** - * @brief Fill XML representation using network - * @param network Loaded network - * @param doc XML object - * @param execGraphInfoSerialization If true scip some info serialization - * @param dumpWeights If false does not serialize waights info - * @return Size of all weights in network - */ -INFERENCE_ENGINE_API_CPP(std::size_t) FillXmlDoc(const InferenceEngine::ICNNNetwork& network, pugi::xml_document& doc, - const bool execGraphInfoSerialization = false, const bool dumpWeights = true); - -/** - * @brief Write all weights in network into output stream - * @param stream Output stream - * @param network Loaded network - */ -INFERENCE_ENGINE_API_CPP(void) SerializeBlobs(std::ostream& stream, - const InferenceEngine::ICNNNetwork& network); - -/** - * @brief Returns set of topologically sorted layers - * @param network network to be sorted - * @return `std::vector<CNNLayerPtr>` of topologically sorted CNN layers - */ -INFERENCE_ENGINE_API_CPP(std::vector<CNNLayerPtr>) TopologicalSort(const InferenceEngine::ICNNNetwork& network); -} // namespace Serialization -} // namespace InferenceEngine diff --git a/inference-engine/src/legacy_api/src/cnn_network_impl.cpp b/inference-engine/src/legacy_api/src/cnn_network_impl.cpp index e77978d299e..58537c8bbea 100644 --- a/inference-engine/src/legacy_api/src/cnn_network_impl.cpp +++ b/inference-engine/src/legacy_api/src/cnn_network_impl.cpp @@ -19,7 +19,8 @@ #include "debug.h" #include "graph_tools.hpp" #include "ie_profiling.hpp" -#include "network_serializer.h" +#include "network_serializer_v7.hpp" +#include "exec_graph_info.hpp" #include "details/ie_cnn_network_tools.h" #include "generic_ie.hpp" @@ -387,7 +388,26 @@ StatusCode CNNNetworkImpl::AddExtension(const InferenceEngine::IShapeInferExtens StatusCode CNNNetworkImpl::serialize(const std::string& xmlPath, const std::string& binPath, ResponseDesc* resp) const noexcept { try { - Serialization::Serialize(xmlPath, binPath, (InferenceEngine::ICNNNetwork&)*this); + // A flag for serializing executable graph information (not complete IR) + bool execGraphInfoSerialization = false; + + const std::vector<CNNLayerPtr> ordered = Serialization::TopologicalSort((InferenceEngine::ICNNNetwork&)*this); + // If first layer has perfCounter parameter set then it's executable graph info serialization. + // All other layers must also have this parameter set.
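+        // Plugins attach the PERF_COUNTER parameter to every node of an execution graph, so probing the first layer is enough to detect that mode before validating the rest.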
+ if (ordered[0]->params.find(ExecGraphInfoSerialization::PERF_COUNTER) != ordered[0]->params.end()) { + execGraphInfoSerialization = true; + for (const auto& layer : ordered) { + if (layer->params.find(ExecGraphInfoSerialization::PERF_COUNTER) == layer->params.end()) { + THROW_IE_EXCEPTION << "Each node must have " << ExecGraphInfoSerialization::PERF_COUNTER + << " parameter set in case of executable graph info serialization"; + } + } + } + + if (execGraphInfoSerialization) { + Serialization::Serialize(xmlPath, (InferenceEngine::ICNNNetwork&)*this); + return OK; + } } catch (const InferenceEngineException& e) { return DescriptionBuffer(GENERAL_ERROR, resp) << e.what(); } catch (const std::exception& e) { @@ -395,7 +415,8 @@ StatusCode CNNNetworkImpl::serialize(const std::string& xmlPath, const std::stri } catch (...) { return DescriptionBuffer(UNEXPECTED, resp); } - return OK; + + return DescriptionBuffer(NOT_IMPLEMENTED, resp) << "CNNNetworkImpl::serialize is not implemented"; } StatusCode CNNNetworkImpl::setBatchSize(size_t size, ResponseDesc* responseDesc) noexcept { diff --git a/inference-engine/src/legacy_api/src/network_serializer.cpp b/inference-engine/src/legacy_api/src/network_serializer.cpp deleted file mode 100644 index 483a2099ffc..00000000000 --- a/inference-engine/src/legacy_api/src/network_serializer.cpp +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright (C) 2018-2020 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "network_serializer.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ie_layers.h" - -#include "details/caseless.hpp" -#include "exec_graph_info.hpp" -#include "xml_parse_utils.h" -#include "ie_ngraph_utils.hpp" -#include - -namespace InferenceEngine { -namespace Serialization { - -namespace { -template <class T> -std::string arrayToIRProperty(const T& property) { - std::string sProperty; - for (size_t i = 0; i < property.size(); i++) { - sProperty = sProperty + std::to_string(property[i]) + std::string((i != property.size() - 1) ? "," : ""); - } - return sProperty; -} - -template <class T> -std::string arrayRevertToIRProperty(const T& property) { - std::string sProperty; - for (size_t i = 0; i < property.size(); i++) { - sProperty = sProperty + std::to_string(property[property.size() - i - 1]) + - std::string((i != property.size() - 1) ?
"," : ""); - } - return sProperty; -} - -std::size_t updatePreProcInfo(const InferenceEngine::ICNNNetwork& network, pugi::xml_node& netXml, - const std::size_t weightsDataOffset) { - InputsDataMap inputInfo; - network.getInputsInfo(inputInfo); - - // Assume that you preprocess only one input - auto dataOffset = weightsDataOffset; - for (auto ii : inputInfo) { - const PreProcessInfo& pp = ii.second->getPreProcess(); - size_t nInChannels = pp.getNumberOfChannels(); - if (nInChannels) { - pugi::xml_node preproc = netXml.append_child("pre-process"); - - preproc.append_attribute("reference-layer-name").set_value(ii.first.c_str()); - preproc.append_attribute("mean-precision").set_value(Precision(Precision::FP32).name()); - - for (size_t ch = 0; ch < nInChannels; ch++) { - const PreProcessChannel::Ptr& preProcessChannel = pp[ch]; - auto channel = preproc.append_child("channel"); - channel.append_attribute("id").set_value(ch); - - auto mean = channel.append_child("mean"); - - if (!preProcessChannel->meanData) { - mean.append_attribute("value").set_value(preProcessChannel->meanValue); - } else { - auto size = preProcessChannel->meanData->byteSize(); - mean.append_attribute("size").set_value(size); - mean.append_attribute("offset").set_value(dataOffset); - dataOffset += size; - } - - if (1.f != preProcessChannel->stdScale) { - channel.append_child("scale").append_attribute("value").set_value( - CNNLayer::ie_serialize_float(preProcessChannel->stdScale).c_str()); - } - } - } - } - return dataOffset; -} - -void UpdateStdLayerParams(const CNNLayer::Ptr& layer) { - auto layerPtr = layer.get(); - auto& params = layer->params; - using ::InferenceEngine::details::CaselessEq; - if (CaselessEq()(layer->type, "power")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of PowerLayer class"; - } - params["scale"] = CNNLayer::ie_serialize_float(lr->scale); - params["shift"] = CNNLayer::ie_serialize_float(lr->offset); - params["power"] = CNNLayer::ie_serialize_float(lr->power); - } else if (CaselessEq()(layer->type, "convolution") || - CaselessEq()(layer->type, "deconvolution")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of ConvolutionLayer class"; - } - params["kernel"] = arrayRevertToIRProperty(lr->_kernel); - params["pads_begin"] = arrayRevertToIRProperty(lr->_padding); - params["pads_end"] = arrayRevertToIRProperty(lr->_pads_end); - params["strides"] = arrayRevertToIRProperty(lr->_stride); - params["dilations"] = arrayRevertToIRProperty(lr->_dilation); - params["output"] = std::to_string(lr->_out_depth); - params["group"] = std::to_string(lr->_group); - } else if (CaselessEq()(layer->type, "deformable_convolution")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of DeformableConvolutionLayer class"; - } - params["kernel"] = arrayRevertToIRProperty(lr->_kernel); - params["pads_begin"] = arrayRevertToIRProperty(lr->_padding); - params["pads_end"] = arrayRevertToIRProperty(lr->_pads_end); - params["strides"] = arrayRevertToIRProperty(lr->_stride); - params["dilations"] = arrayRevertToIRProperty(lr->_dilation); - params["output"] = std::to_string(lr->_out_depth); - params["group"] = std::to_string(lr->_group); - params["deformable_group"] = std::to_string(lr->_deformable_group); - } else if (CaselessEq()(layer->type, "relu")) { - auto* lr = 
dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of ReLULayer class"; - } - if (lr->negative_slope != 0.0f) { - params["negative_slope"] = CNNLayer::ie_serialize_float(lr->negative_slope); - } - } else if (CaselessEq()(layer->type, "norm") || CaselessEq()(layer->type, "lrn")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of NormLayer class"; - } - params["alpha"] = CNNLayer::ie_serialize_float(lr->_alpha); - params["beta"] = CNNLayer::ie_serialize_float(lr->_beta); - params["local-size"] = std::to_string(lr->_size); - params["region"] = lr->_isAcrossMaps ? "across" : "same"; - } else if (CaselessEq()(layer->type, "pooling")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of PoolingLayer class"; - } - params["kernel"] = arrayRevertToIRProperty(lr->_kernel); - params["pads_begin"] = arrayRevertToIRProperty(lr->_padding); - params["pads_end"] = arrayRevertToIRProperty(lr->_pads_end); - params["strides"] = arrayRevertToIRProperty(lr->_stride); - - switch (lr->_type) { - case PoolingLayer::MAX: - params["pool-method"] = "max"; - break; - case PoolingLayer::AVG: - params["pool-method"] = "avg"; - break; - - default: - THROW_IE_EXCEPTION << "Found unsupported pooling method: " << lr->_type; - } - } else if (CaselessEq()(layer->type, "split")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of SplitLayer class"; - } - params["axis"] = std::to_string(lr->_axis); - } else if (CaselessEq()(layer->type, "concat")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of ConcatLayer class"; - } - params["axis"] = std::to_string(lr->_axis); - } else if (CaselessEq()(layer->type, "FullyConnected") || - CaselessEq()(layer->type, "InnerProduct")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of FullyConnectedLayer class"; - } - params["out-size"] = std::to_string(lr->_out_num); - } else if (CaselessEq()(layer->type, "softmax")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of SoftMaxLayer class"; - } - params["axis"] = std::to_string(lr->axis); - } else if (CaselessEq()(layer->type, "reshape")) { - // need to add here support of flatten layer if it is created from API - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of ReshapeLayer class"; - } - params["dim"] = arrayToIRProperty(lr->shape); - } else if (CaselessEq()(layer->type, "Eltwise")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of EltwiseLayer class"; - } - - std::string op; - - switch (lr->_operation) { - case EltwiseLayer::Sum: - op = "sum"; - break; - case EltwiseLayer::Prod: - op = "prod"; - break; - case EltwiseLayer::Max: - op = "max"; - break; - case EltwiseLayer::Sub: - op = "sub"; - break; - default: - break; - } - - params["operation"] = op; - } else if (CaselessEq()(layer->type, "scaleshift")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << 
layerPtr->name << " is not instance of ScaleShiftLayer class"; - } - params["broadcast"] = std::to_string(lr->_broadcast); - } else if (CaselessEq()(layer->type, "crop")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of CropLayer class"; - } - params["axis"] = arrayToIRProperty(lr->axis); - params["offset"] = arrayToIRProperty(lr->offset); - params["dim"] = arrayToIRProperty(lr->dim); - } else if (CaselessEq()(layer->type, "tile")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of TileLayer class"; - } - params["axis"] = std::to_string(lr->axis); - params["tiles"] = std::to_string(lr->tiles); - } else if (CaselessEq()(layer->type, "prelu")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of PReLULayer class"; - } - params["channel_shared"] = std::to_string(lr->_channel_shared); - } else if (CaselessEq()(layer->type, "clamp")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of ClampLayer class"; - } - params["min"] = CNNLayer::ie_serialize_float(lr->min_value); - params["max"] = CNNLayer::ie_serialize_float(lr->max_value); - } else if (CaselessEq()(layer->type, "BatchNormalization")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of BatchNormalizationLayer class"; - } - params["epsilon"] = CNNLayer::ie_serialize_float(lr->epsilon); - } else if (CaselessEq()(layer->type, "grn")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of GRNLayer class"; - } - params["bias"] = CNNLayer::ie_serialize_float(lr->bias); - } else if (CaselessEq()(layer->type, "mvn")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of MVNLayer class"; - } - params["across_channels"] = std::to_string(lr->across_channels); - params["normalize_variance"] = std::to_string(lr->normalize); - } else if (CaselessEq()(layer->type, "LSTMCell")) { - auto* lr = dynamic_cast(layerPtr); - if (lr == nullptr) { - THROW_IE_EXCEPTION << "Layer " << layerPtr->name << " is not instance of LSTMCell class"; - } - params["hidden_size"] = std::to_string(lr->hidden_size); - } else if (CaselessEq()(layer->type, "rnn") || - CaselessEq()(layer->type, "TensorIterator")) { - THROW_IE_EXCEPTION << "Not covered layers for writing to IR"; - } - - if (layer->params.find("quantization_level") != layer->params.end()) { - params["quantization_level"] = layer->params["quantization_level"]; - } - - // update of weightable layers - auto* pwlayer = dynamic_cast(layerPtr); - if (pwlayer) { - if (pwlayer->_weights) { - pwlayer->blobs["weights"] = pwlayer->_weights; - } - if (pwlayer->_biases) { - pwlayer->blobs["biases"] = pwlayer->_biases; - } - } -} -} // namespace - -std::vector TopologicalSort(const ICNNNetwork& network) { - std::vector ordered; - std::unordered_set used; - - OutputsDataMap outputs; - network.getOutputsInfo(outputs); - - InputsDataMap inputs; - network.getInputsInfo(inputs); - - auto get_consumers = [](const CNNLayerPtr& node) -> std::vector { - std::vector consumers; - for (const auto & output : node->outData) { - for (const auto &consumer : 
getInputTo(output)) { - consumers.push_back(consumer.second); - } - } - return consumers; - }; - auto bfs = [&used, &ordered, &get_consumers](const CNNLayerPtr& start_node, bool traverse_via_outputs = false) { - if (!start_node) return; - std::deque q; - q.push_front(start_node); - while (!q.empty()) { - auto node = q.front(); - q.pop_front(); - if (used.insert(node->name).second) { - ordered.push_back(node); - } - - // Traverse via inputs - for (const auto & input : node->insData) { - auto locked_input = input.lock(); - if (!locked_input) { - THROW_IE_EXCEPTION << "insData for " << node->name << " is not valid."; - } - if (auto next_node = getCreatorLayer(locked_input).lock()) { - if (!used.count(next_node->name)) { - // Check that all consumers were used - bool all_consumers_used(true); - for (const auto & consumer : get_consumers(next_node)) { - if (!used.count(consumer->name)) all_consumers_used = false; - } - if (all_consumers_used) { - q.push_front(next_node); - } - } - } - } - - // Traverse via outputs - if (traverse_via_outputs) { - for (const auto &consumer : get_consumers(node)) { - if (!used.count(consumer->name)) { - q.push_front(consumer); - } - } - } - } - }; - - // First we run bfs starting from outputs that provides deterministic graph traverse - for (const auto & output : outputs) { - if (!used.count(output.first)) { - bfs(getCreatorLayer(output.second).lock()); - } - } - - // For cases when graph has no outputs we start bfs from inputs to ensure topological sort - for (const auto & input : inputs) { - const auto data_ptr = input.second->getInputData(); - for (const auto & consumer : getInputTo(data_ptr)) - if (!used.count(consumer.first)) { - bfs(consumer.second, true); - } - } - - std::reverse(ordered.begin(), ordered.end()); - return ordered; -} - -namespace { - -void FillXmlDocWithExecutionNGraph(const InferenceEngine::ICNNNetwork& network, - pugi::xml_document& doc) { - std::shared_ptr function = network.getFunction(); - if (function == nullptr) { - THROW_IE_EXCEPTION << network.getName() << " does not represent ngraph::Function"; - } - - std::vector> ordered = function->get_ordered_ops(); - pugi::xml_node netXml = doc.append_child("net"); - netXml.append_attribute("name").set_value(network.getName().c_str()); - - pugi::xml_node layers = netXml.append_child("layers"); - std::unordered_map, size_t> matching; - - for (size_t i = 0; i < ordered.size(); ++i) { - matching[ordered[i]] = i; - const std::shared_ptr node = ordered[i]; - auto params = node->get_rt_info(); - - auto layerTypeVariant = params.find(ExecGraphInfoSerialization::LAYER_TYPE); - if (layerTypeVariant == params.end()) { - THROW_IE_EXCEPTION << node->get_friendly_name() << " does not define " - << ExecGraphInfoSerialization::LAYER_TYPE << " attribute."; - } - using VariantString = ngraph::VariantImpl; - auto layerTypeValueStr = std::dynamic_pointer_cast(layerTypeVariant->second); - IE_ASSERT(layerTypeValueStr != nullptr); - params.erase(layerTypeVariant); - - pugi::xml_node layer = layers.append_child("layer"); - layer.append_attribute("name").set_value(node->get_friendly_name().c_str()); - layer.append_attribute("type").set_value(layerTypeValueStr->get().c_str()); - layer.append_attribute("id").set_value(i); - - if (!params.empty()) { - pugi::xml_node data = layer.append_child("data"); - - for (const auto& it : params) { - if (auto strValue = std::dynamic_pointer_cast(it.second)) - data.append_attribute(it.first.c_str()).set_value(strValue->get().c_str()); - } - } - - if (node->get_input_size() > 0) { - 
pugi::xml_node input = layer.append_child("input"); - - for (size_t iport = 0; iport < node->get_input_size(); iport++) { - const ngraph::Shape & dims = node->get_input_shape(iport); - pugi::xml_node port = input.append_child("port"); - - port.append_attribute("id").set_value(iport); - for (auto dim : dims) { - port.append_child("dim").text().set(dim); - } - } - } - if (node->get_output_size() > 0 && - // ngraph::op::Result still have single output while we should not print it - !std::dynamic_pointer_cast(node)) { - pugi::xml_node output = layer.append_child("output"); - - for (size_t oport = 0; oport < node->get_output_size(); oport++) { - pugi::xml_node port = output.append_child("port"); - Precision outputPrecision = details::convertPrecision(node->get_output_element_type(oport)); - - port.append_attribute("id").set_value(node->get_input_size() + oport); - port.append_attribute("precision").set_value(outputPrecision.name()); - - for (const auto dim : node->get_output_shape(oport)) { - port.append_child("dim").text().set(dim); - } - } - } - } - - pugi::xml_node edges = netXml.append_child("edges"); - - for (const auto& ord : ordered) { - const std::shared_ptr parentNode = ord; - - if (parentNode->get_output_size() > 0) { - auto itFrom = matching.find(parentNode); - if (itFrom == matching.end()) { - THROW_IE_EXCEPTION << "Internal error, cannot find " << parentNode->get_friendly_name() - << " in matching container during serialization of IR"; - } - for (size_t oport = 0; oport < parentNode->get_output_size(); oport++) { - ngraph::Output parentPort = parentNode->output(oport); - for (const auto& childPort : parentPort.get_target_inputs()) { - ngraph::Node * childNode = childPort.get_node(); - for (int iport = 0; iport < childNode->get_input_size(); iport++) { - if (childNode->input_value(iport).get_node() == parentPort.get_node()) { - auto itTo = matching.find(childNode->shared_from_this()); - if (itTo == matching.end()) { - THROW_IE_EXCEPTION << "Broken edge form layer " - << parentNode->get_friendly_name() << " to layer " - << childNode->get_friendly_name() - << "during serialization of IR"; - } - pugi::xml_node edge = edges.append_child("edge"); - edge.append_attribute("from-layer").set_value(itFrom->second); - edge.append_attribute("from-port").set_value(oport + parentNode->get_input_size()); - - edge.append_attribute("to-layer").set_value(itTo->second); - edge.append_attribute("to-port").set_value(iport); - } - } - } - } - } - } -} - -} // namespace - -std::size_t FillXmlDoc(const InferenceEngine::ICNNNetwork& network, pugi::xml_document& doc, - const bool execGraphInfoSerialization, const bool dumpWeights) { - const std::vector ordered = TopologicalSort(network); - pugi::xml_node netXml = doc.append_child("net"); - netXml.append_attribute("name").set_value(network.getName().c_str()); - - // no need to print this information for executable graph information serialization because it is not IR. 
- if (!execGraphInfoSerialization) { - netXml.append_attribute("version").set_value("6"); - netXml.append_attribute("batch").set_value(network.getBatchSize()); - } - - pugi::xml_node layers = netXml.append_child("layers"); - - std::map matching; - for (size_t i = 0; i < ordered.size(); i++) { - matching[ordered[i]] = i; - } - - const std::string dataName = "data"; - size_t dataOffset = 0; - for (size_t i = 0; i < ordered.size(); ++i) { - const CNNLayerPtr node = ordered[i]; - - pugi::xml_node layer = layers.append_child("layer"); - const Precision precision = node->precision; - layer.append_attribute("name").set_value(node->name.c_str()); - layer.append_attribute("type").set_value(node->type.c_str()); - layer.append_attribute("precision").set_value(precision.name()); - layer.append_attribute("id").set_value(i); - - if (!execGraphInfoSerialization) { - UpdateStdLayerParams(node); - } - - const auto& params = node->params; - if (!params.empty()) { - pugi::xml_node data = layer.append_child(dataName.c_str()); - - for (const auto& it : params) { - data.append_attribute(it.first.c_str()).set_value(it.second.c_str()); - } - } - - if (!node->insData.empty()) { - pugi::xml_node input = layer.append_child("input"); - - for (size_t iport = 0; iport < node->insData.size(); iport++) { - const DataPtr d = node->insData[iport].lock(); - pugi::xml_node port = input.append_child("port"); - - port.append_attribute("id").set_value(iport); - - for (auto dim : d->getDims()) { - port.append_child("dim").text().set(dim); - } - } - } - if (!node->outData.empty()) { - pugi::xml_node output = layer.append_child("output"); - for (size_t oport = 0; oport < node->outData.size(); oport++) { - pugi::xml_node port = output.append_child("port"); - - port.append_attribute("id").set_value(node->insData.size() + oport); - port.append_attribute("precision").set_value(node->outData[oport]->getPrecision().name()); - - for (const auto dim : node->outData[oport]->getDims()) { - port.append_child("dim").text().set(dim); - } - } - } - if (dumpWeights && !node->blobs.empty()) { - auto blobsNode = layer.append_child("blobs"); - for (const auto& dataIt : node->blobs) { - if (!dataIt.second) continue; - size_t dataSize = dataIt.second->byteSize(); - pugi::xml_node data = blobsNode.append_child(dataIt.first.c_str()); - data.append_attribute("offset").set_value(dataOffset); - data.append_attribute("size").set_value(dataSize); - data.append_attribute("precision").set_value(dataIt.second->getTensorDesc().getPrecision().name()); - - dataOffset += dataSize; - } - } - } - - pugi::xml_node edges = netXml.append_child("edges"); - - for (const auto& ord : ordered) { - const CNNLayer::Ptr node = ord; - - if (!node->outData.empty()) { - auto itFrom = matching.find(node); - if (itFrom == matching.end()) { - THROW_IE_EXCEPTION << "Internal error, cannot find " << node->name - << " in matching container during serialization of IR"; - } - for (size_t oport = 0; oport < node->outData.size(); oport++) { - const DataPtr outData = node->outData[oport]; - for (const auto& inputTo : getInputTo(outData)) { - for (int iport = 0; iport < inputTo.second->insData.size(); iport++) { - if (inputTo.second->insData[iport].lock() == outData) { - auto itTo = matching.find(inputTo.second); - if (itTo == matching.end()) { - THROW_IE_EXCEPTION << "Broken edge form layer " << node->name << " to layer " - << inputTo.first << "during serialization of IR"; - } - pugi::xml_node edge = edges.append_child("edge"); - 
edge.append_attribute("from-layer").set_value(itFrom->second); - edge.append_attribute("from-port").set_value(oport + node->insData.size()); - - edge.append_attribute("to-layer").set_value(itTo->second); - edge.append_attribute("to-port").set_value(iport); - } - } - } - } - } - } - - return dataOffset; -} - -void SerializeBlobs(std::ostream& stream, const InferenceEngine::ICNNNetwork& network) { - const std::vector ordered = TopologicalSort(network); - for (auto&& node : ordered) { - if (!node->blobs.empty()) { - for (const auto& dataIt : node->blobs) { - if (!dataIt.second) continue; - const char* dataPtr = dataIt.second->buffer().as(); - size_t dataSize = dataIt.second->byteSize(); - stream.write(dataPtr, dataSize); - if (!stream.good()) { - THROW_IE_EXCEPTION << "Error during writing blob weights"; - } - } - } - } - - InputsDataMap inputInfo; - network.getInputsInfo(inputInfo); - - for (auto ii : inputInfo) { - const PreProcessInfo& pp = ii.second->getPreProcess(); - size_t nInChannels = pp.getNumberOfChannels(); - if (nInChannels) { - for (size_t ch = 0; ch < nInChannels; ch++) { - const PreProcessChannel::Ptr& preProcessChannel = pp[ch]; - if (preProcessChannel->meanData) { - const char* dataPtr = preProcessChannel->meanData->buffer().as(); - size_t dataSize = preProcessChannel->meanData->byteSize(); - stream.write(dataPtr, dataSize); - if (!stream.good()) { - THROW_IE_EXCEPTION << "Error during writing mean data"; - } - } - } - } - } -} - -void Serialize(const std::string& xmlPath, const std::string& binPath, - const InferenceEngine::ICNNNetwork& network) { - // A flag for serializing executable graph information (not complete IR) - bool execGraphInfoSerialization = false; - pugi::xml_document doc; - - if (auto function = network.getFunction()) { - execGraphInfoSerialization = true; - - // go over all operations and check whether performance stat is set - for (const auto & op : function->get_ops()) { - auto & rtInfo = op->get_rt_info(); - if (rtInfo.find(ExecGraphInfoSerialization::PERF_COUNTER) == rtInfo.end()) { - execGraphInfoSerialization = false; - break; - } - } - - if (execGraphInfoSerialization) { - FillXmlDocWithExecutionNGraph(network, doc); - - if (!doc.save_file(xmlPath.c_str())) { - THROW_IE_EXCEPTION << "file '" << xmlPath << "' was not serialized"; - } - - return; - } - } - - const std::vector ordered = TopologicalSort(network); - // If first layer has perfCounter parameter set then it's executable graph info serialization. - // All other layers must also have this parameter set. 
- if (ordered[0]->params.find(ExecGraphInfoSerialization::PERF_COUNTER) != ordered[0]->params.end()) { - execGraphInfoSerialization = true; - for (const auto& layer : ordered) { - if (layer->params.find(ExecGraphInfoSerialization::PERF_COUNTER) == layer->params.end()) { - THROW_IE_EXCEPTION << "Each node must have " << ExecGraphInfoSerialization::PERF_COUNTER - << " parameter set in case of executable graph info serialization"; - } - } - } - - bool dumpWeights = !execGraphInfoSerialization & !binPath.empty(); - FillXmlDoc(network, doc, execGraphInfoSerialization, dumpWeights); - - if (!doc.save_file(xmlPath.c_str())) { - THROW_IE_EXCEPTION << "file '" << xmlPath << "' was not serialized"; - } - - if (dumpWeights) { - std::ofstream ofsBin; - ofsBin.open(binPath, std::ofstream::out | std::ofstream::binary); - if (!ofsBin.is_open()) { - THROW_IE_EXCEPTION << "File '" << binPath << "' is not opened as out file stream"; - } - try { - SerializeBlobs(ofsBin, network); - } catch (std::exception& e) { - ofsBin.close(); - throw e; - } - ofsBin.close(); - if (!ofsBin.good()) { - THROW_IE_EXCEPTION << "Error during '" << binPath << "' closing"; - } - } -} -} // namespace Serialization -} // namespace InferenceEngine diff --git a/inference-engine/src/legacy_api/src/network_serializer_v7.cpp b/inference-engine/src/legacy_api/src/network_serializer_v7.cpp new file mode 100644 index 00000000000..a2c90e61105 --- /dev/null +++ b/inference-engine/src/legacy_api/src/network_serializer_v7.cpp @@ -0,0 +1,210 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "network_serializer_v7.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "xml_parse_utils.h" + +namespace InferenceEngine { +namespace Serialization { + +std::vector<CNNLayerPtr> TopologicalSort(const ICNNNetwork& network) { + std::vector<CNNLayerPtr> ordered; + std::unordered_set<std::string> used; + + OutputsDataMap outputs; + network.getOutputsInfo(outputs); + + InputsDataMap inputs; + network.getInputsInfo(inputs); + + auto get_consumers = [](const CNNLayerPtr& node) -> std::vector<CNNLayerPtr> { + std::vector<CNNLayerPtr> consumers; + for (const auto & output : node->outData) { + for (const auto &consumer : getInputTo(output)) { + consumers.push_back(consumer.second); + } + } + return consumers; + }; + auto bfs = [&used, &ordered, &get_consumers](const CNNLayerPtr& start_node, bool traverse_via_outputs = false) { + if (!start_node) return; + std::deque<CNNLayerPtr> q; + q.push_front(start_node); + while (!q.empty()) { + auto node = q.front(); + q.pop_front(); + if (used.insert(node->name).second) { + ordered.push_back(node); + } + + // Traverse via inputs + for (const auto & input : node->insData) { + auto locked_input = input.lock(); + if (!locked_input) { + THROW_IE_EXCEPTION << "insData for " << node->name << " is not valid."; + } + if (auto next_node = getCreatorLayer(locked_input).lock()) { + if (!used.count(next_node->name)) { + // Check that all consumers were used + bool all_consumers_used(true); + for (const auto & consumer : get_consumers(next_node)) { + if (!used.count(consumer->name)) all_consumers_used = false; + } + if (all_consumers_used) { + q.push_front(next_node); + } + } + } + } + + // Traverse via outputs + if (traverse_via_outputs) { + for (const auto &consumer : get_consumers(node)) { + if (!used.count(consumer->name)) { + q.push_front(consumer); + } + } + } + } + }; + + // First we run bfs starting from outputs, which provides a deterministic graph traversal + for (const auto & output : outputs) { + if
(!used.count(output.first)) { + bfs(getCreatorLayer(output.second).lock()); + } + } + + // For cases when graph has no outputs we start bfs from inputs to ensure topological sort + for (const auto & input : inputs) { + const auto data_ptr = input.second->getInputData(); + for (const auto & consumer : getInputTo(data_ptr)) + if (!used.count(consumer.first)) { + bfs(consumer.second, true); + } + } + + std::reverse(ordered.begin(), ordered.end()); + return ordered; +} + +std::size_t FillXmlDoc(const InferenceEngine::ICNNNetwork& network, pugi::xml_document& doc) { + const std::vector<CNNLayerPtr> ordered = TopologicalSort(network); + pugi::xml_node netXml = doc.append_child("net"); + netXml.append_attribute("name").set_value(network.getName().c_str()); + + pugi::xml_node layers = netXml.append_child("layers"); + + std::map<CNNLayerPtr, size_t> matching; + for (size_t i = 0; i < ordered.size(); i++) { + matching[ordered[i]] = i; + } + + const std::string dataName = "data"; + size_t dataOffset = 0; + for (size_t i = 0; i < ordered.size(); ++i) { + const CNNLayerPtr node = ordered[i]; + + pugi::xml_node layer = layers.append_child("layer"); + const Precision precision = node->precision; + layer.append_attribute("name").set_value(node->name.c_str()); + layer.append_attribute("type").set_value(node->type.c_str()); + layer.append_attribute("precision").set_value(precision.name()); + layer.append_attribute("id").set_value(i); + + const auto& params = node->params; + if (!params.empty()) { + pugi::xml_node data = layer.append_child(dataName.c_str()); + + for (const auto& it : params) { + data.append_attribute(it.first.c_str()).set_value(it.second.c_str()); + } + } + + if (!node->insData.empty()) { + pugi::xml_node input = layer.append_child("input"); + + for (size_t iport = 0; iport < node->insData.size(); iport++) { + const DataPtr d = node->insData[iport].lock(); + pugi::xml_node port = input.append_child("port"); + + port.append_attribute("id").set_value(iport); + + for (auto dim : d->getDims()) { + port.append_child("dim").text().set(dim); + } + } + } + if (!node->outData.empty()) { + pugi::xml_node output = layer.append_child("output"); + for (size_t oport = 0; oport < node->outData.size(); oport++) { + pugi::xml_node port = output.append_child("port"); + + port.append_attribute("id").set_value(node->insData.size() + oport); + port.append_attribute("precision").set_value(node->outData[oport]->getPrecision().name()); + + for (const auto dim : node->outData[oport]->getDims()) { + port.append_child("dim").text().set(dim); + } + } + } + } + + pugi::xml_node edges = netXml.append_child("edges"); + + for (const auto& ord : ordered) { + const CNNLayer::Ptr node = ord; + + if (!node->outData.empty()) { + auto itFrom = matching.find(node); + if (itFrom == matching.end()) { + THROW_IE_EXCEPTION << "Internal error, cannot find " << node->name + << " in matching container during serialization of IR"; + } + for (size_t oport = 0; oport < node->outData.size(); oport++) { + const DataPtr outData = node->outData[oport]; + for (const auto& inputTo : getInputTo(outData)) { + for (size_t iport = 0; iport < inputTo.second->insData.size(); iport++) { + if (inputTo.second->insData[iport].lock() == outData) { + auto itTo = matching.find(inputTo.second); + if (itTo == matching.end()) { + THROW_IE_EXCEPTION << "Broken edge from layer " << node->name << " to layer " + << inputTo.first << " during serialization of IR"; + } + pugi::xml_node edge = edges.append_child("edge"); + edge.append_attribute("from-layer").set_value(itFrom->second); +
edge.append_attribute("from-port").set_value(oport + node->insData.size()); + + edge.append_attribute("to-layer").set_value(itTo->second); + edge.append_attribute("to-port").set_value(iport); + } + } + } + } + } + } + + return dataOffset; +} + +void Serialize(const std::string& xmlPath, const InferenceEngine::ICNNNetwork& network) { + pugi::xml_document doc; + FillXmlDoc(network, doc); + + if (!doc.save_file(xmlPath.c_str())) { + THROW_IE_EXCEPTION << "file '" << xmlPath << "' was not serialized"; + } +} +} // namespace Serialization +} // namespace InferenceEngine diff --git a/inference-engine/src/legacy_api/src/network_serializer_v7.hpp b/inference-engine/src/legacy_api/src/network_serializer_v7.hpp new file mode 100644 index 00000000000..5dc08854d81 --- /dev/null +++ b/inference-engine/src/legacy_api/src/network_serializer_v7.hpp @@ -0,0 +1,31 @@ +// Copyright (C) 2018-2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include + +#include +#include + +namespace InferenceEngine { +namespace Serialization { + +/** + * @brief Serialize execution network into IE IR-like XML file + * @param xmlPath Path to XML file + * @param network network to be serialized + */ +INFERENCE_ENGINE_API_CPP(void) Serialize(const std::string& xmlPath, const InferenceEngine::ICNNNetwork& network); + +/** + * @brief Returns set of topologically sorted layers + * @param network network to be sorted + * @return `std::vector` of topologically sorted CNN layers + */ +INFERENCE_ENGINE_API_CPP(std::vector) TopologicalSort(const InferenceEngine::ICNNNetwork& network); + +} // namespace Serialization +} // namespace InferenceEngine diff --git a/inference-engine/src/low_precision_transformations/src/concat_multi_channels.cpp b/inference-engine/src/low_precision_transformations/src/concat_multi_channels.cpp index 1fe4bcca21c..3c97312e869 100644 --- a/inference-engine/src/low_precision_transformations/src/concat_multi_channels.cpp +++ b/inference-engine/src/low_precision_transformations/src/concat_multi_channels.cpp @@ -17,7 +17,6 @@ #include "cnn_network_impl.hpp" #include "ie_util_internal.hpp" -#include "network_serializer.h" #include "low_precision_transformations/common/ie_lpt_exception.hpp" #include "low_precision_transformations/network_helper.hpp" diff --git a/inference-engine/tests/functional/inference_engine/local_test.cpp b/inference-engine/tests/functional/inference_engine/local_test.cpp index eeab57a479d..da9419bb3b8 100644 --- a/inference-engine/tests/functional/inference_engine/local_test.cpp +++ b/inference-engine/tests/functional/inference_engine/local_test.cpp @@ -229,8 +229,6 @@ protected: ASSERT_EQ(preproc[0]->meanValue, 104.006f); } else { InferenceEngine::NetPass::UnrollRNN_if(net, [] (const RNNCellBase& rnn) -> bool { return true; }); - net.serialize("UnrollRNN_if.xml"); - EXPECT_EQ(0, std::remove("UnrollRNN_if.xml")); auto lstmcell_layer = dynamic_pointer_cast(CommonTestUtils::getLayerByName(net, "LSTMCell:split_clip")); float ref_coeff = 0.2f; diff --git a/inference-engine/tests/functional/inference_engine/network_serializer_test.cpp b/inference-engine/tests/functional/inference_engine/network_serializer_test.cpp deleted file mode 100644 index 559b910521e..00000000000 --- a/inference-engine/tests/functional/inference_engine/network_serializer_test.cpp +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (C) 2019 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include -#include -#include - -#include "common_test_utils/file_utils.hpp" -#include 
"common_test_utils/test_common.hpp" -#include "functional_test_utils/network_utils.hpp" -#include "functional_test_utils/test_model/test_model.hpp" -#include "ngraph_functions/subgraph_builders.hpp" - -class CNNNetworkSerializerTest - : public CommonTestUtils::TestsCommon, public testing::WithParamInterface { -protected: - void SetUp() override { - _netPrc = GetParam(); - /* generate test model */ - FuncTestUtils::TestModel::generateTestModel(_modelPath, _weightsPath, _netPrc); - } - - void TearDown() override { - CommonTestUtils::removeIRFiles(_modelPath, _weightsPath); - } - - InferenceEngine::Precision _netPrc; - const std::string _modelPath = "NetworkSerializer_test.xml"; - const std::string _weightsPath = "NetworkSerializer_test.bin"; -}; - -TEST_P(CNNNetworkSerializerTest, SerializeEmptyFilePathsThrowsException) { - InferenceEngine::Core ie; - InferenceEngine::CNNNetwork network = ie.ReadNetwork(_modelPath, _weightsPath); - ASSERT_THROW(network.serialize("", ""), InferenceEngine::details::InferenceEngineException); -} - -TEST_P(CNNNetworkSerializerTest, Serialize) { - InferenceEngine::Core ie; - InferenceEngine::CNNNetwork originalNetwork = ie.ReadNetwork(_modelPath, _weightsPath); - { - IE_SUPPRESS_DEPRECATED_START - // convert to old representation - originalNetwork = InferenceEngine::CNNNetwork( - std::make_shared(originalNetwork)); - IE_SUPPRESS_DEPRECATED_END - } - originalNetwork.getInputsInfo().begin()->second->setPrecision(_netPrc); - originalNetwork.getOutputsInfo().begin()->second->setPrecision(_netPrc); - - std::string xmlFilePath = "NetworkSerializer_test_serialized.xml"; - std::string binFileName = "NetworkSerializer_test_serialized.bin"; - try { - originalNetwork.serialize(xmlFilePath, binFileName); - - InferenceEngine::CNNNetwork serializedNetwork = ie.ReadNetwork(xmlFilePath, binFileName); - serializedNetwork.getInputsInfo().begin()->second->setPrecision(_netPrc); - serializedNetwork.getOutputsInfo().begin()->second->setPrecision(_netPrc); - - FuncTestUtils::compareCNNNetworks(originalNetwork, serializedNetwork); - - CommonTestUtils::removeIRFiles(xmlFilePath, binFileName); - } catch (...) 
{ - CommonTestUtils::removeIRFiles(xmlFilePath, binFileName); - throw; - } -} - -TEST_P(CNNNetworkSerializerTest, TopoSortResultUnique) { - InferenceEngine::CNNNetwork network(ngraph::builder::subgraph::makeConvPoolRelu()); - auto convertedNetwork = std::make_shared(network); - auto sorted = InferenceEngine::Serialization::TopologicalSort(*convertedNetwork); - - std::vector actualLayerNames; - for (auto&& layer : sorted) { - IE_SUPPRESS_DEPRECATED_START - actualLayerNames.emplace_back(layer->name); - IE_SUPPRESS_DEPRECATED_END - } - - const std::vector expectedLayerNames = { - "Param_1", "Const_1", "Reshape_1", "Conv_1", - "Pool_1", "Relu_1", "Const_2", "Reshape_2" - }; - - ASSERT_EQ(expectedLayerNames, actualLayerNames); -} - -std::string getTestCaseName(testing::TestParamInfo params) { - return params.param.name(); -} - -INSTANTIATE_TEST_CASE_P( - SerializerTest, - CNNNetworkSerializerTest, - testing::Values(InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP16), - getTestCaseName); diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp index 72a63c9fd03..35ae147d30c 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/cpu_test_utils.hpp @@ -6,7 +6,6 @@ #include #include -#include "network_serializer.h" #include "ie_system_conf.h" #include diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp index bc1c3a724b9..949c924e3a4 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp @@ -10,6 +10,8 @@ #include #include "ie_extension.h" #include +#include +#include #include #include #include @@ -22,32 +24,6 @@ namespace BehaviorTestsDefinitions { using BehaviorTests = BehaviorTestsUtils::BehaviorTestsBasic; -bool static compare_two_files_lexicographically(const std::string &name_a, const std::string &name_b) { - std::ifstream a(name_a), b(name_b); - - std::string line_a, line_b; - while (std::getline(a, line_a)) { - std::string str_a, str_b; - std::istringstream(line_a) >> str_a; - - if (!std::getline(b, line_b)) - throw std::logic_error("Second file is shorter than first"); - else - std::istringstream(line_b) >> str_b; - - if (line_a != line_b) { - std::cout << "Line A: " << line_a << std::endl; - std::cout << "Line B: " << line_b << std::endl; - throw std::logic_error("Files are different"); - } - } - - if (std::getline(b, line_b)) - throw std::logic_error("First file is shorter than second"); - else - return true; -} - void setInputNetworkPrecision(InferenceEngine::CNNNetwork &network, InferenceEngine::InputsDataMap &inputs_info, InferenceEngine::Precision input_precision) { inputs_info = network.getInputsInfo(); @@ -83,20 +59,14 @@ TEST_P(BehaviorTests, canNotLoadNetworkWithoutWeights) { TEST_P(BehaviorTests, pluginDoesNotChangeOriginalNetwork) { // Skip test according to plugin specific disabledTestPatterns() (if any) SKIP_IF_CURRENT_TEST_IS_DISABLED() - const std::string name_a = "a.xml"; - const std::string name_b = "b.xml"; auto param = GetParam(); InferenceEngine::CNNNetwork cnnNet(function); - InferenceEngine::CNNNetwork execGraph; - cnnNet.serialize(name_a); - 
ASSERT_NO_THROW(ie->LoadNetwork(cnnNet, targetDevice, configuration)); - cnnNet.serialize(name_b); - compare_two_files_lexicographically(name_a, name_b); - EXPECT_EQ(0, std::remove(name_a.c_str())); - EXPECT_EQ(0, std::remove(name_b.c_str())); + // compare 2 networks + auto referenceNetwork = ngraph::builder::subgraph::makeConvPoolRelu(); + compare_functions(referenceNetwork, cnnNet.getFunction()); } using BehaviorTestInput = BehaviorTestsUtils::BehaviorTestsBasic; diff --git a/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/network_serializer.cpp b/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/network_serializer.cpp new file mode 100644 index 00000000000..69798fcbe62 --- /dev/null +++ b/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/network_serializer.cpp @@ -0,0 +1,95 @@ +// Copyright (C) 2020 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include +#include + +using namespace InferenceEngine; + +IE_SUPPRESS_DEPRECATED_START + +std::vector<CNNLayerPtr> TopologicalSort(const InferenceEngine::ICNNNetwork& network) { + std::vector<CNNLayerPtr> ordered; + std::unordered_set<std::string> used; + + OutputsDataMap outputs; + network.getOutputsInfo(outputs); + + InputsDataMap inputs; + network.getInputsInfo(inputs); + + auto get_consumers = [](const CNNLayerPtr& node) -> std::vector<CNNLayerPtr> { + std::vector<CNNLayerPtr> consumers; + for (const auto & output : node->outData) { + for (const auto &consumer : getInputTo(output)) { + consumers.push_back(consumer.second); + } + } + return consumers; + }; + auto bfs = [&used, &ordered, &get_consumers](const CNNLayerPtr& start_node, bool traverse_via_outputs = false) { + if (!start_node) return; + std::deque<CNNLayerPtr> q; + q.push_front(start_node); + while (!q.empty()) { + auto node = q.front(); + q.pop_front(); + if (used.insert(node->name).second) { + ordered.push_back(node); + } + + // Traverse via inputs + for (const auto & input : node->insData) { + auto locked_input = input.lock(); + if (!locked_input) { + THROW_IE_EXCEPTION << "insData for " << node->name << " is not valid."; + } + if (auto next_node = getCreatorLayer(locked_input).lock()) { + if (!used.count(next_node->name)) { + // Check that all consumers were used + bool all_consumers_used(true); + for (const auto & consumer : get_consumers(next_node)) { + if (!used.count(consumer->name)) all_consumers_used = false; + } + if (all_consumers_used) { + q.push_front(next_node); + } + } + } + } + + // Traverse via outputs + if (traverse_via_outputs) { + for (const auto &consumer : get_consumers(node)) { + if (!used.count(consumer->name)) { + q.push_front(consumer); + } + } + } + } + }; + + // First we run bfs starting from outputs, which provides a deterministic graph traversal + for (const auto & output : outputs) { + if (!used.count(output.first)) { + bfs(getCreatorLayer(output.second).lock()); + } + } + + // For cases when graph has no outputs we start bfs from inputs to ensure topological sort + for (const auto & input : inputs) { + const auto data_ptr = input.second->getInputData(); + for (const auto & consumer : getInputTo(data_ptr)) + if (!used.count(consumer.first)) { + bfs(consumer.second, true); + } + } + + std::reverse(ordered.begin(), ordered.end()); + return ordered; +} \ No newline at end of file diff --git a/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp b/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp index
8c8b120af7e..506ca909f64 100644 --- a/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/num_inputs_fusing_bin_conv.cpp @@ -14,7 +14,7 @@ #include "execution_graph_tests/num_inputs_fusing_bin_conv.hpp" -#include "network_serializer.h" +std::vector<InferenceEngine::CNNLayerPtr> TopologicalSort(const InferenceEngine::ICNNNetwork& network); namespace LayerTestsDefinitions { @@ -78,7 +78,7 @@ TEST_P(ExecGraphInputsFusingBinConv, CheckNumInputsInBinConvFusingWithConv) { } else { IE_SUPPRESS_DEPRECATED_START std::vector<InferenceEngine::CNNLayerPtr> nodes; - ASSERT_NO_THROW(nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo)); + ASSERT_NO_THROW(nodes = TopologicalSort(execGraphInfo)); for (auto &node : nodes) { if (node->type == "BinaryConvolution") { std::string originalLayersNames = node->params["originalLayersNames"]; diff --git a/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/unique_node_names.cpp b/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/unique_node_names.cpp index 15a70e33f4a..25e67d55702 100644 --- a/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/unique_node_names.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/execution_graph_tests/unique_node_names.cpp @@ -21,7 +21,7 @@ #include "execution_graph_tests/unique_node_names.hpp" -#include "network_serializer.h" +std::vector<InferenceEngine::CNNLayerPtr> TopologicalSort(const InferenceEngine::ICNNNetwork& network); namespace LayerTestsDefinitions { @@ -88,7 +88,7 @@ TEST_P(ExecGraphUniqueNodeNames, CheckUniqueNodeNames) { } } } else { - auto nodes = InferenceEngine::Serialization::TopologicalSort(execGraphInfo); + auto nodes = TopologicalSort(execGraphInfo); for (auto &node : nodes) { IE_SUPPRESS_DEPRECATED_START ASSERT_TRUE(names.find(node->name) == names.end()) <<