Remove JSON serializer (#1638)

Ilya Churaev 2020-08-06 05:51:05 +03:00 committed by GitHub
parent 0339fff3bc
commit 7a314f216a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
34 changed files with 1 addition and 4274 deletions


@@ -80,7 +80,6 @@ function(build_ngraph)
else()
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE FALSE)
endif()
ngraph_set(NGRAPH_JSON_ENABLE FALSE)
ngraph_set(NGRAPH_INTERPRETER_ENABLE TRUE)
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")


@@ -146,7 +146,6 @@ You can use the following additional build options:
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
## Build for Raspbian Stretch* OS
@@ -325,7 +324,6 @@ You can use the following additional build options:
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
## Build on Windows* Systems
@@ -428,7 +426,6 @@ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
### Building Inference Engine with Ninja* Build System
@@ -520,7 +517,6 @@ You can use the following additional build options:
- nGraph-specific compilation options:
`-DNGRAPH_ONNX_IMPORT_ENABLE=ON` enables the building of the nGraph ONNX importer.
`-DNGRAPH_JSON_ENABLE=ON` enables nGraph JSON-based serialization.
`-DNGRAPH_DEBUG_ENABLE=ON` enables additional debug prints.
## Build on Android* Systems


@@ -8,20 +8,6 @@ CNNNetwork. Both representations provide an API to get detailed information abou
To receive additional messages about applied graph modifications, rebuild the nGraph library with
the `-DNGRAPH_DEBUG_ENABLE=ON` option.
To enable serialization and deserialization of the nGraph function to a JSON file, rebuild the
nGraph library with the `-DNGRAPH_JSON_ENABLE=ON` option. To serialize or deserialize the nGraph
function, call the nGraph function as follows:
```cpp
#include <ngraph/serializer.hpp>
std::shared_ptr<ngraph::Function> nGraph;
...
ngraph::serialize("test_json.json", nGraph); // For graph serialization
std::ifstream file("test_json.json"); // Open a JSON file
nGraph = ngraph::deserialize(file); // For graph deserialization
```
To visualize the nGraph function to the xDot format or to an image file, use the
`ngraph::pass::VisualizeTree` graph transformation pass:
```cpp
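The documentation's code block is cut off at this point in the hunk. For context, the `VisualizeTree` pass mentioned above is typically driven through `ngraph::pass::Manager`, as the test code later in this commit also does; the following is a minimal sketch (the `visualize` helper and the output file name are illustrative, not part of the diff):

```cpp
#include <memory>

#include "ngraph/function.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"

// Illustrative helper: dump an nGraph function to an xDot/image file.
// "func" is assumed to be a valid, already-built ngraph::Function.
void visualize(const std::shared_ptr<ngraph::Function>& func)
{
    ngraph::pass::Manager pass_manager;
    // The file extension selects the output format (e.g. .dot, .png, .svg).
    pass_manager.register_pass<ngraph::pass::VisualizeTree>("graph.png");
    pass_manager.run_passes(func);
}
```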


@@ -120,7 +120,6 @@ option(NGRAPH_CODE_COVERAGE_ENABLE "Enable code coverage data collection" FALSE)
option(NGRAPH_LIB_VERSIONING_ENABLE "Enable shared library versioning" FALSE)
option(NGRAPH_PYTHON_BUILD_ENABLE "Enable build nGraph python package wheel" FALSE)
option(NGRAPH_IE_ENABLE "Enable the Inference Engine backend" FALSE)
option(NGRAPH_JSON_ENABLE "Enable JSON based serialization and tracing features" TRUE)
option(NGRAPH_DYNAMIC_COMPONENTS_ENABLE "Enable dynamic loading of components" TRUE)
option(NGRAPH_EXPORT_TARGETS_ENABLE "Enable exporting nGraph cmake export targets" TRUE)
option(NGRAPH_WARNINGS_AS_ERRORS "Make all nGraph compile-time warnings into errors" FALSE)
@@ -158,7 +157,6 @@ NORMALIZE_BOOL(NGRAPH_CODE_COVERAGE_ENABLE)
NORMALIZE_BOOL(NGRAPH_LIB_VERSIONING_ENABLE)
NORMALIZE_BOOL(NGRAPH_PYTHON_BUILD_ENABLE)
NORMALIZE_BOOL(NGRAPH_IE_ENABLE)
NORMALIZE_BOOL(NGRAPH_JSON_ENABLE)
NORMALIZE_BOOL(NGRAPH_DYNAMIC_COMPONENTS_ENABLE)
NORMALIZE_BOOL(NGRAPH_EXPORT_TARGETS_ENABLE)
@@ -177,7 +175,6 @@ message(STATUS "NGRAPH_EXPORT_TARGETS_ENABLE: ${NGRAPH_EXPORT_TARGETS_EN
message(STATUS "NGRAPH_IE_ENABLE: ${NGRAPH_IE_ENABLE}")
message(STATUS "NGRAPH_IE_STATIC_LIB_ENABLE: ${NGRAPH_IE_STATIC_LIB_ENABLE}")
message(STATUS "NGRAPH_INTERPRETER_ENABLE: ${NGRAPH_INTERPRETER_ENABLE}")
message(STATUS "NGRAPH_JSON_ENABLE: ${NGRAPH_JSON_ENABLE}")
message(STATUS "NGRAPH_LIB_VERSIONING_ENABLE: ${NGRAPH_LIB_VERSIONING_ENABLE}")
message(STATUS "NGRAPH_ONNX_IMPORT_ENABLE: ${NGRAPH_ONNX_IMPORT_ENABLE}")
message(STATUS "NGRAPH_PYTHON_BUILD_ENABLE: ${NGRAPH_PYTHON_BUILD_ENABLE}")
@@ -486,10 +483,6 @@ if (NGRAPH_ONNX_IMPORT_ENABLE)
endif()
endif()
if(NGRAPH_JSON_ENABLE)
include(cmake/external_json.cmake)
endif()
add_subdirectory(src)
if (NGRAPH_TEST_UTIL_ENABLE)


@@ -1,49 +0,0 @@
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
# Enable ExternalProject CMake module
include(ExternalProject)
#------------------------------------------------------------------------------
# Download json
#------------------------------------------------------------------------------
SET(JSON_GIT_REPO_URL https://github.com/nlohmann/json)
if(WIN32)
SET(JSON_GIT_LABEL v3.5.0)
else()
SET(JSON_GIT_LABEL v3.7.3)
endif()
ExternalProject_Add(
ext_json
PREFIX json
GIT_REPOSITORY ${JSON_GIT_REPO_URL}
GIT_TAG ${JSON_GIT_LABEL}
# Disable install step
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
UPDATE_COMMAND ""
EXCLUDE_FROM_ALL TRUE
)
#------------------------------------------------------------------------------
ExternalProject_Get_Property(ext_json SOURCE_DIR)
add_library(libjson INTERFACE)
target_include_directories(libjson SYSTEM INTERFACE ${SOURCE_DIR}/include)
add_dependencies(libjson ext_json)


@@ -4,7 +4,6 @@
| ------------------------------------|:---:| --- |
| NGRAPH_DISABLED_FUSIONS | |
| NGRAPH_ENABLE_REPLACE_CHECK | |
| NGRAPH_ENABLE_SERIALIZE_TRACING | |
| NGRAPH_ENABLE_TRACING | |
| NGRAPH_ENABLE_VISUALIZE_TRACING | |
| NGRAPH_FAIL_MATCH_AT | |
@@ -14,7 +13,6 @@
| NGRAPH_PASS_ENABLES | |
| NGRAPH_PROFILE_PASS_ENABLE | |
| NGRAPH_PROVENANCE_ENABLE | |
| NGRAPH_SERIALIZER_OUTPUT_SHAPES | |
| NGRAPH_VISUALIZE_EDGE_JUMP_DISTANCE | |
| NGRAPH_VISUALIZE_EDGE_LABELS | |
| NGRAPH_VISUALIZE_TRACING_FORMAT | |


@@ -1,24 +0,0 @@
https://github.com/nlohmann/json/blob/develop/LICENSE.MIT
MIT License
Copyright (c) 2013-2018 Niels Lohmann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -205,7 +205,6 @@ sources = [
"pyngraph/passes/regmodule_pyngraph_passes.cpp",
"pyngraph/partial_shape.cpp",
"pyngraph/pyngraph.cpp",
"pyngraph/serializer.cpp",
"pyngraph/shape.cpp",
"pyngraph/strides.cpp",
"pyngraph/tensor_iterator_builder.cpp",


@@ -46,5 +46,4 @@ from _pyngraph import AxisSet
from _pyngraph import AxisVector
from _pyngraph import Coordinate
from _pyngraph import serialize
from _pyngraph import util


@@ -34,7 +34,6 @@
#include "pyngraph/ops/util/regmodule_pyngraph_op_util.hpp"
#include "pyngraph/partial_shape.hpp"
#include "pyngraph/passes/regmodule_pyngraph_passes.hpp"
#include "pyngraph/serializer.hpp"
#include "pyngraph/shape.hpp"
#include "pyngraph/strides.hpp"
#include "pyngraph/types/regmodule_pyngraph_types.hpp"
@@ -59,7 +58,6 @@ PYBIND11_MODULE(_pyngraph, m)
regclass_pyngraph_Coordinate(m);
regmodule_pyngraph_types(m);
regclass_pyngraph_Function(m);
regclass_pyngraph_Serializer(m);
py::module m_op = m.def_submodule("op", "Package ngraph.impl.op that wraps ngraph::op");
regclass_pyngraph_op_Constant(m_op);
regclass_pyngraph_op_Parameter(m_op);


@@ -1,32 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <string>
#include "ngraph/serializer.hpp"
#include "pyngraph/serializer.hpp"
namespace py = pybind11;
void regclass_pyngraph_Serializer(py::module m)
{
m.def("serialize",
(std::string(*)(std::shared_ptr<ngraph::Function>, size_t)) & ngraph::serialize,
py::arg(),
py::arg("indent") = 0);
}


@@ -1,23 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <pybind11/pybind11.h>
namespace py = pybind11;
void regclass_pyngraph_Serializer(py::module m);


@@ -21,7 +21,7 @@ import numpy as np
from openvino.inference_engine import IECore, IENetwork
from ngraph.exceptions import UserInputError
from ngraph.impl import Function, Node, serialize
from ngraph.impl import Function, Node
from ngraph.utils.types import NumericData
import tests
@@ -111,11 +111,3 @@ class Computation(object):
request = self.executable_network.requests[0]
request.infer(dict(zip(request._inputs_list, input_values)))
return [blob.buffer for blob in request.output_blobs.values()]
def serialize(self, indent: int = 0) -> str:
"""Serialize function (compute graph) to a JSON string.
:param indent: set indent of serialized output
:return: serialized model
"""
return serialize(self.function, indent)


@@ -543,12 +543,6 @@ set (SRC
variant.hpp
)
if(NGRAPH_JSON_ENABLE)
list(APPEND SRC serializer.cpp serializer.hpp)
else()
list(APPEND SRC serializer_stub.cpp)
endif()
configure_file(version.in.hpp version.hpp)
add_library(ngraph SHARED ${SRC})
@@ -558,10 +552,6 @@ set_target_properties(ngraph PROPERTIES
C_VISIBILITY_PRESET hidden
VISIBILITY_INLINES_HIDDEN ON)
if(NOT NGRAPH_JSON_ENABLE)
target_compile_definitions(ngraph PUBLIC NGRAPH_JSON_DISABLE)
endif()
target_link_libraries(ngraph PRIVATE openvino::itt)
add_subdirectory(frontend)
@@ -581,9 +571,6 @@ if(NGRAPH_LIB_VERSIONING_ENABLE)
VERSION ${NGRAPH_VERSION}
SOVERSION ${NGRAPH_API_VERSION})
endif()
if(NGRAPH_JSON_ENABLE)
target_link_libraries(ngraph PRIVATE libjson)
endif()
target_compile_definitions(ngraph PUBLIC NGRAPH_VERSION="${NGRAPH_VERSION}")
if (LINUX)
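In the configuration removed above, `NGRAPH_JSON_DISABLE` was defined publicly whenever `NGRAPH_JSON_ENABLE` was off, and callers guarded serializer usage with that macro (the unit tests further down in this commit use the same pattern). A minimal sketch of that guard, with a hypothetical `try_serialize` helper for illustration:

```cpp
#include <memory>
#include <string>

#include "ngraph/function.hpp"
#include "ngraph/serializer.hpp"

// Illustrative helper: call the serializer only when it was compiled in;
// otherwise the serializer_stub implementation (also removed below) would throw.
std::string try_serialize(const std::shared_ptr<ngraph::Function>& f)
{
#ifndef NGRAPH_JSON_DISABLE
    return ngraph::serialize(f, /*indent=*/4);
#else
    return std::string{};
#endif
}
```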

File diff suppressed because it is too large


@@ -1,89 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <memory>
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
/// \brief Serialize a Function to a json string
/// \param func The Function to serialize
/// \param indent If 0 then there is no formatting applied and the resulting string is the
/// most compact representation. If non-zero then the json string is formatted with the
/// indent level specified.
NGRAPH_API
std::string serialize(std::shared_ptr<ngraph::Function> func, size_t indent = 0);
/// \brief Serialize a Function to a json file
/// \param path The path to the output file
/// \param func The Function to serialize
/// \param indent If 0 then there is no formatting applied and the resulting string is the
/// most compact representation. If non-zero then the json string is formatted with the
/// indent level specified.
NGRAPH_API
void serialize(const std::string& path,
std::shared_ptr<ngraph::Function> func,
size_t indent = 0);
/// \brief Serialize a Function to a json stream
/// \param out The output stream to which the data is serialized.
/// \param func The Function to serialize
/// \param indent If 0 then there is no formatting applied and the json is the
/// most compact representation. If non-zero then the json is formatted with the
/// indent level specified.
NGRAPH_API
void serialize(std::ostream& out, std::shared_ptr<ngraph::Function> func, size_t indent = 0);
/// \brief Deserialize a Function
/// \param in An istream to the input data
NGRAPH_API
std::shared_ptr<ngraph::Function> deserialize(std::istream& in);
/// \brief Deserialize a Function
/// \param str The json formatted string to deserialize.
NGRAPH_API
std::shared_ptr<ngraph::Function> deserialize(const std::string& str);
/// \brief If enabled adds output shapes to the serialized graph
/// \param enable Set to true to enable or false otherwise
///
/// Option may be enabled by setting the environment variable NGRAPH_SERIALIZER_OUTPUT_SHAPES
NGRAPH_API
void set_serialize_output_shapes(bool enable);
NGRAPH_API
bool get_serialize_output_shapes();
class WithSerializeOutputShapesEnabled
{
public:
WithSerializeOutputShapesEnabled(bool enabled = true)
{
m_serialize_output_shapes_enabled = get_serialize_output_shapes();
set_serialize_output_shapes(enabled);
}
~WithSerializeOutputShapesEnabled()
{
set_serialize_output_shapes(m_serialize_output_shapes_enabled);
}
private:
bool m_serialize_output_shapes_enabled;
};
}
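For reference, the removed API declared above was used roughly as in the documentation snippet deleted earlier in this commit; a minimal round-trip sketch (the `round_trip` helper and file name are illustrative only):

```cpp
#include <fstream>
#include <memory>

#include "ngraph/function.hpp"
#include "ngraph/serializer.hpp"

// Illustrative helper: JSON round-trip of an nGraph function using the removed API.
void round_trip(const std::shared_ptr<ngraph::Function>& f)
{
    // Serialize to a file with 4-space indentation.
    ngraph::serialize("model.json", f, 4);

    // Read the same graph back from the file.
    std::ifstream in("model.json");
    std::shared_ptr<ngraph::Function> g = ngraph::deserialize(in);
}
```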


@@ -1,49 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/serializer.hpp"
std::string ngraph::serialize(std::shared_ptr<ngraph::Function> func, size_t indent)
{
throw std::runtime_error("serializer disabled in build");
}
void ngraph::serialize(const std::string& path,
std::shared_ptr<ngraph::Function> func,
size_t indent)
{
throw std::runtime_error("serializer disabled in build");
}
void ngraph::serialize(std::ostream& out, std::shared_ptr<ngraph::Function> func, size_t indent)
{
throw std::runtime_error("serializer disabled in build");
}
std::shared_ptr<ngraph::Function> ngraph::deserialize(std::istream& in)
{
throw std::runtime_error("serializer disabled in build");
}
std::shared_ptr<ngraph::Function> ngraph::deserialize(const std::string& str)
{
throw std::runtime_error("serializer disabled in build");
}
void ngraph::set_serialize_output_shapes(bool enable)
{
throw std::runtime_error("serializer disabled in build");
}


@@ -200,10 +200,6 @@ foreach(HEADER ${NGRAPH_HEADER_LIST})
list(APPEND SRC ${OUT_FILE})
endforeach()
if(NGRAPH_JSON_ENABLE)
list(APPEND SRC core.cpp serialize.cpp)
endif()
set_source_files_properties(includes.cpp PROPERTIES COMPILE_DEFINITIONS
NGRAPH_INCLUDES="${PROJECT_SOURCE_DIR}/src/ngraph")
@@ -398,9 +394,6 @@ if (NGRAPH_ONNX_IMPORT_ENABLE AND NOT NGRAPH_USE_PROTOBUF_LITE)
endif()
target_compile_definitions(unit-test PRIVATE NGRAPH_VERSION_LABEL="${NGRAPH_VERSION_LABEL}")
if (NGRAPH_JSON_ENABLE)
target_link_libraries(unit-test PRIVATE libjson)
endif()
if(NOT WIN32)
target_link_libraries(unit-test PRIVATE pthread)
endif()


@@ -48,7 +48,6 @@
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/label.hpp"
#include "ngraph/pattern/op/skip.hpp"
#include "ngraph/serializer.hpp"
#include "util/all_close.hpp"
#include "util/matcher.hpp"
#include "util/test_tools.hpp"


@@ -464,82 +464,3 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_2)
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}
#ifndef NGRAPH_JSON_DISABLE
NGRAPH_TEST(${BACKEND_NAME}, constant_broadcast)
{
const string js =
R"([{
"name" : "Function_0",
"ops" : [
{
"element_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
"inputs" : [],
"name" : "Parameter_4",
"op" : "Parameter",
"outputs" : ["Parameter_4"],
"shape" : [ 3, 4 ]
},
{
"element_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
"inputs" : [],
"name" : "Parameter_0",
"op" : "Parameter",
"outputs" : ["Parameter_0"],
"shape" : [ 3, 4 ]
},
{
"element_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
"inputs" : [],
"name" : "Constant_1",
"op" : "Constant",
"outputs" : ["Constant_1"],
"shape" : [],
"value" : ["0"]
},
{
"axes" : [ 0, 1 ],
"element_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
"inputs" : ["Constant_1"],
"name" : "Broadcast_2",
"op" : "Broadcast",
"outputs" : ["Broadcast_2"],
"shape" : [ 3, 4 ]
},
{
"element_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
"inputs" : [ "Parameter_0", "Broadcast_2" ],
"name" : "Maximum_3",
"op" : "Maximum",
"outputs" : ["Maximum_3"]
},
{
"element_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false},
"inputs" : [ "Maximum_3", "Parameter_4" ],
"name" : "Multiply_5",
"op" : "Multiply",
"outputs" : ["Multiply_5"]
}
],
"parameters" : [ "Parameter_0", "Parameter_4" ],
"result" : ["Multiply_5"],
"result_shape" : [ 3, 4 ],
"result_type" :
{"bitwidth" : 32, "c_type_string" : "float", "is_real" : true, "is_signed" : true, "is_quantized" : false}
}])";
stringstream ss(js);
shared_ptr<Function> f = ngraph::deserialize(ss);
// max(x,broadcast(Constant(0)))
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// If this compiles it works
}
#endif


@@ -25,7 +25,6 @@
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/tensor.hpp"
#include "ngraph/serializer.hpp"
#include "runtime/backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"


@@ -58,35 +58,3 @@ TEST(backend_api, DISABLED_config_unsupported)
EXPECT_FALSE(backend->set_config(config, error));
EXPECT_FALSE(error == "");
}
#ifndef NGRAPH_JSON_DISABLE
TEST(backend_api, save_load)
{
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Add>(A, B), ParameterVector{A, B});
auto backend = runtime::Backend::create("INTERPRETER");
// Create some tensors for input/output
shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, shape);
copy_data<float>(a, {1.f, 2.f, 3.f, 4.f});
copy_data<float>(b, {5.f, 6.f, 7.f, 8.f});
stringstream file;
{
auto handle = backend->compile(f);
handle->save(file);
}
{
auto handle = backend->load(file);
ASSERT_NE(handle, nullptr);
handle->call_with_validate({result}, {a, b});
EXPECT_TRUE(test::all_close_f(read_vector<float>(result), {6.f, 8.f, 10.f, 12.f}));
}
}
#endif


@@ -18,7 +18,6 @@
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/serializer.hpp"
#include "util/test_tools.hpp"
#include <memory>


@@ -31,7 +31,6 @@
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "util/all_close.hpp"
#include "util/ndarray.hpp"
@@ -188,56 +187,3 @@ TEST(control_dependencies, replace_node)
ASSERT_TRUE(0 == count_control_dependencies(ADD, MUL_AB));
ASSERT_TRUE(1 == count_control_dependencies(ADD, MUL_BA));
}
#ifndef NGRAPH_JSON_DISABLE
TEST(control_dependencies, serialize_cdop)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{});
auto absn = make_shared<op::Abs>(A);
auto cdop = make_shared<op::Negative>(A);
cdop->add_control_dependency(absn);
auto f = make_shared<Function>(cdop, ParameterVector{A});
string js = serialize(f, 4);
shared_ptr<Function> clone = deserialize(js);
auto matcher = std::make_shared<pattern::Matcher>(cdop);
auto cdop_clone = clone->get_results().at(0)->input_value(0).get_node_shared_ptr();
ASSERT_TRUE(matcher->match(cdop_clone));
auto cloned_deps = cdop_clone->get_control_dependencies();
ASSERT_EQ(cloned_deps.size(), 1);
auto cloned_abs = *begin(cloned_deps);
ASSERT_TRUE(is_type<op::Abs>(cloned_abs));
}
TEST(control_dependencies, serialize_cdop_abs)
{
auto A = make_shared<op::Parameter>(element::f32, Shape{});
auto absn = make_shared<op::Abs>(A);
auto B = make_shared<op::Parameter>(element::f32, Shape{});
auto absn_b = make_shared<op::Abs>(B);
auto cdop = make_shared<op::Negative>(A);
cdop->add_control_dependency(absn);
cdop->add_control_dependency(absn_b);
auto absn_cdop = make_shared<op::Abs>(cdop);
auto f = make_shared<Function>(absn_cdop, ParameterVector{A, B});
string js = serialize(f, 4);
shared_ptr<Function> clone = deserialize(js);
auto matcher = std::make_shared<pattern::Matcher>(cdop);
auto cdop_clone = clone->get_results()
.at(0)
->input_value(0)
.get_node_shared_ptr()
->input_value(0)
.get_node_shared_ptr();
ASSERT_TRUE(matcher->match(cdop_clone));
auto cloned_deps = cdop_clone->get_control_dependencies();
ASSERT_EQ(cloned_deps.size(), 2);
for (auto ccdep : cloned_deps)
{
ASSERT_TRUE(is_type<op::Abs>(ccdep));
}
}
#endif


@@ -1,36 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/serializer.hpp"
using namespace ngraph;
using namespace std;
TEST(core, function_size)
{
const string m1 = file_util::path_join(SERIALIZED_ZOO, "mxnet/mnist_mlp_forward.json");
const string m2 = file_util::path_join(SERIALIZED_ZOO, "mxnet/10_bucket_LSTM.json");
auto f1 = deserialize(m1);
auto f2 = deserialize(m2);
auto s1 = f1->get_graph_size();
auto s2 = f2->get_graph_size();
EXPECT_GT(s2, s1);
}


@@ -44,7 +44,6 @@
#include "ngraph/pattern/op/or.hpp"
#include "ngraph/pattern/op/skip.hpp"
#include "ngraph/pattern/op/true.hpp"
#include "ngraph/serializer.hpp"
#include "util/matcher.hpp"
#include "util/test_tools.hpp"


@@ -72,37 +72,6 @@ bool runtime::interpreter::INTBackend::is_supported(const Node& node) const
return m_unsupported_op_name_list.find(node.description()) == m_unsupported_op_name_list.end();
}
std::shared_ptr<runtime::Executable> runtime::interpreter::INTBackend::load(istream& in)
{
shared_ptr<Executable> exec;
cpio::Reader reader(in);
auto file_info = reader.get_file_info();
string save_info;
for (const cpio::FileInfo& info : file_info)
{
if (info.get_name() == "save_info")
{
vector<char> buffer = reader.read(info);
save_info = string(buffer.data(), buffer.size());
break;
}
}
if (save_info == "INTERPRETER Save File 1.0")
{
for (const cpio::FileInfo& info : file_info)
{
if (info.get_name() == "model")
{
vector<char> buffer = reader.read(info);
string model_string = string(buffer.data(), buffer.size());
exec = shared_ptr<INTExecutable>(new INTExecutable(model_string));
break;
}
}
}
return exec;
}
bool runtime::interpreter::INTBackend::set_config(const map<string, string>& config, string& error)
{
bool rc = false;


@@ -59,7 +59,6 @@ public:
std::shared_ptr<Executable> compile(std::shared_ptr<Function> function,
bool enable_performance_data = false) override;
std::shared_ptr<Executable> load(std::istream& input_stream) override;
bool is_supported(const Node& node) const override;


@@ -23,7 +23,6 @@
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/ops.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "opset0_downgrade.hpp"
#include "opset1_downgrade.hpp"
@@ -65,13 +64,7 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& f
: m_is_compiled{true}
, m_performance_counters_enabled{enable_performance_collection}
{
#ifdef INTERPRETER_FORCE_SERIALIZE
// To verify that the serializer works correctly let's just run this graph round-trip
string ser = serialize(function);
m_function = deserialize(ser);
#else
m_function = clone_function(*function);
#endif
auto is_supported = [](const Node& node) {
bool retval = false;
switch (INTExecutable::get_typeid(node))
@@ -100,18 +93,6 @@ runtime::interpreter::INTExecutable::INTExecutable(const shared_ptr<Function>& f
set_parameters_and_results(*m_function);
}
runtime::interpreter::INTExecutable::INTExecutable(const std::string& model_string)
: m_is_compiled{true}
, m_performance_counters_enabled{false}
{
m_function = deserialize(model_string);
for (auto node : m_function->get_ordered_ops())
{
m_nodes.push_back(node);
}
set_parameters_and_results(*m_function);
}
bool runtime::interpreter::INTExecutable::call(const vector<shared_ptr<runtime::Tensor>>& outputs,
const vector<shared_ptr<runtime::Tensor>>& inputs)
{
@@ -335,15 +316,6 @@ void runtime::interpreter::INTExecutable::perform_nan_check(
}
}
void runtime::interpreter::INTExecutable::save(ostream& out)
{
cpio::Writer writer(out);
string si = "INTERPRETER Save File 1.0";
writer.write("save_info", si.data(), si.size());
string model = serialize(m_function, 0);
writer.write("model", model.data(), model.size());
}
shared_ptr<ngraph::op::Parameter>
runtime::interpreter::INTExecutable::get_parameter(size_t index) const
{


@@ -128,8 +128,6 @@ public:
bool call(const std::vector<std::shared_ptr<Tensor>>& outputs,
const std::vector<std::shared_ptr<Tensor>>& inputs) override;
virtual void save(std::ostream& output_stream) override;
void set_nan_check(bool enable);
std::vector<PerformanceCounter> get_performance_data() const override;
@@ -145,8 +143,6 @@ public:
create_output_tensor(size_t output_index, size_t pipeline_depth) override;
protected:
INTExecutable(const std::string& model_string);
std::shared_ptr<ngraph::op::Parameter> get_parameter(size_t index) const;
std::shared_ptr<ngraph::op::Result> get_result(size_t index) const;
int get_alignment() const { return 64; }


@@ -1,931 +0,0 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <fstream>
#include <sstream>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/interpolate.hpp"
#include "ngraph/op/passthrough.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "nlohmann/json.hpp"
#include "util/all_close_f.hpp"
#include "util/test_tools.hpp"
#include "util/visitor.hpp"
using namespace std;
using namespace ngraph;
using json = nlohmann::json;
using ::testing::ElementsAre;
using ::testing::NotNull;
using ::testing::StrEq;
template <typename T>
T get_or_default(nlohmann::json& j, const std::string& key, const T& default_value)
{
T rc;
try
{
rc = j.at(key).get<T>();
}
catch (...)
{
rc = default_value;
}
return rc;
}
#if defined(NGRAPH_INTERPRETER_ENABLE)
TEST(serialize, main)
{
// First create "f(A,B,C) = (A+B)*C".
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>((A + B) * C, ParameterVector{A, B, C}, "f");
string js = serialize(f, 4);
{
ofstream out("serialize_function.js");
out << js;
}
istringstream in(js);
shared_ptr<Function> sfunc = deserialize(in);
auto backend = runtime::Backend::create("INTERPRETER");
auto handle = backend->compile(sfunc);
auto x = backend->create_tensor(element::f32, shape);
copy_data(x, vector<float>{1, 2, 3, 4});
auto y = backend->create_tensor(element::f32, shape);
copy_data(y, vector<float>{5, 6, 7, 8});
auto z = backend->create_tensor(element::f32, shape);
copy_data(z, vector<float>{9, 10, 11, 12});
auto result = backend->create_tensor(element::f32, shape);
handle->call_with_validate({result}, {x, y, z});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
handle->call_with_validate({result}, {y, x, z});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
handle->call_with_validate({result}, {x, z, y});
EXPECT_EQ((vector<float>{50, 72, 98, 128}), read_vector<float>(result));
}
TEST(serialize, friendly_name)
{
// First create "f(A,B,C) = (A+B)*C".
Shape shape{2, 2};
auto A = make_shared<op::Parameter>(element::f32, shape);
auto B = make_shared<op::Parameter>(element::f32, shape);
auto C = make_shared<op::Parameter>(element::f32, shape);
auto sum = A + B;
auto product = sum * C;
auto f = make_shared<Function>(product, ParameterVector{A, B, C}, "f");
A->set_friendly_name("A");
B->set_friendly_name("B");
C->set_friendly_name("C");
sum->set_friendly_name("Sum");
product->set_friendly_name("Product");
string js = serialize(f, 4);
ofstream out("serialize_function.js");
out << js;
istringstream in(js);
shared_ptr<Function> sfunc = deserialize(in);
auto backend = runtime::Backend::create("INTERPRETER");
auto handle = backend->compile(sfunc);
auto x = backend->create_tensor(element::f32, shape);
copy_data(x, vector<float>{1, 2, 3, 4});
auto y = backend->create_tensor(element::f32, shape);
copy_data(y, vector<float>{5, 6, 7, 8});
auto z = backend->create_tensor(element::f32, shape);
copy_data(z, vector<float>{9, 10, 11, 12});
auto result = backend->create_tensor(element::f32, shape);
handle->call_with_validate({result}, {x, y, z});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
handle->call_with_validate({result}, {y, x, z});
EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));
handle->call_with_validate({result}, {x, z, y});
EXPECT_EQ((vector<float>{50, 72, 98, 128}), read_vector<float>(result));
}
#endif
TEST(serialize, existing_models)
{
vector<string> models = {"mxnet/mnist_mlp_forward.json",
"mxnet/10_bucket_LSTM.json",
"mxnet/LSTM_backward.json",
"mxnet/LSTM_forward.json"};
for (const string& model : models)
{
const string json_path = file_util::path_join(SERIALIZED_ZOO, model);
const string json_string = file_util::read_file_to_string(json_path);
shared_ptr<Function> f = ngraph::deserialize(json_string);
}
}
TEST(serialize, default_value)
{
json j = {{"test1", 1}, {"test2", 2}};
int x1 = j.at("test1").get<int>();
EXPECT_EQ(x1, 1);
int x2 = get_or_default<int>(j, "test2", 0);
EXPECT_EQ(x2, 2);
int x3 = get_or_default<int>(j, "test3", 3);
EXPECT_EQ(x3, 3);
}
TEST(serialize, constant)
{
const string tmp_file = "serialize_constant.cpio";
Shape shape{2, 2, 2};
auto A = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
auto f = make_shared<Function>(A, ParameterVector{});
EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), A->get_vector<float>());
serialize(tmp_file, f);
auto g = deserialize(tmp_file);
ASSERT_NE(g, nullptr);
file_util::remove_file(tmp_file);
bool found = false;
for (shared_ptr<Node> node : g->get_ops())
{
shared_ptr<op::Constant> c = as_type_ptr<op::Constant>(node);
if (c)
{
found = true;
EXPECT_EQ((vector<float>{1, 2, 3, 4, 5, 6, 7, 8}), c->get_vector<float>());
break;
}
}
EXPECT_TRUE(found);
}
MATCHER_P2(IsOutputShape, type, shape, "")
{
return std::get<0>(arg) == type && std::get<1>(arg).to_shape() == shape;
}
TEST(serialize, passthrough)
{
const string tmp_file = "serialize_passthrough.json";
using estuple = std::tuple<element::Type, PartialShape>;
Shape shape{2, 2, 2};
auto p = make_shared<op::Passthrough>(
"SerializationTest",
"Plain",
"Hello, world!",
OutputVector{},
std::vector<estuple>{estuple{element::f32, PartialShape{2, 3}},
estuple{element::i8, PartialShape{4, 5}}});
auto f = make_shared<Function>(NodeVector{std::make_shared<op::GetOutputElement>(p, 0),
std::make_shared<op::GetOutputElement>(p, 1)},
ParameterVector{});
serialize(tmp_file, f);
auto g = deserialize(tmp_file);
file_util::remove_file(tmp_file);
ASSERT_THAT(g, NotNull());
std::shared_ptr<op::Passthrough> pt;
for (const auto& op : g->get_ops())
{
pt = as_type_ptr<op::Passthrough>(op);
if (pt)
{
break;
}
}
ASSERT_THAT(pt.get(), NotNull());
EXPECT_THAT(pt->logical_type(), StrEq("SerializationTest"));
EXPECT_THAT(pt->language(), StrEq("Plain"));
EXPECT_THAT(pt->function(), StrEq("Hello, world!"));
EXPECT_THAT(pt->output_shapes(),
ElementsAre(IsOutputShape(element::f32, Shape{2, 3}),
IsOutputShape(element::i8, Shape{4, 5})));
}
TEST(serialize, constant_infinity_nan)
{
vector<float> a_data{123.f, 456.f, INFINITY, -INFINITY, NAN};
vector<float> b_data{5.f, 5.f, 5.f, 5.f, 5.f, 5.f};
vector<float> c_data{0.05f, 0.05f, 0.05f, 0.05f, 0.05f, 0.05001f, 0.05f};
vector<int64_t> d_data{-100, -10, -1, 0, 50, 5000000000001};
auto A = make_shared<op::Constant>(element::f32, Shape{5}, a_data);
auto B = make_shared<op::Constant>(element::f32, Shape{6}, b_data);
auto C = make_shared<op::Constant>(element::f32, Shape{7}, c_data);
auto D = make_shared<op::Constant>(element::i64, Shape{d_data.size()}, d_data);
A->set_friendly_name("A");
B->set_friendly_name("B");
C->set_friendly_name("C");
D->set_friendly_name("D");
auto f = make_shared<Function>(NodeVector{A, B, C, D}, ParameterVector{});
string s = serialize(f, 4);
shared_ptr<Function> g = deserialize(s);
shared_ptr<op::Constant> a;
shared_ptr<op::Constant> b;
shared_ptr<op::Constant> c;
shared_ptr<op::Constant> d;
for (auto node : g->get_ops())
{
if (node->get_friendly_name() == "A")
{
a = as_type_ptr<op::Constant>(node);
}
else if (node->get_friendly_name() == "B")
{
b = as_type_ptr<op::Constant>(node);
}
else if (node->get_friendly_name() == "C")
{
c = as_type_ptr<op::Constant>(node);
}
else if (node->get_friendly_name() == "D")
{
d = as_type_ptr<op::Constant>(node);
}
}
ASSERT_TRUE(a);
ASSERT_TRUE(b);
ASSERT_TRUE(c);
ASSERT_TRUE(d);
EXPECT_TRUE(test::all_close_f(a->get_vector<float>(), a_data));
EXPECT_TRUE(test::all_close_f(b->get_vector<float>(), b_data));
EXPECT_TRUE(test::all_close_f(c->get_vector<float>(), c_data));
EXPECT_EQ(d->get_vector<int64_t>(), d_data);
string filename = "constant_infinity_nan_test.dot";
pass::Manager pass_manager;
pass_manager.register_pass<pass::VisualizeTree>(filename);
pass_manager.run_passes(g);
ifstream file(filename);
ASSERT_TRUE(file);
string str((istreambuf_iterator<char>(file)), istreambuf_iterator<char>());
EXPECT_NE(str.find(R"(label="A)"), string::npos);
EXPECT_NE(str.find(R"(label="B)"), string::npos);
EXPECT_NE(str.find(R"(label="C)"), string::npos);
EXPECT_NE(str.find(R"(label="D)"), string::npos);
}
TEST(serialize, non_zero_node_output)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{10});
auto topk = make_shared<op::TopK>(arg, 0, element::i32, 5, true);
auto abs = make_shared<op::Abs>(Output<Node>(topk, 1));
auto result = make_shared<op::Result>(abs);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_abs = g_result->input_value(0).get_node_shared_ptr();
auto topk_out = g_abs->input_value(0);
EXPECT_EQ(topk_out.get_index(), 1);
ASSERT_TRUE(is_type<op::TopK>(topk_out.get_node()));
}
TEST(serialize, opset1_softmax)
{
const auto arg = make_shared<op::Parameter>(element::f32, Shape{10});
const auto softmax = make_shared<op::v1::Softmax>(arg, 0);
const auto result = make_shared<op::Result>(softmax);
const auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
const auto g_result = g->get_results().at(0);
const auto g_softmax = g_result->get_input_node_shared_ptr(0);
EXPECT_TRUE(is_type<op::v1::Softmax>(g_softmax));
}
TEST(serialize, opset1_gather)
{
auto params = make_shared<op::Parameter>(element::f32, Shape{5, 6});
auto indices = make_shared<op::Parameter>(element::i64, Shape{4});
auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
auto gather_v1 = make_shared<op::v1::Gather>(params, indices, axis);
auto result = make_shared<op::Result>(gather_v1);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{params, indices, axis});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_gather = g_result->get_input_node_shared_ptr(0);
EXPECT_TRUE(is_type<op::v1::Gather>(g_gather));
}
TEST(serialize, opset1_product)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto keep_dims = true;
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto reduce_prod = make_shared<op::v1::ReduceProd>(arg, axes, keep_dims);
auto result = make_shared<op::Result>(reduce_prod);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_red_prod = g_result->get_input_node_shared_ptr(0);
auto node = as_type_ptr<op::v1::ReduceProd>(g_red_prod);
EXPECT_TRUE(node);
EXPECT_EQ(node->get_keep_dims(), 1);
EXPECT_EQ(node->get_reduction_axes(), AxisSet({1, 2}));
}
TEST(serialize, opset1_sum)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto keep_dims = true;
auto axes = make_shared<op::Constant>(element::i64, Shape{2}, vector<int64_t>{1, 2});
auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes, keep_dims);
auto result = make_shared<op::Result>(reduce_sum);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_red_sum = g_result->get_input_node_shared_ptr(0);
auto node = as_type_ptr<op::v1::ReduceSum>(g_red_sum);
EXPECT_TRUE(node);
EXPECT_EQ(node->get_keep_dims(), 1);
EXPECT_EQ(node->get_reduction_axes(), AxisSet({1, 2}));
}
TEST(serialize, opset1_pad)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 5, 6});
auto pads_begin = make_shared<op::Parameter>(element::i64, Shape{1});
auto pads_end = make_shared<op::Parameter>(element::i64, Shape{2});
auto arg_pad_value = make_shared<op::Parameter>(element::f32, Shape{});
auto pad_mode = op::PadMode::EDGE;
auto pad = make_shared<op::v1::Pad>(arg, pads_begin, pads_end, arg_pad_value, pad_mode);
auto result = make_shared<op::Result>(pad);
auto f = make_shared<Function>(ResultVector{result},
ParameterVector{arg, pads_begin, pads_end, arg_pad_value});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_pad = as_type_ptr<op::v1::Pad>(g_result->input_value(0).get_node_shared_ptr());
ASSERT_TRUE(g_pad);
EXPECT_EQ(g_pad->get_pad_mode(), pad_mode);
}
TEST(serialize, tensor_iterator_raw)
{
// That which we iterate over
auto X = make_shared<op::Parameter>(element::f32, Shape{32, 40, 10});
// Common to all cells
auto WH = make_shared<op::Parameter>(element::f32, Shape{20, 20});
auto WX = make_shared<op::Parameter>(element::f32, Shape{10, 20});
auto bH = make_shared<op::Parameter>(element::f32, Shape{20});
auto WY = make_shared<op::Parameter>(element::f32, Shape{20, 5});
auto bY = make_shared<op::Parameter>(element::f32, Shape{5});
// Initial values
auto Hinit = make_shared<op::Parameter>(element::f32, Shape{32, 1, 20});
// Set up the cell body, a function from (Hi, Xi) -> (Ho, Yo)
// Cell parameters
auto Hi = make_shared<op::Parameter>(element::f32, Shape{32, 1, 20});
auto Xi = make_shared<op::Parameter>(element::f32, Shape{32, 1, 10});
auto WH_body = make_shared<op::Parameter>(element::f32, Shape{20, 20});
auto WX_body = make_shared<op::Parameter>(element::f32, Shape{10, 20});
auto bH_body = make_shared<op::Parameter>(element::f32, Shape{20});
auto WY_body = make_shared<op::Parameter>(element::f32, Shape{20, 5});
auto bY_body = make_shared<op::Parameter>(element::f32, Shape{5});
// Body
auto Ho = make_shared<op::Reshape>(
make_shared<op::Relu>(
make_shared<op::Dot>(make_shared<op::Reshape>(Xi, AxisVector{0, 1, 2}, Shape{32, 10}),
WX_body) +
make_shared<op::Dot>(make_shared<op::Reshape>(Hi, AxisVector{0, 1, 2}, Shape{32, 20}),
WH_body) +
make_shared<op::Broadcast>(bH_body, Shape{32, 20}, AxisSet{0})),
AxisVector{0, 1},
Shape{32, 1, 20});
auto Yo = make_shared<op::Relu>(
make_shared<op::Dot>(make_shared<op::Reshape>(Ho, AxisVector{0, 1, 2}, Shape{32, 20}),
WY_body) +
make_shared<op::Broadcast>(bY_body, Shape{32, 5}, AxisSet{0}));
auto body = make_shared<op::TensorIterator::BodyLambda>(
OutputVector{Yo, Ho}, ParameterVector{Xi, Hi, WH_body, WX_body, WY_body, bH_body, bY_body});
auto tensor_iterator = make_shared<op::TensorIterator>();
tensor_iterator->set_body(body);
// The Xi are the elements of Xseq
// start=0, stride=1, part_size=1, end=39, axis=1
tensor_iterator->set_sliced_input(Xi, X, 0, 1, 1, 39, 1);
// Hi is Hinit on the first iteration, Ho after that
tensor_iterator->set_merged_input(Hi, Hinit, Ho);
tensor_iterator->set_invariant_input(WH_body, WH);
tensor_iterator->set_invariant_input(WX_body, WX);
tensor_iterator->set_invariant_input(WY_body, WY);
tensor_iterator->set_invariant_input(bH_body, bH);
tensor_iterator->set_invariant_input(bY_body, bY);
// Output 0 is last Yo
auto out0 = tensor_iterator->get_iter_value(Yo, -1);
// Output 1 is concat of hidden states
// start=0, stride=1, part_size=1, end=39, axis=1
auto out1 = tensor_iterator->get_concatenated_slices(Ho, 0, 1, 1, 39, 1);
auto results = ResultVector{make_shared<op::Result>(out0), make_shared<op::Result>(out1)};
auto f = make_shared<Function>(results, ParameterVector{X, Hinit, WH, WX, bH, WY, bY});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
ngraph::test::NodeBuilder builder;
// Uncomment to see serialization
// builder.set_print(true);
builder.save_node(tensor_iterator);
auto g_tensor_iterator = as_type_ptr<op::v0::TensorIterator>(builder.create());
ASSERT_TRUE(g_tensor_iterator);
auto& inputs = tensor_iterator->get_input_descriptions();
auto& g_inputs = g_tensor_iterator->get_input_descriptions();
ASSERT_EQ(inputs.size(), g_inputs.size());
for (size_t i = 0; i < tensor_iterator->get_input_descriptions().size(); ++i)
{
auto& val = inputs[i];
auto& g_val = g_inputs[i];
ASSERT_EQ(val->get_type_info(), g_val->get_type_info());
ASSERT_EQ(val->m_input_index, g_val->m_input_index);
ASSERT_EQ(val->m_body_parameter_index, g_val->m_body_parameter_index);
}
auto& outputs = tensor_iterator->get_output_descriptions();
auto& g_outputs = g_tensor_iterator->get_output_descriptions();
ASSERT_EQ(outputs.size(), g_outputs.size());
for (size_t i = 0; i < tensor_iterator->get_output_descriptions().size(); ++i)
{
auto& val = outputs[i];
auto& g_val = g_outputs[i];
ASSERT_EQ(val->get_type_info(), g_val->get_type_info());
}
}
TEST(serialize, tensor_iterator_lstm)
{
// That which we iterate over
const size_t N = 32; // Batch size
const size_t L = 10; // Sequence length
const size_t I = 8; // Input size
const size_t H = 32; // Hidden size
auto SENT = make_shared<op::Parameter>(element::f32, Shape{N, L, I});
auto H_init = make_shared<op::Parameter>(element::f32, Shape{N, 1, H});
auto C_init = make_shared<op::Parameter>(element::f32, Shape{N, 1, H});
auto W = make_shared<op::Parameter>(element::f32, Shape{4 * H, I});
auto R = make_shared<op::Parameter>(element::f32, Shape{4 * H, H});
auto H_t = make_shared<op::Parameter>(element::f32, Shape{N, 1, H});
auto C_t = make_shared<op::Parameter>(element::f32, Shape{N, 1, H});
// Body
auto X = make_shared<op::Parameter>(element::f32, Shape{N, 1, I});
auto W_body = make_shared<op::Parameter>(element::f32, Shape{4 * H, I});
auto R_body = make_shared<op::Parameter>(element::f32, Shape{4 * H, H});
auto LSTM_cell =
make_shared<op::LSTMCell>(make_shared<op::Reshape>(X, AxisVector{0, 1, 2}, Shape{N, I}),
make_shared<op::Reshape>(H_t, AxisVector{0, 1, 2}, Shape{N, H}),
make_shared<op::Reshape>(C_t, AxisVector{0, 1, 2}, Shape{N, H}),
W_body,
R_body,
H);
auto H_o = make_shared<op::Reshape>(LSTM_cell->output(0), AxisVector{0, 1}, Shape{N, 1, H});
auto C_o = make_shared<op::Reshape>(LSTM_cell->output(1), AxisVector{0, 1}, Shape{N, 1, H});
auto body = make_shared<op::TensorIterator::BodyLambda>(
OutputVector{H_o, C_o}, ParameterVector{X, H_t, C_t, W_body, R_body});
auto tensor_iterator = make_shared<op::TensorIterator>();
tensor_iterator->set_body(body);
// start=0, stride=1, part_size=1, end=39, axis=1
tensor_iterator->set_sliced_input(X, SENT, 0, 1, 1, -1, 1);
// H_t is Hinit on the first iteration, Ho after that
tensor_iterator->set_merged_input(H_t, H_init, H_o);
tensor_iterator->set_merged_input(C_t, C_init, C_o);
tensor_iterator->set_invariant_input(W_body, W);
tensor_iterator->set_invariant_input(R_body, R);
// Output 0 is last Ho, result 0 of body
auto out0 = tensor_iterator->get_iter_value(H_o, -1);
// Output 1 is last Co, result 1 of body
auto out1 = tensor_iterator->get_iter_value(C_o, -1);
auto results = ResultVector{make_shared<op::Result>(out0), make_shared<op::Result>(out1)};
auto f = make_shared<Function>(results, ParameterVector{SENT, H_init, C_init, W, R});
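// Smoke test: serializing and deserializing the LSTM TensorIterator must not throw.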
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
}
TEST(serialize, tensor_iterator_2_slice_inputs_part_size_2)
{
// That which we iterate over
auto X = make_shared<op::Parameter>(element::f32, Shape{32, 40, 10});
auto Y = make_shared<op::Parameter>(element::f32, Shape{32, 40, 10});
auto M = make_shared<op::Parameter>(element::f32, Shape{32, 2, 10});
// Set up the cell body, a function from (Xi, Yi) -> (Zo)
// Body parameters
auto Xi = make_shared<op::Parameter>(element::f32, Shape{32, 2, 10});
auto Yi = make_shared<op::Parameter>(element::f32, Shape{32, 2, 10});
auto M_body = make_shared<op::Parameter>(element::f32, Shape{32, 2, 10});
// Body
auto Zo = (Xi + Yi) * M_body;
auto body = make_shared<op::TensorIterator::BodyLambda>(OutputVector{Zo},
ParameterVector{Xi, Yi, M_body});
auto tensor_iterator = make_shared<op::TensorIterator>();
tensor_iterator->set_body(body);
// The Xi are the elements of Xseq
// start=0, stride=2, part_size=2, end=39, axis=1
tensor_iterator->set_sliced_input(Xi, X, 0, 2, 2, 39, 1);
// The Yi are the elements of Yseq
// start=0, stride=2, part_size=2, end=-1, axis=1
tensor_iterator->set_sliced_input(Yi, Y, 0, 2, 2, -1, 1);
tensor_iterator->set_invariant_input(M_body, M);
// Output 0 is last Zo
auto out0 = tensor_iterator->get_iter_value(Zo, -1);
// Output 1 is concat of Zos
// start=0, stride=2, part_size=2, end=39, axis=1
auto out1 = tensor_iterator->get_concatenated_slices(Zo, 0, 2, 2, 39, 1);
auto result0 = make_shared<op::Result>(out0);
auto result1 = make_shared<op::Result>(out1);
Shape out0_shape{32, 2, 10};
Shape out1_shape{32, 40, 10};
auto results = ResultVector{result0, result1};
auto f = make_shared<Function>(results, ParameterVector{X, Y, M});
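// Check the statically inferred output shapes, then round-trip through the serializer.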
EXPECT_EQ(result0->get_output_shape(0), out0_shape);
EXPECT_EQ(result1->get_output_shape(0), out1_shape);
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
}
TEST(serialize, tensor_iterator_2_slice_inputs_part_size_2_dynamic)
{
// That which we iterate over
auto X = make_shared<op::Parameter>(element::f32, Shape{32, 40, 10});
auto Y = make_shared<op::Parameter>(element::f32, Shape{32, 40, 10});
auto M = make_shared<op::Parameter>(element::f32, Shape{32, 2, 10});
// Set up the cell body, a function from (Xi, Yi) -> (Zo)
// Body parameters
auto Xi = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto Yi = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
auto M_body = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
// Body
auto Zo = (Xi + Yi) * M_body;
auto body = make_shared<op::TensorIterator::BodyLambda>(OutputVector{Zo},
ParameterVector{Xi, Yi, M_body});
auto tensor_iterator = make_shared<op::TensorIterator>();
tensor_iterator->set_body(body);
// The Xi are the elements of Xseq
// start=0, stride=2, part_size=2, end=38, axis=1
tensor_iterator->set_sliced_input(Xi, X, 0, 2, 2, 38, 1);
// The Yi are the elements of Yseq
// start=0, stride=2, part_size=2, end=-2, axis=1
tensor_iterator->set_sliced_input(Yi, Y, 0, 2, 2, -2, 1);
tensor_iterator->set_invariant_input(M_body, M);
// check input descriptors
for (auto& desc : tensor_iterator->get_input_descriptions())
{
auto type_info = desc->get_type_info();
if (std::strcmp(type_info.name, "InvariantInputDescription") == 0)
{
auto input_desc =
as_type_ptr<ngraph::op::TensorIterator::InvariantInputDescription>(desc);
EXPECT_NE(input_desc, nullptr);
}
else if (std::strcmp(type_info.name, "SliceInputDescription") == 0)
{
auto input_desc = as_type_ptr<ngraph::op::TensorIterator::SliceInputDescription>(desc);
EXPECT_NE(input_desc, nullptr);
}
else if (std::strcmp(type_info.name, "MergedInputDescription") == 0)
{
auto input_desc = as_type_ptr<ngraph::op::TensorIterator::MergedInputDescription>(desc);
EXPECT_NE(input_desc, nullptr);
}
}
// Output 0 is last Zo
auto out0 = tensor_iterator->get_iter_value(Zo, -1);
// Output 1 is concat of Zos
// start=0, stride=2, part_size=2, end=38, axis=1
auto out1 = tensor_iterator->get_concatenated_slices(Zo, 0, 2, 2, 38, 1);
// check output descriptors
for (auto& desc : tensor_iterator->get_output_descriptions())
{
auto type_info = desc->get_type_info();
if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0)
{
auto output_desc =
as_type_ptr<ngraph::op::TensorIterator::ConcatOutputDescription>(desc);
EXPECT_NE(output_desc, nullptr);
}
else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0)
{
auto output_desc = as_type_ptr<ngraph::op::TensorIterator::BodyOutputDescription>(desc);
EXPECT_NE(output_desc, nullptr);
}
}
auto result0 = make_shared<op::Result>(out0);
auto result1 = make_shared<op::Result>(out1);
Shape out0_shape{32, 2, 10};
Shape out1_shape{32, 38, 10};
auto results = ResultVector{result0, result1};
auto f = make_shared<Function>(results, ParameterVector{X, Y, M});
EXPECT_EQ(result0->get_output_shape(0), out0_shape);
EXPECT_EQ(result1->get_output_shape(0), out1_shape);
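// The body's shapes are specialized as well, even though its parameters were declared dynamic.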
EXPECT_EQ(body->get_results()[0]->get_output_shape(0), out0_shape);
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
}
TEST(serialize, opset1_strided_slice)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{2, 4, 6, 8});
auto begin = make_shared<op::Parameter>(element::i64, Shape{4});
auto end = make_shared<op::Parameter>(element::i64, Shape{4});
auto strides = make_shared<op::Parameter>(element::i64, Shape{4});
const std::vector<int64_t> begin_mask{1, 0, 1, 0};
const std::vector<int64_t> end_mask{1, 1, 1, 0};
const std::vector<int64_t> new_axis_mask{0, 0, 1, 1};
const std::vector<int64_t> shrink_axis_mask{0, 0, 0, 0};
const std::vector<int64_t> ellipsis_mask{1, 1, 1, 1};
auto strided_slice_in = make_shared<op::v1::StridedSlice>(data,
begin,
end,
strides,
begin_mask,
end_mask,
new_axis_mask,
shrink_axis_mask,
ellipsis_mask);
auto result = make_shared<op::Result>(strided_slice_in);
auto f =
make_shared<Function>(ResultVector{result}, ParameterVector{data, begin, end, strides});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
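// Walk from the deserialized Result back to the StridedSlice node and compare its masks with the originals.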
auto g_result = g->get_results().at(0);
auto g_strided_slice_v1 = g_result->get_input_node_shared_ptr(0);
auto strided_slice_out = as_type_ptr<op::v1::StridedSlice>(g_strided_slice_v1);
ASSERT_TRUE(strided_slice_out);
EXPECT_EQ(strided_slice_out->get_begin_mask(), begin_mask);
EXPECT_EQ(strided_slice_out->get_end_mask(), end_mask);
EXPECT_EQ(strided_slice_out->get_new_axis_mask(), new_axis_mask);
EXPECT_EQ(strided_slice_out->get_shrink_axis_mask(), shrink_axis_mask);
EXPECT_EQ(strided_slice_out->get_ellipsis_mask(), ellipsis_mask);
}
TEST(serialize, opset1_binary_convolution)
{
auto data = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 2});
auto filter = make_shared<op::Parameter>(element::f32, Shape{1, 2, 2, 2});
const Strides strides{1, 1};
const CoordinateDiff pads_begin{0, 0};
const CoordinateDiff pads_end{0, 0};
const Strides dilations{1, 1};
auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 2.1f;
const auto auto_pad = op::PadType::NOTSET;
auto binary_conv_in = make_shared<op::v1::BinaryConvolution>(
data, filter, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
auto result = make_shared<op::Result>(binary_conv_in);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{data, filter});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_binary_conv = g_result->get_input_node_shared_ptr(0);
auto binary_conv_out = as_type_ptr<op::v1::BinaryConvolution>(g_binary_conv);
ASSERT_TRUE(binary_conv_out);
EXPECT_EQ(binary_conv_out->get_strides(), strides);
EXPECT_EQ(binary_conv_out->get_pads_begin(), pads_begin);
EXPECT_EQ(binary_conv_out->get_pads_end(), pads_end);
EXPECT_EQ(binary_conv_out->get_dilations(), dilations);
EXPECT_EQ(binary_conv_out->get_mode(),
op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT);
EXPECT_EQ(binary_conv_out->get_pad_value(), pad_value);
EXPECT_EQ(binary_conv_out->get_auto_pad(), auto_pad);
}
TEST(serialize, opset1_interpolate)
{
auto image = make_shared<op::Parameter>(element::f32, Shape{2, 2, 33, 65});
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {15, 30});
op::InterpolateAttrs attrs;
attrs.axes = {2, 3};
attrs.mode = "linear";
attrs.align_corners = true;
attrs.antialias = false;
attrs.pads_begin = {0, 0, 0, 0};
attrs.pads_end = {0, 0, 0, 0};
auto op = make_shared<op::Interpolate>(image, output_shape, attrs);
auto result = make_shared<op::Result>(op);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{image});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
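// All Interpolate attributes must survive the round trip unchanged.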
auto g_result = g->get_results().at(0);
auto g_interpolate = g_result->get_input_node_shared_ptr(0);
auto g_op = as_type_ptr<op::Interpolate>(g_interpolate);
ASSERT_TRUE(g_op);
op::InterpolateAttrs g_attrs = g_op->get_attrs();
EXPECT_EQ(g_attrs.axes, attrs.axes);
EXPECT_EQ(g_attrs.mode, attrs.mode);
EXPECT_EQ(g_attrs.align_corners, attrs.align_corners);
EXPECT_EQ(g_attrs.antialias, attrs.antialias);
EXPECT_EQ(g_attrs.pads_begin, attrs.pads_begin);
EXPECT_EQ(g_attrs.pads_end, attrs.pads_end);
}
TEST(serialize, opset3_interpolate)
{
using op::v3::Interpolate;
using InterpolateMode = op::v3::Interpolate::InterpolateMode;
using CoordinateTransformMode = op::v3::Interpolate::CoordinateTransformMode;
using InterpolateAttrs = op::v3::Interpolate::InterpolateAttrs;
auto image = make_shared<op::Parameter>(element::f32, Shape{2, 2, 33, 65});
auto output_shape = op::Constant::create<int64_t>(element::i64, Shape{2}, {15, 30});
InterpolateAttrs attrs;
attrs.axes = {2, 3};
attrs.mode = InterpolateMode::linear;
attrs.coordinate_transformation_mode = CoordinateTransformMode::half_pixel;
attrs.antialias = false;
attrs.pads_begin = {0, 0, 0, 0};
attrs.pads_end = {0, 0, 0, 0};
auto op = make_shared<Interpolate>(image, output_shape, attrs);
auto result = make_shared<op::Result>(op);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{image});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_interpolate = g_result->get_input_node_shared_ptr(0);
auto g_op = as_type_ptr<op::v3::Interpolate>(g_interpolate);
ASSERT_TRUE(g_op);
InterpolateAttrs g_attrs = g_op->get_attrs();
EXPECT_EQ(g_attrs.axes, attrs.axes);
EXPECT_EQ(g_attrs.mode, attrs.mode);
EXPECT_EQ(g_attrs.coordinate_transformation_mode, attrs.coordinate_transformation_mode);
EXPECT_EQ(g_attrs.antialias, attrs.antialias);
EXPECT_EQ(g_attrs.pads_begin, attrs.pads_begin);
EXPECT_EQ(g_attrs.pads_end, attrs.pads_end);
}
TEST(serialize, depth_to_space)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 5, 6});
auto mode = op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST;
size_t block_size = 2;
auto depth_to_space_in = make_shared<op::DepthToSpace>(arg, mode, block_size);
auto result = make_shared<op::Result>(depth_to_space_in);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_depth_to_space = g_result->get_input_node_shared_ptr(0);
auto depth_to_space_out = as_type_ptr<op::DepthToSpace>(g_depth_to_space);
ASSERT_TRUE(depth_to_space_out);
EXPECT_EQ(depth_to_space_out->get_block_size(), block_size);
EXPECT_EQ(depth_to_space_out->get_mode(), mode);
}
TEST(serialize, space_to_depth)
{
auto arg = make_shared<op::Parameter>(element::f32, Shape{4, 6, 8});
auto mode = op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
size_t block_size = 2;
auto space_to_depth_in = make_shared<op::SpaceToDepth>(arg, mode, block_size);
auto result = make_shared<op::Result>(space_to_depth_in);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{arg});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
auto g_result = g->get_results().at(0);
auto g_space_to_depth = g_result->get_input_node_shared_ptr(0);
auto space_to_depth_out = as_type_ptr<op::SpaceToDepth>(g_space_to_depth);
ASSERT_TRUE(space_to_depth_out);
EXPECT_EQ(space_to_depth_out->get_block_size(), block_size);
EXPECT_EQ(space_to_depth_out->get_mode(), mode);
}
TEST(serialize, deformable_psroi_pooling)
{
auto input = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto coords = make_shared<op::Parameter>(element::f32, Shape{1, 1});
auto offsets = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
const int64_t output_dim = 1;
const int64_t group_size = 2;
const float spatial_scale = 3;
std::string mode = "bilinear_deformable";
int64_t spatial_bins_x = 4;
int64_t spatial_bins_y = 5;
float trans_std = 6.1f;
int64_t part_size = 7;
auto def_psroi_pool_in = make_shared<op::v1::DeformablePSROIPooling>(input,
coords,
offsets,
output_dim,
spatial_scale,
group_size,
mode,
spatial_bins_x,
spatial_bins_y,
trans_std,
part_size);
auto result = make_shared<op::Result>(def_psroi_pool_in);
auto f = make_shared<Function>(ResultVector{result}, ParameterVector{input, coords, offsets});
string s = serialize(f);
shared_ptr<Function> g = deserialize(s);
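// Every pooling attribute, including the mode string, must be preserved by the round trip.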
auto g_result = g->get_results().at(0);
auto g_def_psroi_pool = g_result->get_input_node_shared_ptr(0);
auto def_psroi_pool_out = as_type_ptr<op::v1::DeformablePSROIPooling>(g_def_psroi_pool);
ASSERT_TRUE(def_psroi_pool_out);
EXPECT_EQ(def_psroi_pool_out->description(), "DeformablePSROIPooling");
EXPECT_EQ(def_psroi_pool_out->get_version(), 1);
EXPECT_EQ(def_psroi_pool_out->get_output_dim(), output_dim);
EXPECT_EQ(def_psroi_pool_out->get_group_size(), group_size);
EXPECT_EQ(def_psroi_pool_out->get_spatial_scale(), spatial_scale);
EXPECT_EQ(def_psroi_pool_out->get_mode(), mode);
EXPECT_EQ(def_psroi_pool_out->get_spatial_bins_x(), spatial_bins_x);
EXPECT_EQ(def_psroi_pool_out->get_spatial_bins_y(), spatial_bins_y);
EXPECT_EQ(def_psroi_pool_out->get_trans_std(), trans_std);
EXPECT_EQ(def_psroi_pool_out->get_part_size(), part_size);
}


@ -28,7 +28,6 @@
#include "ngraph/op/util/op_annotations.hpp" #include "ngraph/op/util/op_annotations.hpp"
#include "ngraph/pass/manager.hpp" #include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp" #include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/serializer.hpp"
#include "util/all_close.hpp" #include "util/all_close.hpp"
#include "util/ndarray.hpp" #include "util/ndarray.hpp"


@ -209,17 +209,6 @@ string get_results_str(const std::vector<char>& ref_data,
return ss.str();
}
#ifndef NGRAPH_JSON_DISABLE
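// Loads a JSON-serialized graph from the test zoo and deserializes it into an nGraph Function.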
std::shared_ptr<Function> make_function_from_file(const std::string& file_name)
{
const string json_path = file_util::path_join(SERIALIZED_ZOO, file_name);
const string json_string = file_util::read_file_to_string(json_path);
stringstream ss(json_string);
shared_ptr<Function> func = ngraph::deserialize(ss);
return func;
}
#endif
::testing::AssertionResult test_ordered_ops(shared_ptr<Function> f, const NodeVector& required_ops)
{
unordered_set<Node*> seen;


@ -32,7 +32,6 @@
#include "ngraph/op/op.hpp" #include "ngraph/op/op.hpp"
#include "ngraph/runtime/host_tensor.hpp" #include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/tensor.hpp" #include "ngraph/runtime/tensor.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/type/element_type_traits.hpp" #include "ngraph/type/element_type_traits.hpp"
#include "runtime/backend.hpp" #include "runtime/backend.hpp"
@ -71,9 +70,6 @@ namespace ngraph
bool validate_list(const std::vector<std::shared_ptr<ngraph::Node>>& nodes);
std::shared_ptr<ngraph::Function> make_test_graph();
#ifndef NGRAPH_JSON_DISABLE
std::shared_ptr<ngraph::Function> make_function_from_file(const std::string& file_name);
#endif
template <typename T>
void copy_data(std::shared_ptr<ngraph::runtime::Tensor> tv, const std::vector<T>& data)