PaddlePaddle frontend - Core Part (#6036)

* PaddlePaddle frontend - Core Part

Basic functionality, with only a limited number of supported operations

Rest of operations will be submitted in separate PR

* Remove copy-paste for 'protobuf-generate'

* Remove copy-paste for 'protobuf-generate'

* Disable all installs for static protobuf

* Fixed error in protobuf generator script

* Disable compilation warnings for generated Protobuf code

* CMake - move PDPD frontend to the bottom
Add install rules for PDPD and Frontend_manager

* Separate working directory for static protobuf

* Use system 'protoc' code generator in case of cross-compiling mode

* Pass SYSTEM_PROTOC to static protobuf subproject

Needed for cross compiling

* Enable back commented out PDPD unit tests

* Fix TODOs

* Fix some comments

* Update according to new gtest

* Removed NGRAPH_EXPORT_TARGETS_ENABLE for PDPD frontend
This commit is contained in:
Mikhail Nosov 2021-06-24 16:58:41 +03:00 committed by GitHub
parent 23f689bf74
commit 6736188526
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
59 changed files with 3500 additions and 48 deletions

View File

@ -59,8 +59,10 @@ function(build_ngraph)
if(NOT (ANDROID OR WINDOWS_STORE OR (MSVC AND (ARM OR AARCH64)) ))
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE ON)
ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE ON)
else()
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE OFF)
ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE OFF)
endif()
ngraph_set(NGRAPH_INTERPRETER_ENABLE ON)
@ -92,6 +94,7 @@ function(build_ngraph)
add_subdirectory(ngraph)
set(NGRAPH_LIBRARIES ngraph PARENT_SCOPE)
set(FRONTEND_LIBRARIES frontend_manager PARENT_SCOPE)
set(NGRAPH_REF_LIBRARIES ngraph_reference PARENT_SCOPE)
endfunction()

View File

@ -23,11 +23,12 @@ option(NGRAPH_INTERPRETER_ENABLE "Control the building of the INTERPRETER backen
option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" OFF)
option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" OFF)
option(NGRAPH_ONNX_EDITOR_ENABLE "Enable ONNX Editor" OFF)
option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" OFF)
option(NGRAPH_PYTHON_BUILD_ENABLE "Enable build nGraph python package wheel" OFF)
option(NGRAPH_DYNAMIC_COMPONENTS_ENABLE "Enable dynamic loading of components" ON)
option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" OFF)
if (NGRAPH_ONNX_IMPORT_ENABLE)
if (NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE)
option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system provided Protobuf shared object" OFF)
endif()
if(NGRAPH_ONNX_EDITOR_ENABLE AND NOT NGRAPH_ONNX_IMPORT_ENABLE)
@ -39,6 +40,7 @@ message(STATUS "NGRAPH_DYNAMIC_COMPONENTS_ENABLE: ${NGRAPH_DYNAMIC_COMPONENT
message(STATUS "NGRAPH_INTERPRETER_ENABLE: ${NGRAPH_INTERPRETER_ENABLE}")
message(STATUS "NGRAPH_ONNX_IMPORT_ENABLE: ${NGRAPH_ONNX_IMPORT_ENABLE}")
message(STATUS "NGRAPH_ONNX_EDITOR_ENABLE: ${NGRAPH_ONNX_EDITOR_ENABLE}")
message(STATUS "NGRAPH_PDPD_FRONTEND_ENABLE: ${NGRAPH_PDPD_FRONTEND_ENABLE}")
message(STATUS "NGRAPH_PYTHON_BUILD_ENABLE: ${NGRAPH_PYTHON_BUILD_ENABLE}")
message(STATUS "NGRAPH_USE_PROTOBUF_LITE: ${NGRAPH_USE_PROTOBUF_LITE}")
message(STATUS "NGRAPH_UNIT_TEST_ENABLE: ${NGRAPH_UNIT_TEST_ENABLE}")
@ -179,7 +181,12 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ngraphConfig.cmake
DESTINATION "deployment_tools/ngraph/cmake"
COMPONENT ngraph_dev)
if (NGRAPH_ONNX_IMPORT_ENABLE)
set(USE_STATIC_PROTOBUF OFF)
if (NGRAPH_PDPD_FRONTEND_ENABLE) # add more frontends here which depend on static protobuf
set(USE_STATIC_PROTOBUF ON)
endif()
if (NGRAPH_ONNX_IMPORT_ENABLE OR USE_STATIC_PROTOBUF)
if (MSVC)
# When we build dll libraries. These flags make sure onnx and protobuf build with /MD, not /MT.
# These two options can't be mixed, because they require linking two incompatible runtimes.
@ -195,6 +202,7 @@ if (NGRAPH_ONNX_IMPORT_ENABLE)
set(BEFORE_ONNX_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
set(BUILD_SHARED_LIBS ON)
set(BUILD_STANDALONE_STATIC OFF)
if (NOT NGRAPH_USE_SYSTEM_PROTOBUF)
include(cmake/external_protobuf.cmake)
@ -202,13 +210,17 @@ if (NGRAPH_ONNX_IMPORT_ENABLE)
find_package(Protobuf 2.6.1 REQUIRED)
endif()
# target onnx_proto will be shared lib, onnx static
include(cmake/external_onnx.cmake)
if (TARGET ext_protobuf)
add_dependencies(onnx ext_protobuf)
if (NGRAPH_ONNX_IMPORT_ENABLE)
# target onnx_proto will be shared lib, onnx static
include(cmake/external_onnx.cmake)
if (TARGET ext_protobuf)
add_dependencies(onnx ext_protobuf)
endif()
endif()
set(BUILD_SHARED_LIBS ${BEFORE_ONNX_BUILD_SHARED_LIBS})
unset(BEFORE_ONNX_BUILD_SHARED_LIBS)
unset(BUILD_STANDALONE_STATIC)
endif()
add_subdirectory(frontend)

View File

@ -64,25 +64,45 @@ if(PROTOC_VERSION VERSION_LESS "3.9" AND NGRAPH_USE_PROTOBUF_LITE)
message(FATAL_ERROR "Minimum supported version of protobuf-lite library is 3.9.0")
else()
if(PROTOC_VERSION VERSION_GREATER_EQUAL "3.0")
FetchContent_Declare(
ext_protobuf
GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL}
GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG}
GIT_SHALLOW TRUE
)
FetchContent_GetProperties(ext_protobuf)
if(NOT ext_protobuf_POPULATED)
FetchContent_Populate(ext_protobuf)
set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests")
set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support")
add_subdirectory(${ext_protobuf_SOURCE_DIR}/cmake ${ext_protobuf_BINARY_DIR} EXCLUDE_FROM_ALL)
if (NOT BUILD_STANDALONE_STATIC)
FetchContent_Declare(
ext_protobuf
GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL}
GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG}
GIT_SHALLOW TRUE
)
FetchContent_GetProperties(ext_protobuf)
if(NOT ext_protobuf_POPULATED)
FetchContent_Populate(ext_protobuf)
set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests")
set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support")
add_subdirectory(${ext_protobuf_SOURCE_DIR}/cmake ${ext_protobuf_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
endif()
if (USE_STATIC_PROTOBUF)
FetchContent_Declare(
ext_protobuf_static
GIT_REPOSITORY ${NGRAPH_PROTOBUF_GIT_REPO_URL}
GIT_TAG ${NGRAPH_PROTOBUF_GIT_TAG}
GIT_SHALLOW TRUE
)
FetchContent_GetProperties(ext_protobuf_static)
if((NOT ext_protobuf_static_POPULATED) AND BUILD_STANDALONE_STATIC)
FetchContent_Populate(ext_protobuf_static)
set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build tests")
set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build with zlib support")
add_subdirectory(${ext_protobuf_static_SOURCE_DIR}/cmake ${ext_protobuf_static_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
endif()
else()
message(FATAL_ERROR "Minimum supported version of protobuf library is 3.0.0")
endif()
set(Protobuf_INCLUDE_DIRS ${ext_protobuf_SOURCE_DIR}/src)
if (BUILD_STANDALONE_STATIC)
set(Protobuf_INCLUDE_DIRS ${ext_protobuf_static_SOURCE_DIR}/src)
else()
set(Protobuf_INCLUDE_DIRS ${ext_protobuf_SOURCE_DIR}/src)
endif()
if(NGRAPH_USE_PROTOBUF_LITE)
set(Protobuf_LIBRARIES libprotobuf-lite)
else()
@ -117,9 +137,11 @@ endif()
# Now make sure we restore the original flags
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE "${PUSH_CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE}")
install(TARGETS ${Protobuf_LIBRARIES}
RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph
ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph
LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph)
export(TARGETS ${Protobuf_LIBRARIES} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}")
if (NOT BUILD_STANDALONE_STATIC)
message("NGRAPH_INSTALL_LIB = ${NGRAPH_INSTALL_LIB}")
install(TARGETS ${Protobuf_LIBRARIES}
RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph
ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph
LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph)
export(TARGETS ${Protobuf_LIBRARIES} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}")
endif()

View File

@ -2,6 +2,46 @@
# SPDX-License-Identifier: Apache-2.0
#
if(NOT WIN32)
message(${CMAKE_CURRENT_SOURCE_DIR}/cmake_static_protobuf)
message(BINARY ${CMAKE_CURRENT_BINARY_DIR})
# There seems to be no other suitable way to identify the exact output binary name for libprotobuf
if(CMAKE_BUILD_TYPE STREQUAL "Debug")
# Use 'protobufd' directly as it is done in the same way in protobuf cmake files
set(PROTOBUF_STATIC_LIB_OUTPUT
${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/${CMAKE_STATIC_LIBRARY_PREFIX}protobufd${CMAKE_STATIC_LIBRARY_SUFFIX})
else(CMAKE_BUILD_TYPE STREQUAL "Debug")
set(PROTOBUF_STATIC_LIB_OUTPUT ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/${CMAKE_STATIC_LIBRARY_PREFIX}protobuf${CMAKE_STATIC_LIBRARY_SUFFIX})
endif()
message("Static protobuf lib: ${PROTOBUF_STATIC_LIB_OUTPUT}")
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/libprotobuf_static)
add_custom_command(
OUTPUT
${PROTOBUF_STATIC_LIB_OUTPUT}
COMMAND ${CMAKE_COMMAND} ${CMAKE_CURRENT_SOURCE_DIR}/cmake_static_protobuf
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
-DCMAKE_COMPILE_PDB_OUTPUT_DIRECTORY=${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_PDB_OUTPUT_DIRECTORY=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_CXX_VISIBILITY_PRESET=${CMAKE_CXX_VISIBILITY_PRESET}
-DNGRAPH_INSTALL_LIB=${NGRAPH_INSTALL_LIB}
-DSYSTEM_PROTOC=${SYSTEM_PROTOC}
${NGRAPH_FORWARD_CMAKE_ARGS}
COMMAND ${CMAKE_COMMAND} --build . --target libprotobuf
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/libprotobuf_static
COMMENT "Build Protobuf Static Library"
)
add_custom_target(libprotobuf_static
DEPENDS ${PROTOBUF_STATIC_LIB_OUTPUT})
endif()
add_subdirectory(frontend_manager)
if (NGRAPH_ONNX_IMPORT_ENABLE)
@ -12,3 +52,7 @@ endif()
if (NGRAPH_ONNX_EDITOR_ENABLE)
add_subdirectory(onnx_editor)
endif()
if (NGRAPH_PDPD_FRONTEND_ENABLE)
add_subdirectory(paddlepaddle)
endif()

View File

@ -0,0 +1,23 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required(VERSION 3.13)

project(libprotobuf_static)

message("Add PROTOBUF dependency - static")

# Remember the caller's configuration so it can be restored once the
# static protobuf sub-build has been included.
set(_pdpd_backup_build_shared_libs ${BUILD_SHARED_LIBS})
set(_pdpd_backup_build_standalone_static ${BUILD_STANDALONE_STATIC})
set(_pdpd_backup_use_static_protobuf ${USE_STATIC_PROTOBUF})

# Force a standalone, static protobuf configuration for this sub-project.
set(BUILD_SHARED_LIBS OFF)
set(BUILD_STANDALONE_STATIC ON)
set(USE_STATIC_PROTOBUF ON)

include(../../cmake/external_protobuf.cmake)

# Restore whatever the caller had configured.
set(BUILD_SHARED_LIBS ${_pdpd_backup_build_shared_libs})
set(BUILD_STANDALONE_STATIC ${_pdpd_backup_build_standalone_static})
set(USE_STATIC_PROTOBUF ${_pdpd_backup_use_static_protobuf})

View File

@ -48,3 +48,4 @@ install(DIRECTORY ${FRONTEND_INCLUDE_DIR}/frontend_manager
)
export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}")

View File

@ -0,0 +1,100 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
set(TARGET_NAME "paddlepaddle_ngraph_frontend")

file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/*.cpp)
file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/src/*.hpp)
file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/*.hpp)

set(${TARGET_NAME}_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/include)

# Create named folders for the sources within the .vcproj
# Empty name lists them directly under the .vcproj
source_group("src" FILES ${LIBRARY_SRC})
source_group("include" FILES ${LIBRARY_HEADERS})
source_group("public include" FILES ${LIBRARY_PUBLIC_HEADERS})

set(PROTO_SRCS)
set(PROTO_HDRS)

# Generate protobuf file on build time for each '.proto' file in src/proto
file(GLOB proto_files ${CMAKE_CURRENT_SOURCE_DIR}/src/proto/*.proto)

# When cross-compiling, a freshly built 'protoc' cannot run on the build host,
# so a host-side SYSTEM_PROTOC must be supplied instead.
if(CMAKE_CROSSCOMPILING)
    set(PDPD_PROTOC_EXECUTABLE ${SYSTEM_PROTOC})
else()
    set(PDPD_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
endif()
message("PDPD_PROTOC_EXECUTABLE is [${PDPD_PROTOC_EXECUTABLE}]")

foreach(INFILE ${proto_files})
    get_filename_component(FILE_DIR ${INFILE} DIRECTORY)
    get_filename_component(FILE_WE ${INFILE} NAME_WE)
    set(OUTPUT_PB_SRC ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.cc)
    set(OUTPUT_PB_HEADER ${CMAKE_CURRENT_BINARY_DIR}/${FILE_WE}.pb.h)
    set(GENERATED_PROTO ${INFILE})
    add_custom_command(
        OUTPUT "${OUTPUT_PB_SRC}" "${OUTPUT_PB_HEADER}"
        COMMAND ${PDPD_PROTOC_EXECUTABLE} ARGS --cpp_out ${CMAKE_CURRENT_BINARY_DIR} -I ${FILE_DIR} ${FILE_WE}.proto
        DEPENDS ${PDPD_PROTOC_EXECUTABLE} ${GENERATED_PROTO}
        COMMENT "Running C++ protocol buffer compiler (${PDPD_PROTOC_EXECUTABLE}) on ${GENERATED_PROTO}"
        VERBATIM)
    list(APPEND PROTO_SRCS "${OUTPUT_PB_SRC}")
    list(APPEND PROTO_HDRS "${OUTPUT_PB_HEADER}")
endforeach()

add_custom_target(${TARGET_NAME}_proto DEPENDS ${PROTO_SRCS} ${PROTO_HDRS})

set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS} PROPERTIES GENERATED TRUE)
# Disable all warnings for generated code
set_source_files_properties(${PROTO_SRCS} ${PROTO_HDRS} PROPERTIES COMPILE_FLAGS -w)

# Create shared library
add_library(${TARGET_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS} ${LIBRARY_PUBLIC_HEADERS} ${PROTO_SRCS} ${PROTO_HDRS})
add_dependencies(${TARGET_NAME} libprotobuf ${TARGET_NAME}_proto)

# BUGFIX: this dependency used to be declared before 'add_library(${TARGET_NAME} ...)',
# i.e. before the target existed, which is an error for add_dependencies().
# It must come after the target is created.
if (TARGET ext_protobuf)
    add_dependencies(${TARGET_NAME} ext_protobuf)
endif()

if(NOT WIN32)
    # Additional dependency on the standalone static libprotobuf for non-Windows
    add_dependencies(${TARGET_NAME} libprotobuf_static)
endif()

# Target-scoped include directories (instead of directory-wide
# 'include_directories') so the paths do not leak into sibling targets.
target_include_directories(${TARGET_NAME}
        PRIVATE
            ${CMAKE_CURRENT_SOURCE_DIR}/src
            ${FRONTEND_INCLUDE_PATH}
            ${CMAKE_CURRENT_BINARY_DIR}
            ${Protobuf_INCLUDE_DIRS}
            ${${TARGET_NAME}_INCLUDE_DIR})

if(COMMAND ie_add_vs_version_file)
    ie_add_vs_version_file(NAME ${TARGET_NAME}
                           FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format")
endif()

# On Windows the shared protobuf library is linked; elsewhere the statically
# built archive produced by the libprotobuf_static sub-build is used.
if(WIN32)
    target_link_libraries(${TARGET_NAME} PRIVATE libprotobuf PUBLIC ngraph PRIVATE ngraph::builder)
else()
    target_link_libraries(${TARGET_NAME} PRIVATE ${PROTOBUF_STATIC_LIB_OUTPUT} PUBLIC ngraph PRIVATE ngraph::builder)
endif()
target_link_libraries(${TARGET_NAME} PRIVATE frontend_manager)

add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}
                        EXCLUDE_PATTERNS ${PROTO_SRCS} ${PROTO_HDRS})

install(TARGETS ${TARGET_NAME} EXPORT ngraphTargets
        RUNTIME DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph
        ARCHIVE DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph
        LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB} COMPONENT ngraph)

export(TARGETS ${TARGET_NAME} NAMESPACE ngraph:: APPEND FILE "${NGRAPH_TARGETS_FILE}")

View File

@ -0,0 +1,45 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_exceptions.hpp>
#include <ngraph/node.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
// Forward declaration; the full definition lives in node_context.hpp.
class NodeContext;
// Exception raised when validation of a PaddlePaddle node fails.
// Prefixes the message with the type of the node being validated
// (via get_error_msg_prefix_pdpd) and delegates to OpValidationFailure.
class OpValidationFailurePDPD : public OpValidationFailure
{
public:
OpValidationFailurePDPD(const CheckLocInfo& check_loc_info,
const pdpd::NodeContext& node,
const std::string& explanation)
: OpValidationFailure(
check_loc_info, get_error_msg_prefix_pdpd(node), explanation)
{
}
private:
// Builds the "While validating node '<op type>'" message prefix.
// Defined out-of-line (exceptions.cpp) so this header does not need
// the full NodeContext definition.
static std::string get_error_msg_prefix_pdpd(const pdpd::NodeContext& node);
};
} // namespace pdpd
} // namespace frontend
/// \brief Macro to check whether a boolean condition holds.
/// \param node_context Object of NodeContext class
/// \param cond Condition to check
/// \param ... Additional error message info to be added to the error message via the `<<`
/// stream-insertion operator. Note that the expressions here will be evaluated lazily,
/// i.e., only if the `cond` evaluates to `false`.
/// \throws ::ngraph::OpValidationFailurePDPD if `cond` is false.
#define PDPD_OP_VALIDATION_CHECK(node_context, ...) \
NGRAPH_CHECK_HELPER( \
::ngraph::frontend::pdpd::OpValidationFailurePDPD, (node_context), __VA_ARGS__)
} // namespace ngraph

View File

@ -0,0 +1,58 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_manager.hpp>
#include "exceptions.hpp"
#include "model.hpp"
namespace ngraph
{
namespace frontend
{
// FrontEnd implementation for the PaddlePaddle model format: loads a model
// from files or streams into an InputModelPDPD and converts it to an
// ngraph Function.
class PDPD_API FrontEndPDPD : public FrontEnd
{
// Converts a loaded PaddlePaddle model into an nGraph Function.
static std::shared_ptr<Function>
convert_model(const std::shared_ptr<InputModelPDPD>& model);
public:
FrontEndPDPD() = default;
/**
* @brief Reads model from file and deduces file names of weights
* @param path path to folder which contains __model__ file or path to .pdmodel file
* @return InputModel::Ptr
*/
InputModel::Ptr load_from_file(const std::string& path) const override;
/**
* @brief Reads model and weights from files
* @param paths vector containing path to .pdmodel and .pdiparams files
* @return InputModel::Ptr
*/
InputModel::Ptr load_from_files(const std::vector<std::string>& paths) const override;
/**
* @brief Reads model from stream
* @param model_stream stream containing .pdmodel or __model__ files. Can only be used
* if the model has no weights
* @return InputModel::Ptr
*/
InputModel::Ptr load_from_stream(std::istream& model_stream) const override;
/**
* @brief Reads model and weights from streams
* @param paths vector of streams containing .pdmodel and .pdiparams files. Can't be
* used in case of multiple weight files
* @return InputModel::Ptr
*/
InputModel::Ptr
load_from_streams(const std::vector<std::istream*>& paths) const override;
std::shared_ptr<Function> convert(InputModel::Ptr model) const override;
};
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,44 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_manager.hpp>
#include <paddlepaddle_frontend/utility.hpp>
namespace ngraph
{
namespace frontend
{
// Forward declarations; the full definitions live in place.hpp.
class OpPlacePDPD;
class TensorPlacePDPD;
// InputModel implementation for the PaddlePaddle frontend.
// Uses the pimpl idiom (InputModelPDPDImpl), presumably to keep the
// protobuf-generated types out of this public header — confirm in model.cpp.
class PDPD_API InputModelPDPD : public InputModel
{
friend class FrontEndPDPD;
class InputModelPDPDImpl;
std::shared_ptr<InputModelPDPDImpl> _impl;
// Private accessors used by FrontEndPDPD during conversion.
std::vector<std::shared_ptr<OpPlacePDPD>> getOpPlaces() const;
std::map<std::string, std::shared_ptr<TensorPlacePDPD>> getVarPlaces() const;
std::map<std::string, Output<Node>> getTensorValues() const;
public:
// Loads a model from a path (folder with a __model__ file or a .pdmodel file).
explicit InputModelPDPD(const std::string& path);
// Loads a model from already-opened streams (.pdmodel / .pdiparams).
explicit InputModelPDPD(const std::vector<std::istream*>& streams);
std::vector<Place::Ptr> get_inputs() const override;
std::vector<Place::Ptr> get_outputs() const override;
Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override;
void override_all_outputs(const std::vector<Place::Ptr>& outputs) override;
void override_all_inputs(const std::vector<Place::Ptr>& inputs) override;
void extract_subgraph(const std::vector<Place::Ptr>& inputs,
const std::vector<Place::Ptr>& outputs) override;
void set_partial_shape(Place::Ptr place, const ngraph::PartialShape&) override;
ngraph::PartialShape get_partial_shape(Place::Ptr place) const override;
void set_element_type(Place::Ptr place, const ngraph::element::Type&) override;
void set_tensor_value(Place::Ptr place, const void* value) override;
};
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,206 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_manager.hpp>
#include <paddlepaddle_frontend/exceptions.hpp>
namespace paddle
{
namespace framework
{
namespace proto
{
class OpDesc;
class VarDesc;
} // namespace proto
} // namespace framework
} // namespace paddle
namespace ngraph
{
namespace frontend
{
class TensorPlacePDPD;
class OpPlacePDPD;
// Base Place implementation for the PaddlePaddle frontend.
// Holds a reference to the owning InputModel and an optional list of names.
class PlacePDPD : public Place
{
public:
PlacePDPD(const InputModel& input_model, const std::vector<std::string>& names)
: m_input_model(input_model)
, m_names(names)
{
}
// Convenience constructor for a place without names.
explicit PlacePDPD(const InputModel& input_model)
: PlacePDPD(input_model, std::vector<std::string>{})
{
}
~PlacePDPD() override = default;
bool is_input() const override;
bool is_output() const override;
// Equality is object identity, not name comparison.
bool is_equal(Ptr another) const override { return this == another.get(); }
std::vector<std::string> get_names() const override { return m_names; }
private:
const InputModel& m_input_model;
std::vector<std::string> m_names;
};
// Input port of an operation: connects an OpPlacePDPD to the tensor it reads.
// Back-links are stored as weak_ptr — presumably to avoid shared_ptr cycles
// between op/tensor places; confirm against the model implementation.
class InPortPlacePDPD : public PlacePDPD
{
public:
explicit InPortPlacePDPD(const InputModel& input_model)
: PlacePDPD(input_model)
{
}
void setOp(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }
void setSourceTensor(const std::weak_ptr<TensorPlacePDPD>& source_tensor)
{
m_source_tensor = source_tensor;
}
// Out-of-line accessors (defined in place.cpp) that lock the weak links.
std::shared_ptr<TensorPlacePDPD> getSourceTensorPDPD() const;
std::shared_ptr<OpPlacePDPD> getOp();
private:
std::weak_ptr<TensorPlacePDPD> m_source_tensor;
std::weak_ptr<OpPlacePDPD> m_op;
};
// Output port of an operation: connects an OpPlacePDPD to the tensor it writes.
// Back-links are stored as weak_ptr, mirroring InPortPlacePDPD.
class OutPortPlacePDPD : public PlacePDPD
{
public:
explicit OutPortPlacePDPD(const InputModel& input_model)
: PlacePDPD(input_model)
{
}
void setOp(const std::weak_ptr<OpPlacePDPD>& op) { m_op = op; }
void setTargetTensor(const std::weak_ptr<TensorPlacePDPD>& target_tensor)
{
m_target_tensor = target_tensor;
}
// Out-of-line accessor (defined in place.cpp) that locks the weak link.
std::shared_ptr<TensorPlacePDPD> getTargetTensorPDPD() const;
private:
std::weak_ptr<OpPlacePDPD> m_op;
std::weak_ptr<TensorPlacePDPD> m_target_tensor;
};
// Operation place: wraps a proto OpDesc and keeps the op's input/output
// ports grouped by parameter name (each name maps to an ordered port list).
class OpPlacePDPD : public PlacePDPD
{
public:
OpPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc);
OpPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc);
// Registers an input port under the given parameter name (appended in order).
void addInPort(const std::shared_ptr<InPortPlacePDPD>& input, const std::string& name)
{
m_input_ports[name].push_back(input);
}
// Registers an output port under the given parameter name (appended in order).
void addOutPort(const std::shared_ptr<OutPortPlacePDPD>& output,
const std::string& name)
{
m_output_ports[name].push_back(output);
}
const std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>>&
getOutputPorts() const
{
return m_output_ports;
}
const std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>>&
getInputPorts() const
{
return m_input_ports;
}
// NOTE(review): operator[] is used below, so looking up an unknown name
// inserts an empty entry — confirm callers only pass known names.
std::shared_ptr<OutPortPlacePDPD> getOutputPortPDPD(const std::string& name, int idx)
{
return m_output_ports[name][idx];
}
std::shared_ptr<InPortPlacePDPD> getInputPortPDPD(const std::string& name, int idx)
{
return m_input_ports[name][idx];
}
// Underlying protobuf descriptor of the operation.
const std::shared_ptr<paddle::framework::proto::OpDesc>& getDesc() const
{
return m_op_desc;
}
private:
std::shared_ptr<paddle::framework::proto::OpDesc> m_op_desc;
std::map<std::string, std::vector<std::shared_ptr<InPortPlacePDPD>>> m_input_ports;
std::map<std::string, std::vector<std::shared_ptr<OutPortPlacePDPD>>> m_output_ports;
};
// Tensor place: wraps a proto VarDesc and tracks the tensor's partial shape,
// element type, and its producing/consuming ports (held as weak_ptr).
class TensorPlacePDPD : public PlacePDPD
{
public:
TensorPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc);
TensorPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc);
void addProducingPort(const std::shared_ptr<OutPortPlacePDPD>& out_port)
{
m_producing_ports.push_back(out_port);
}
void addConsumingPort(const std::shared_ptr<InPortPlacePDPD>& in_port)
{
m_consuming_ports.push_back(in_port);
}
std::vector<Place::Ptr> get_consuming_ports() const override;
Ptr get_producing_port() const override;
// Shape/type accessors; values are set externally (e.g. by the model loader).
const PartialShape& getPartialShape() const { return m_pshape; }
const element::Type& getElementType() const { return m_type; }
void setPartialShape(const PartialShape& pshape) { m_pshape = pshape; }
void setElementType(const element::Type& type) { m_type = type; }
// Underlying protobuf descriptor of the variable.
const std::shared_ptr<paddle::framework::proto::VarDesc>& getDesc() const
{
return m_var_desc;
}
private:
std::shared_ptr<paddle::framework::proto::VarDesc> m_var_desc;
PartialShape m_pshape;
element::Type m_type;
std::vector<std::weak_ptr<OutPortPlacePDPD>> m_producing_ports;
std::vector<std::weak_ptr<InPortPlacePDPD>> m_consuming_ports;
};
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,31 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_manager.hpp>
// PDPD_API marks the public symbols of the PaddlePaddle frontend library:
// exported while building the DLL, imported by its consumers.
// Defined if we are building the plugin DLL (instead of using it)
#ifdef paddlepaddle_ngraph_frontend_EXPORTS
#define PDPD_API NGRAPH_HELPER_DLL_EXPORT
#else
#define PDPD_API NGRAPH_HELPER_DLL_IMPORT
#endif // paddlepaddle_ngraph_frontend_EXPORTS
namespace ngraph
{
namespace frontend
{
// Throws std::runtime_error with the given message when the condition is false.
inline void PDPD_ASSERT(bool ex, const std::string& msg = "Unspecified error.")
{
    // Guard-clause form: nothing to do when the assertion holds.
    if (ex)
        return;
    throw std::runtime_error(msg);
}
// Unconditionally throws std::runtime_error with an "ERROR: " prefix.
#define PDPD_THROW(msg) throw std::runtime_error(std::string("ERROR: ") + msg)
// Throws std::runtime_error to signal that the named feature is not implemented.
#define NOT_IMPLEMENTED(msg) throw std::runtime_error(std::string(msg) + " is not implemented")
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,139 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <chrono>
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "framework.pb.h"
#include "decoder.hpp"
namespace ngraph
{
namespace frontend
{
using namespace paddle::framework;
// Maps PaddlePaddle proto tensor element types onto nGraph element types.
// Used by DecoderPDPDProto::get_attribute when decoding dtype attributes.
std::map<paddle::framework::proto::VarType_Type, ngraph::element::Type> TYPE_MAP{
{proto::VarType_Type::VarType_Type_BOOL, ngraph::element::boolean},
{proto::VarType_Type::VarType_Type_INT16, ngraph::element::i16},
{proto::VarType_Type::VarType_Type_INT32, ngraph::element::i32},
{proto::VarType_Type::VarType_Type_INT64, ngraph::element::i64},
{proto::VarType_Type::VarType_Type_FP16, ngraph::element::f16},
{proto::VarType_Type::VarType_Type_FP32, ngraph::element::f32},
{proto::VarType_Type::VarType_Type_FP64, ngraph::element::f64},
{proto::VarType_Type::VarType_Type_UINT8, ngraph::element::u8},
{proto::VarType_Type::VarType_Type_INT8, ngraph::element::i8},
{proto::VarType_Type::VarType_Type_BF16, ngraph::element::bf16}};
// Fetches the attribute `name` from the op's proto descriptor and wraps its
// value in a VariantWrapper matching the requested `type_info`.
// Returns nullptr when the attribute is absent or the requested type is not
// supported by this decoder.
std::shared_ptr<Variant>
DecoderPDPDProto::get_attribute(const std::string& name,
const VariantTypeInfo& type_info) const
{
auto attrs = decode_attribute_helper(name);
if (attrs.empty())
{
return nullptr;
}
// Dispatch on the requested variant type; each branch reads the matching
// field of the proto attribute (s/l/i/f/b or the repeated variants).
if (type_info == VariantWrapper<std::string>::type_info)
{
return std::make_shared<VariantWrapper<std::string>>(attrs[0].s());
}
else if (type_info == VariantWrapper<int64_t>::type_info)
{
return std::make_shared<VariantWrapper<int64_t>>(attrs[0].l());
}
else if (type_info == VariantWrapper<std::vector<int64_t>>::type_info)
{
auto longs = std::vector<int64_t>(attrs[0].longs().begin(), attrs[0].longs().end());
return std::make_shared<VariantWrapper<std::vector<int64_t>>>(longs);
}
else if (type_info == VariantWrapper<int32_t>::type_info)
{
return std::make_shared<VariantWrapper<int32_t>>(attrs[0].i());
}
else if (type_info == VariantWrapper<std::vector<int32_t>>::type_info)
{
auto ints = std::vector<int32_t>(attrs[0].ints().begin(), attrs[0].ints().end());
return std::make_shared<VariantWrapper<std::vector<int32_t>>>(ints);
}
else if (type_info == VariantWrapper<float>::type_info)
{
return std::make_shared<VariantWrapper<float>>(attrs[0].f());
}
else if (type_info == VariantWrapper<std::vector<float>>::type_info)
{
auto floats =
std::vector<float>(attrs[0].floats().begin(), attrs[0].floats().end());
return std::make_shared<VariantWrapper<std::vector<float>>>(floats);
}
else if (type_info == VariantWrapper<ngraph::element::Type>::type_info)
{
// dtype attributes are stored as integers; translate via TYPE_MAP.
auto data_type = (paddle::framework::proto::VarType_Type)attrs[0].i();
return std::make_shared<VariantWrapper<ngraph::element::Type>>(TYPE_MAP[data_type]);
}
else if (type_info == VariantWrapper<bool>::type_info)
{
return std::make_shared<VariantWrapper<bool>>(attrs[0].b());
}
// Type is not supported by decoder
return nullptr;
}
// Returns the parameter name of every output declared on the op's proto
// descriptor, in declaration order.
std::vector<pdpd::OutPortName> DecoderPDPDProto::get_output_names() const
{
    std::vector<std::string> names;
    const auto& outputs = op_place->getDesc()->outputs();
    names.reserve(outputs.size());
    for (const auto& output : outputs)
    {
        names.push_back(output.parameter());
    }
    return names;
}
// Returns the element type of the tensors connected to output port
// `port_name`. Fails if the port has no tensors or if the connected tensors
// disagree on element type.
ngraph::element::Type
DecoderPDPDProto::get_out_port_type(const std::string& port_name) const
{
std::vector<ngraph::element::Type> output_types;
for (const auto& out_port : op_place->getOutputPorts().at(port_name))
{
output_types.push_back(out_port->getTargetTensorPDPD()->getElementType());
}
FRONT_END_GENERAL_CHECK(output_types.size() > 0, "Port has no tensors connected.");
// All connected tensors must share one type; compare each element to the first.
FRONT_END_GENERAL_CHECK(
std::equal(output_types.begin() + 1, output_types.end(), output_types.begin()),
"Port has tensors with different types connected.");
return output_types[0];
}
// Returns the operation type string taken from the underlying proto OpDesc.
std::string DecoderPDPDProto::get_op_type() const
{
    const auto& desc = op_place->getDesc();
    return desc->type();
}
// Collects every proto attribute of the op whose name equals `name`.
// Enforces that at most one such attribute exists; returns an empty vector
// when the attribute is absent.
std::vector<proto::OpDesc_Attr>
DecoderPDPDProto::decode_attribute_helper(const std::string& name) const
{
std::vector<proto::OpDesc_Attr> attrs;
for (const auto& attr : op_place->getDesc()->attrs())
{
if (attr.name() == name)
attrs.push_back(attr);
}
FRONT_END_GENERAL_CHECK(attrs.size() <= 1,
"An error occurred while parsing the ",
name,
" attribute of ",
op_place->getDesc()->type(),
"node. Unsupported number of attributes. Current number: ",
attrs.size(),
" Expected number: 0 or 1");
return attrs;
}
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,54 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <algorithm>
#include <chrono>
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "framework.pb.h"
#include <paddlepaddle_frontend/frontend.hpp>
#include <paddlepaddle_frontend/place.hpp>
#include "node_context.hpp"
#include <ngraph/ngraph.hpp>
namespace ngraph
{
namespace frontend
{
// Shared mapping from PaddlePaddle proto element types to nGraph element
// types; defined in decoder.cpp.
extern std::map<paddle::framework::proto::VarType_Type, ngraph::element::Type> TYPE_MAP;
// DecoderBase implementation that reads attributes, output names and output
// port types directly from the proto OpDesc held by an OpPlacePDPD.
class DecoderPDPDProto : public pdpd::DecoderBase
{
public:
explicit DecoderPDPDProto(const std::shared_ptr<OpPlacePDPD>& op)
: op_place(op)
{
}
// Returns the named attribute wrapped in a Variant of the requested type,
// or nullptr if absent/unsupported (see decoder.cpp).
std::shared_ptr<Variant> get_attribute(const std::string& name,
const VariantTypeInfo& type_info) const override;
std::vector<pdpd::OutPortName> get_output_names() const override;
ngraph::element::Type get_out_port_type(const std::string& port_name) const override;
std::string get_op_type() const override;
private:
// Gathers all proto attributes matching `name` (at most one is allowed).
std::vector<paddle::framework::proto::OpDesc_Attr>
decode_attribute_helper(const std::string& name) const;
std::shared_ptr<OpPlacePDPD> op_place;
};
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,23 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "paddlepaddle_frontend/exceptions.hpp"
#include "node_context.hpp"

namespace ngraph
{
    namespace frontend
    {
        namespace pdpd
        {
            // Builds the prefix used by OpValidationFailurePDPD messages:
            // "While validating node '<op type>'".
            std::string
                OpValidationFailurePDPD::get_error_msg_prefix_pdpd(const pdpd::NodeContext& node)
            {
                return "While validating node '" + node.get_op_type() + "'";
            }
        } // namespace pdpd
    } // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,216 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <algorithm>
#include <chrono>
#include <fstream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "framework.pb.h"
#include <paddlepaddle_frontend/frontend.hpp>
#include <paddlepaddle_frontend/model.hpp>
#include <paddlepaddle_frontend/place.hpp>
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset7.hpp>
#include <paddlepaddle_frontend/exceptions.hpp>
#include "decoder.hpp"
#include "node_context.hpp"
#include "op_table.hpp"
#include <functional>
#include "frontend_manager/frontend_manager.hpp"
using namespace ngraph::opset7;
using namespace ngraph;
using namespace ngraph::frontend;
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
// Converts a single PaddlePaddle op into nGraph node(s) by looking up and
// invoking the creator registered for the op type in CREATORS_MAP.
// `nodes` maps tensor names to already-converted nGraph outputs and supplies
// the op's inputs. Returns an empty NamedOutputs when some input tensor is
// missing from `nodes` (see comment below); throws via
// FRONT_END_OP_CONVERSION_CHECK when no creator exists for the op type.
NamedOutputs make_ng_node(std::map<pdpd::TensorName, Output<Node>>& nodes,
const std::shared_ptr<OpPlacePDPD>& op_place,
const std::map<std::string, CreatorFunction>& CREATORS_MAP)
{
const auto& op = op_place->getDesc();
FRONT_END_OP_CONVERSION_CHECK(CREATORS_MAP.find(op->type()) != CREATORS_MAP.end(),
"No creator found for ",
op->type(),
" node.");
pdpd::NamedInputs named_inputs;
const auto& input_ports = op_place->getInputPorts();
for (const auto& name_to_ports : input_ports)
{
for (const auto& port : name_to_ports.second)
{
const auto& var_desc = port->getSourceTensorPDPD()->getDesc();
if (nodes.count(var_desc->name()))
named_inputs[name_to_ports.first].push_back(nodes.at(var_desc->name()));
else
// return empty map when not all inputs exist. It usually means that
// these nodes are not used because model inputs were overwritten
return NamedOutputs();
}
}
return CREATORS_MAP.at(op->type())(
NodeContext(DecoderPDPDProto(op_place), named_inputs));
}
} // namespace pdpd
        // Converts a parsed PDPD model into an nGraph Function: model inputs become
        // Parameters, each operator is lowered via the creators table, and declared
        // outputs are wrapped into Results.
        std::shared_ptr<Function>
            FrontEndPDPD::convert_model(const std::shared_ptr<InputModelPDPD>& model)
        {
            // Seed the tensor-name -> output map with preset values (constants
            // loaded from weights or values set via set_tensor_value).
            std::map<pdpd::TensorName, Output<Node>> nodes_dict(model->getTensorValues());
            ParameterVector parameter_nodes;
            ResultVector result_nodes;
            std::map<std::string, pdpd::CreatorFunction> CREATORS_MAP = pdpd::get_supported_ops();
            // Model inputs become Parameter nodes with the recorded shape/type.
            for (const auto& _inp_place : model->get_inputs())
            {
                const auto& inp_place = std::dynamic_pointer_cast<TensorPlacePDPD>(_inp_place);
                const auto& var = inp_place->getDesc();
                const auto& shape = inp_place->getPartialShape();
                const auto& type = inp_place->getElementType();
                auto param = std::make_shared<Parameter>(type, shape);
                param->set_friendly_name(var->name());
                nodes_dict[var->name()] = param;
                parameter_nodes.push_back(param);
            }
            const auto& op_places = model->getOpPlaces();
            for (const auto& op_place : op_places)
            {
                const auto& op_type = op_place->getDesc()->type();
                if (op_type == "feed" || op_type == "fetch")
                {
                    // inputs and outputs are stored in the model already
                    continue;
                }
                else
                {
                    const auto& named_outputs =
                        pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP);
                    // set layer name by the name of first output var
                    if (!named_outputs.empty())
                    {
                        const auto& first_output_var = op_place->getOutputPorts()
                                                           .begin()
                                                           ->second.at(0)
                                                           ->getTargetTensorPDPD()
                                                           ->getDesc();
                        auto node = named_outputs.begin()->second[0].get_node_shared_ptr();
                        node->set_friendly_name(first_output_var->name());
                    }
                    const auto& out_ports = op_place->getOutputPorts();
                    for (const auto& name_to_outputs : named_outputs)
                    {
                        const auto& ports = out_ports.at(name_to_outputs.first);
                        FRONT_END_OP_CONVERSION_CHECK(
                            ports.size() == name_to_outputs.second.size(),
                            "The number of output tensors must be equal to "
                            "the number of outputs of the ngraph node.");
                        for (size_t idx = 0; idx < ports.size(); ++idx)
                        {
                            const auto& var = ports[idx]->getTargetTensorPDPD()->getDesc();
                            name_to_outputs.second[idx].get_tensor().set_names({var->name()});
                            // if nodes_dict already has node mapped to this tensor name it usually
                            // means that it was overwritten using setTensorValue
                            if (!nodes_dict.count(var->name()))
                                nodes_dict[var->name()] = name_to_outputs.second[idx];
                        }
                    }
                }
            }
            // Model outputs become Result nodes over the produced tensors.
            for (const auto& _outp_place : model->get_outputs())
            {
                const auto& outp_place = std::dynamic_pointer_cast<TensorPlacePDPD>(_outp_place);
                auto var = outp_place->getDesc();
                auto input_var_name = var->name();
                auto result = std::make_shared<Result>(nodes_dict.at(input_var_name));
                result->set_friendly_name(input_var_name + "/Result");
                result_nodes.push_back(result);
            }
            return std::make_shared<ngraph::Function>(result_nodes, parameter_nodes);
        }
InputModel::Ptr FrontEndPDPD::load_from_file(const std::string& path) const
{
return load_from_files({path});
}
InputModel::Ptr FrontEndPDPD::load_from_files(const std::vector<std::string>& paths) const
{
if (paths.size() == 1)
{
// The case when folder with __model__ and weight files is provided or .pdmodel file
return std::make_shared<InputModelPDPD>(paths[0]);
}
else if (paths.size() == 2)
{
// The case when .pdmodel and .pdparams files are provided
std::ifstream model_stream(paths[0], std::ios::in | std::ifstream::binary);
FRONT_END_INITIALIZATION_CHECK(model_stream && model_stream.is_open(),
"Cannot open model file.");
std::ifstream weights_stream(paths[1], std::ios::in | std::ifstream::binary);
FRONT_END_INITIALIZATION_CHECK(weights_stream && weights_stream.is_open(),
"Cannot open weights file.");
return load_from_streams({&model_stream, &weights_stream});
}
FRONT_END_INITIALIZATION_CHECK(false, "Model can be loaded either from 1 or 2 files");
}
InputModel::Ptr FrontEndPDPD::load_from_stream(std::istream& model_stream) const
{
return load_from_streams({&model_stream});
}
        // Parses the model (and optional weights) from already-open streams.
        // The streams are consumed inside the InputModelPDPD constructor, so they
        // only need to stay alive for the duration of this call.
        InputModel::Ptr
            FrontEndPDPD::load_from_streams(const std::vector<std::istream*>& streams) const
        {
            return std::make_shared<InputModelPDPD>(streams);
        }
std::shared_ptr<ngraph::Function> FrontEndPDPD::convert(InputModel::Ptr model) const
{
auto pdpd_model = std::dynamic_pointer_cast<InputModelPDPD>(model);
auto f = convert_model(pdpd_model);
return f;
}
} // namespace frontend
} // namespace ngraph
// Reports the frontend API version this plugin was built against so the
// frontend manager can reject incompatible plugins.
extern "C" PDPD_API FrontEndVersion GetAPIVersion()
{
    return OV_FRONTEND_API_VERSION;
}
// Plugin entry point: describes this frontend ("pdpd") and how to create it.
// NOTE(review): returns a raw 'new' allocation - presumably the frontend
// manager takes ownership and frees it; confirm against frontend_manager.
extern "C" PDPD_API void* GetFrontEndData()
{
    FrontEndPluginInfo* res = new FrontEndPluginInfo();
    res->m_name = "pdpd";
    res->m_creator = [](FrontEndCapFlags) { return std::make_shared<FrontEndPDPD>(); };
    return res;
}

View File

@ -0,0 +1,437 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <paddlepaddle_frontend/exceptions.hpp>
#include <paddlepaddle_frontend/model.hpp>
#include <paddlepaddle_frontend/place.hpp>
#include <fstream>
#include <ngraph/opsets/opset7.hpp>
#include "decoder.hpp"
#include "framework.pb.h"
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
using namespace paddle::framework::proto;
        // Pimpl behind InputModelPDPD: owns the parsed protobuf ProgramDesc and the
        // place graph (tensor places, op places, in/out ports) built from it.
        class InputModelPDPD::InputModelPDPDImpl
        {
        public:
            // Load from a folder/.pdmodel path or from already-open streams.
            InputModelPDPDImpl(const std::string& path, const InputModel& input_model);
            InputModelPDPDImpl(const std::vector<std::istream*>& streams,
                               const InputModel& input_model);
            std::vector<Place::Ptr> getInputs() const;
            std::vector<Place::Ptr> getOutputs() const;
            Place::Ptr getPlaceByTensorName(const std::string& tensorName) const;
            void overrideAllOutputs(const std::vector<Place::Ptr>& outputs);
            void overrideAllInputs(const std::vector<Place::Ptr>& inputs);
            void extractSubgraph(const std::vector<Place::Ptr>& inputs,
                                 const std::vector<Place::Ptr>& outputs);
            void setDefaultShape(Place::Ptr place, const ngraph::Shape&);
            void setPartialShape(Place::Ptr place, const ngraph::PartialShape&);
            ngraph::PartialShape getPartialShape(Place::Ptr place) const;
            void setElementType(Place::Ptr place, const ngraph::element::Type&);
            void setTensorValue(Place::Ptr place, const void* value);
            std::vector<uint8_t> readWeight(const std::string& name, int64_t len);
            std::vector<std::shared_ptr<OpPlacePDPD>> getOpPlaces() const { return m_op_places; }
            std::map<std::string, std::shared_ptr<TensorPlacePDPD>> getVarPlaces() const
            {
                return m_var_places;
            }
            std::map<pdpd::TensorName, Output<Node>> getTensorValues() const
            {
                return m_tensor_values;
            };

        private:
            void loadPlaces();
            void loadConsts(std::string folder_with_weights, std::istream* weight_stream);
            std::vector<std::shared_ptr<OpPlacePDPD>> m_op_places; // ops in file order
            std::map<std::string, std::shared_ptr<TensorPlacePDPD>> m_var_places;
            std::shared_ptr<ProgramDesc> m_fw_ptr; // parsed protobuf program
            const InputModel& m_input_model;
            std::vector<Place::Ptr> m_inputs;  // tensors produced by "feed" ops
            std::vector<Place::Ptr> m_outputs; // tensors consumed by "fetch" ops
            std::map<pdpd::TensorName, Output<Node>> m_tensor_values; // constants/preset values
        };
        // Builds the place graph from the parsed ProgramDesc: one TensorPlace per
        // variable, one OpPlace per operator, and port places wiring them together.
        // "feed"/"fetch" ops additionally determine the model inputs and outputs.
        void InputModelPDPD::InputModelPDPDImpl::loadPlaces()
        {
            const int cnt_of_blocks = m_fw_ptr->blocks_size();
            const auto& blocks = m_fw_ptr->blocks();
            for (int block_idx = 0; block_idx < cnt_of_blocks; block_idx++)
            {
                const auto& block = blocks[block_idx];
                // Every variable of the block gets a tensor place.
                for (const auto& var : block.vars())
                {
                    m_var_places[var.name()] = std::make_shared<TensorPlacePDPD>(
                        m_input_model, std::make_shared<VarDesc>(var));
                }
                for (const auto& op : block.ops())
                {
                    auto op_place =
                        std::make_shared<OpPlacePDPD>(m_input_model, std::make_shared<OpDesc>(op));
                    m_op_places.push_back(op_place);
                    for (const auto& output : op.outputs())
                    {
                        for (const auto& var_name : output.arguments())
                        {
                            auto out_port = std::make_shared<OutPortPlacePDPD>(m_input_model);
                            // connect out_port and tensor
                            const auto& tensor = m_var_places.at(var_name);
                            tensor->addProducingPort(out_port);
                            out_port->setTargetTensor(tensor);
                            // connect out_port and op
                            op_place->addOutPort(out_port, output.parameter());
                            out_port->setOp(op_place);
                        }
                    }
                    for (const auto& input : op.inputs())
                    {
                        for (const auto& var_name : input.arguments())
                        {
                            auto in_port = std::make_shared<InPortPlacePDPD>(m_input_model);
                            // connect in_port and tensor
                            const auto& tensor = m_var_places.at(var_name);
                            tensor->addConsumingPort(in_port);
                            in_port->setSourceTensor(tensor);
                            // connect in_port and op
                            op_place->addInPort(in_port, input.parameter());
                            in_port->setOp(op_place);
                        }
                    }
                    // Determine outputs and inputs
                    if (op.type() == "feed")
                    {
                        // "feed" marks a model input: record element type and shape
                        // from the LoD tensor description of its "Out" tensor.
                        const auto& place = op_place->getOutputPortPDPD("Out", 0);
                        const auto& var_place = std::dynamic_pointer_cast<TensorPlacePDPD>(
                            place->getTargetTensorPDPD());
                        const auto& tensor_desc =
                            var_place->getDesc()->type().lod_tensor().tensor();
                        const auto& dims = tensor_desc.dims();
                        var_place->setElementType(TYPE_MAP[tensor_desc.data_type()]);
                        var_place->setPartialShape(
                            PartialShape(std::vector<Dimension>(dims.begin(), dims.end())));
                        m_inputs.push_back(var_place);
                    }
                    else if (op.type() == "fetch")
                    {
                        // "fetch" marks a model output: its "X" source tensor is it.
                        auto place = op_place->getInputPortPDPD("X", 0);
                        m_outputs.push_back(place->getSourceTensorPDPD());
                    }
                }
            }
        }
namespace pdpd
{
bool endsWith(const std::string& str, const std::string& suffix)
{
if (str.length() >= suffix.length())
{
return (0 ==
str.compare(str.length() - suffix.length(), suffix.length(), suffix));
}
return false;
}
void read_tensor(std::istream& is, char* data, size_t len)
{
std::vector<char> header(16);
is.read(&header[0], 16);
uint32_t dims_len = 0;
is.read(reinterpret_cast<char*>(&dims_len), 4);
std::vector<char> dims_struct(dims_len);
is.read(&dims_struct[0], dims_len);
is.read(data, len);
}
} // namespace pdpd
        // Loads persistable (weight) tensors either from per-variable files in
        // 'folder_with_weights' or from one combined 'weight_stream', and registers
        // each as a Constant in m_tensor_values keyed by the variable name.
        void InputModelPDPD::InputModelPDPDImpl::loadConsts(std::string folder_with_weights,
                                                            std::istream* weight_stream)
        {
            for (const auto& item : m_var_places)
            {
                const auto& var_desc = item.second->getDesc();
                const auto& name = item.first;
                // feed/fetch pseudo-variables carry no weight data.
                if (pdpd::endsWith(name, "feed") || pdpd::endsWith(name, "fetch"))
                    continue;
                // Only persistable variables are stored constants.
                if (!var_desc->persistable())
                    continue;
                FRONT_END_GENERAL_CHECK(var_desc->type().type() ==
                                        paddle::framework::proto::VarType::LOD_TENSOR);
                const auto& tensor = var_desc->type().lod_tensor().tensor();
                Shape shape(tensor.dims().cbegin(), tensor.dims().cend());
                const auto& type = TYPE_MAP[tensor.data_type()];
                const auto& data_length = shape_size(shape) * type.size();
                std::vector<uint8_t> tensor_data(data_length);
                if (weight_stream)
                {
                    // Combined-weights case: tensors follow each other in one stream.
                    pdpd::read_tensor(
                        *weight_stream, reinterpret_cast<char*>(&tensor_data[0]), data_length);
                }
                else if (!folder_with_weights.empty())
                {
                    // Folder case: one binary file per variable, named after it.
                    std::ifstream is(folder_with_weights + "/" + name,
                                     std::ios::in | std::ifstream::binary);
                    FRONT_END_GENERAL_CHECK(is && is.is_open(),
                                            "Cannot open file for constant value.");
                    pdpd::read_tensor(is, reinterpret_cast<char*>(&tensor_data[0]), data_length);
                }
                else
                {
                    FRONT_END_GENERAL_CHECK(
                        false, "Either folder with weights or stream must be provided.");
                }
                auto const_node = opset7::Constant::create(type, shape, &tensor_data[0]);
                const_node->set_friendly_name(name);
                m_tensor_values[name] = const_node;
            }
        }
        // Builds a model from a single path: either a '<name>.pdmodel' file (with
        // an optional sibling '<name>.pdiparams' weights file) or a folder that
        // contains a '__model__' file plus one weight file per variable.
        InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::string& path,
                                                               const InputModel& input_model)
            : m_fw_ptr{std::make_shared<ProgramDesc>()}
            , m_input_model(input_model)
        {
            std::string ext = ".pdmodel";
            std::string model_file(path);
            std::unique_ptr<std::ifstream> weights_stream;
            if (model_file.length() >= ext.length() &&
                (0 == model_file.compare(model_file.length() - ext.length(), ext.length(), ext)))
            {
                // '.pdmodel' given: try '<same name>.pdiparams' next to it.
                std::string weights_file(path);
                weights_file.replace(weights_file.size() - ext.size(), ext.size(), ".pdiparams");
                weights_stream = std::unique_ptr<std::ifstream>(
                    new std::ifstream(weights_file, std::ios::binary));
                // Don't throw error if file isn't opened
                // It may mean that model don't have constants
            }
            else
            {
                // Folder given: the serialized program lives in '__model__'.
                model_file += "/__model__";
            }
            std::ifstream pb_stream(model_file, std::ios::binary);
            FRONT_END_GENERAL_CHECK(m_fw_ptr->ParseFromIstream(&pb_stream),
                                    "Model can't be parsed");
            loadPlaces();
            // With a weights stream read all constants from it; otherwise read
            // per-variable files from the model folder.
            loadConsts(weights_stream ? "" : path, weights_stream.get());
        }
InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(
const std::vector<std::istream*>& streams, const InputModel& input_model)
: m_fw_ptr{std::make_shared<ProgramDesc>()}
, m_input_model(input_model)
{
if (streams.size() != 1)
{
FRONT_END_GENERAL_CHECK(
streams.size() == 2,
"Two streams are needed to load a model: model and weights streams");
}
FRONT_END_GENERAL_CHECK(m_fw_ptr->ParseFromIstream(streams[0]),
"Model can't be parsed");
loadPlaces();
if (streams.size() > 1)
loadConsts("", streams[1]);
}
        // Model inputs discovered from "feed" ops (or set via overrideAllInputs).
        std::vector<Place::Ptr> InputModelPDPD::InputModelPDPDImpl::getInputs() const
        {
            return m_inputs;
        }
        // Model outputs discovered from "fetch" ops (or set via overrideAllOutputs).
        std::vector<Place::Ptr> InputModelPDPD::InputModelPDPDImpl::getOutputs() const
        {
            return m_outputs;
        }
Place::Ptr InputModelPDPD::InputModelPDPDImpl::getPlaceByTensorName(
const std::string& tensorName) const
{
if (m_var_places.count(tensorName))
return m_var_places.at(tensorName);
return nullptr;
}
namespace pdpd
{
            // Normalizes any Place flavour to its underlying tensor place: a tensor
            // place is returned as-is, an input port yields its source tensor, an
            // output port yields its target tensor. Any other place type throws.
            std::shared_ptr<TensorPlacePDPD> castToTensorPlace(const Place::Ptr& place)
            {
                if (auto var_place = std::dynamic_pointer_cast<TensorPlacePDPD>(place))
                {
                    return var_place;
                }
                else if (auto in_port_place = std::dynamic_pointer_cast<InPortPlacePDPD>(place))
                {
                    return in_port_place->getSourceTensorPDPD();
                }
                else if (auto out_port_place = std::dynamic_pointer_cast<OutPortPlacePDPD>(place))
                {
                    return out_port_place->getTargetTensorPDPD();
                }
                FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlacePDPD.");
            }
} // namespace pdpd
void InputModelPDPD::InputModelPDPDImpl::overrideAllInputs(
const std::vector<Place::Ptr>& inputs)
{
m_inputs.clear();
for (const auto& inp : inputs)
{
m_inputs.push_back(pdpd::castToTensorPlace(inp));
}
}
void InputModelPDPD::InputModelPDPDImpl::overrideAllOutputs(
const std::vector<Place::Ptr>& outputs)
{
m_outputs.clear();
for (const auto& outp : outputs)
{
m_outputs.push_back(pdpd::castToTensorPlace(outp));
}
}
        // Re-roots the model on the given boundary: 'inputs' become the model
        // inputs and 'outputs' the model outputs (no ops are physically removed).
        void InputModelPDPD::InputModelPDPDImpl::extractSubgraph(
            const std::vector<Place::Ptr>& inputs, const std::vector<Place::Ptr>& outputs)
        {
            overrideAllInputs(inputs);
            overrideAllOutputs(outputs);
        }
        // Not supported for PDPD models.
        void InputModelPDPD::InputModelPDPDImpl::setDefaultShape(Place::Ptr place,
                                                                 const ngraph::Shape& shape)
        {
            FRONT_END_NOT_IMPLEMENTED("setDefaultShape");
        }
        // Records a user-requested (partial) shape on the tensor behind 'place'.
        void
            InputModelPDPD::InputModelPDPDImpl::setPartialShape(Place::Ptr place,
                                                                const ngraph::PartialShape& p_shape)
        {
            pdpd::castToTensorPlace(place)->setPartialShape(p_shape);
        }
        // Returns the (partial) shape recorded for the tensor behind 'place'.
        ngraph::PartialShape
            InputModelPDPD::InputModelPDPDImpl::getPartialShape(Place::Ptr place) const
        {
            return pdpd::castToTensorPlace(place)->getPartialShape();
        }
        // Records a user-requested element type on the tensor behind 'place'.
        void InputModelPDPD::InputModelPDPDImpl::setElementType(Place::Ptr place,
                                                                const ngraph::element::Type& type)
        {
            pdpd::castToTensorPlace(place)->setElementType(type);
        }
        // Freezes the tensor behind 'place' to a constant built from the raw bytes
        // in 'value', using the tensor's recorded type and shape.
        // NOTE(review): assumes the place's shape is static (to_shape()) and that
        // at least one tensor name is set - confirm callers guarantee this.
        void InputModelPDPD::InputModelPDPDImpl::setTensorValue(Place::Ptr place, const void* value)
        {
            auto tensor_place = pdpd::castToTensorPlace(place);
            auto p_shape = tensor_place->getPartialShape();
            auto type = tensor_place->getElementType();
            auto constant = opset7::Constant::create(type, p_shape.to_shape(), value);
            auto name = tensor_place->get_names()[0];
            constant->set_friendly_name(name);
            m_tensor_values[name] = constant;
        }
        // ---- InputModelPDPD public API: thin forwarding layer over the pimpl ----
        InputModelPDPD::InputModelPDPD(const std::string& path)
            : _impl{std::make_shared<InputModelPDPDImpl>(path, *this)}
        {
        }
        InputModelPDPD::InputModelPDPD(const std::vector<std::istream*>& streams)
            : _impl{std::make_shared<InputModelPDPDImpl>(streams, *this)}
        {
        }
        std::vector<std::shared_ptr<OpPlacePDPD>> InputModelPDPD::getOpPlaces() const
        {
            return _impl->getOpPlaces();
        }
        std::map<std::string, std::shared_ptr<TensorPlacePDPD>> InputModelPDPD::getVarPlaces() const
        {
            return _impl->getVarPlaces();
        }
        std::map<pdpd::TensorName, Output<Node>> InputModelPDPD::getTensorValues() const
        {
            return _impl->getTensorValues();
        }
        std::vector<Place::Ptr> InputModelPDPD::get_inputs() const { return _impl->getInputs(); }
        std::vector<Place::Ptr> InputModelPDPD::get_outputs() const { return _impl->getOutputs(); }
        Place::Ptr InputModelPDPD::get_place_by_tensor_name(const std::string& tensorName) const
        {
            return _impl->getPlaceByTensorName(tensorName);
        }
        void InputModelPDPD::override_all_outputs(const std::vector<Place::Ptr>& outputs)
        {
            return _impl->overrideAllOutputs(outputs);
        }
        void InputModelPDPD::override_all_inputs(const std::vector<Place::Ptr>& inputs)
        {
            return _impl->overrideAllInputs(inputs);
        }
        void InputModelPDPD::extract_subgraph(const std::vector<Place::Ptr>& inputs,
                                              const std::vector<Place::Ptr>& outputs)
        {
            return _impl->extractSubgraph(inputs, outputs);
        }
        void InputModelPDPD::set_partial_shape(Place::Ptr place,
                                               const ngraph::PartialShape& p_shape)
        {
            return _impl->setPartialShape(place, p_shape);
        }
        ngraph::PartialShape InputModelPDPD::get_partial_shape(Place::Ptr place) const
        {
            return _impl->getPartialShape(place);
        }
        void InputModelPDPD::set_element_type(Place::Ptr place, const ngraph::element::Type& type)
        {
            return _impl->setElementType(place, type);
        }
        void InputModelPDPD::set_tensor_value(Place::Ptr place, const void* value)
        {
            return _impl->setTensorValue(place, value);
        }
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,21 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "node_context.hpp"
// Defines the static 'type_info' member and explicitly instantiates VariantImpl
// for every attribute type the PDPD decoder supports (declared via
// NGRAPH_VARIANT_DECLARATION in node_context.hpp).
#define NGRAPH_VARIANT_DEFINITION(TYPE) \
    constexpr VariantTypeInfo VariantWrapper<TYPE>::type_info; \
    template class ngraph::VariantImpl<TYPE>;
namespace ngraph
{
    NGRAPH_VARIANT_DEFINITION(int32_t)
    NGRAPH_VARIANT_DEFINITION(std::vector<int32_t>)
    NGRAPH_VARIANT_DEFINITION(float)
    NGRAPH_VARIANT_DEFINITION(std::vector<float>)
    NGRAPH_VARIANT_DEFINITION(bool)
    NGRAPH_VARIANT_DEFINITION(ngraph::element::Type)
    NGRAPH_VARIANT_DEFINITION(std::vector<int64_t>)
} // namespace ngraph

View File

@ -0,0 +1,182 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <ngraph/variant.hpp>
#include <paddlepaddle_frontend/exceptions.hpp>
#include <paddlepaddle_frontend/utility.hpp>
// Declares a VariantWrapper specialization for TYPE so that PDPD attribute
// values of that type can travel through ngraph::Variant. 'info' is the
// variant's runtime type name; the version is fixed at 0.
#define NGRAPH_VARIANT_DECLARATION(TYPE, info) \
    template <> \
    class VariantWrapper<TYPE> : public VariantImpl<TYPE> \
    { \
    public: \
        static constexpr VariantTypeInfo type_info{info, 0}; \
        const VariantTypeInfo& get_type_info() const override { return type_info; } \
        VariantWrapper(const value_type& value) \
            : VariantImpl<value_type>(value) \
        { \
        } \
    }
namespace ngraph
{
    // VariantWrapper specializations for every attribute type that can appear in
    // a PDPD op; the string is the variant's runtime type name.
    NGRAPH_VARIANT_DECLARATION(int32_t, "Variant::int32");
    NGRAPH_VARIANT_DECLARATION(std::vector<int32_t>, "Variant::int32_vector");
    NGRAPH_VARIANT_DECLARATION(float, "Variant::float");
    NGRAPH_VARIANT_DECLARATION(std::vector<float>, "Variant::float_vector");
    NGRAPH_VARIANT_DECLARATION(bool, "Variant::bool");
    NGRAPH_VARIANT_DECLARATION(ngraph::element::Type, "Variant::element_type");
    NGRAPH_VARIANT_DECLARATION(std::vector<int64_t>, "Variant::int64_vector");
namespace frontend
{
namespace pdpd
{
            // PDPD identifies ports and tensors by string names; conversion
            // results are grouped per output-port name.
            using InPortName = std::string;
            using OutPortName = std::string;
            using TensorName = std::string;
            using NamedOutputs = std::map<OutPortName, OutputVector>;
            using NamedInputs = std::map<InPortName, OutputVector>;
class DecoderBase
{
public:
/// \brief Get attribute value by name and requested type
///
/// \param name Attribute name
/// \param type_info Attribute type information
/// \return Shared pointer to appropriate value if it exists, 'nullptr' otherwise
virtual std::shared_ptr<Variant>
get_attribute(const std::string& name,
const VariantTypeInfo& type_info) const = 0;
virtual std::vector<OutPortName> get_output_names() const = 0;
/// \brief Get output port type
///
/// Current API assumes that output port has only one output type.
/// If decoder supports multiple types for specified port, it shall throw general
/// exception
///
/// \param port_name Port name for the node
///
/// \return Type of specified output port
virtual ngraph::element::Type
get_out_port_type(const std::string& port_name) const = 0;
virtual std::string get_op_type() const = 0;
};
/// Keep necessary data for a single node in the original FW graph to facilitate
/// conversion process in the rules code.
class NodeContext
{
const DecoderBase& decoder;
const NamedInputs& name_map;
public:
NodeContext(const DecoderBase& _decoder, const NamedInputs& _name_map)
: decoder(_decoder)
, name_map(_name_map)
{
}
/// Returns node attribute by name. Returns 'def' value if attribute does not exist
template <typename T>
T get_attribute(const std::string& name, const T& def) const
{
auto res = decoder.get_attribute(name, VariantWrapper<T>::type_info);
if (res)
{
auto ret = std::dynamic_pointer_cast<VariantWrapper<T>>(res);
FRONT_END_GENERAL_CHECK(
ret, "Attribute with name '", name, "' has invalid type");
return ret->get();
}
else
{
return def;
}
}
template <typename T>
T get_attribute(const std::string& name) const
{
auto res = decoder.get_attribute(name, VariantWrapper<T>::type_info);
FRONT_END_GENERAL_CHECK(res, "Attribute with name '", name, "' does not exist");
auto ret = std::dynamic_pointer_cast<VariantWrapper<T>>(res);
FRONT_END_GENERAL_CHECK(
ret, "Attribute with name '", name, "' has invalid type");
return ret->get();
}
template <typename T>
bool has_attribute(const std::string& name) const
{
return decoder.get_attribute(name, VariantWrapper<T>::type_info) != nullptr;
}
/// Detects if there is at least one input attached with a given name
bool has_ng_input(const std::string& name) const
{
auto found = name_map.find(name);
if (found != name_map.end())
return !found->second.empty();
return false;
}
/// Returns exactly one input with a given name; throws if there is no inputs or
/// there are more than one input
Output<Node> get_ng_input(const std::string& name) const
{
FRONT_END_GENERAL_CHECK(name_map.at(name).size() == 1);
return name_map.at(name).at(0);
}
/// Returns all inputs with a given name
OutputVector get_ng_inputs(const std::string& name) const
{
return name_map.at(name);
}
std::vector<OutPortName> get_output_names() const
{
return decoder.get_output_names();
}
ngraph::element::Type get_out_port_type(const std::string& port_name) const
{
return decoder.get_out_port_type(port_name);
}
std::string get_op_type() const { return decoder.get_op_type(); }
NamedOutputs default_single_output_mapping(
const std::shared_ptr<Node>& ngraph_node,
const std::vector<OutPortName>& required_pdpd_out_names) const;
};
inline NamedOutputs NodeContext::default_single_output_mapping(
const std::shared_ptr<Node>& ngraph_node,
const std::vector<OutPortName>& required_pdpd_out_names) const
{
NamedOutputs named_outputs;
const auto& ngraph_outputs = ngraph_node->outputs();
const auto& pdpd_op_output_names = this->get_output_names();
FRONT_END_GENERAL_CHECK(ngraph_outputs.size() == 1,
"nGraph node must have exactly one output");
for (const auto& pdpd_name : pdpd_op_output_names)
{
if (std::find(required_pdpd_out_names.begin(),
required_pdpd_out_names.end(),
pdpd_name) != required_pdpd_out_names.end())
named_outputs[pdpd_name] = {ngraph_outputs[0]};
}
return named_outputs;
}
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,25 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "conv2d.hpp"
#include <ngraph/opsets/opset6.hpp>
#include "conv2d_utils.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
                // PDPD 'conv2d': lowered through the shared conv implementation,
                // GroupConvolution when 'groups' > 1 and Convolution otherwise.
                NamedOutputs conv2d(const NodeContext& node)
                {
                    return conv2d_base<opset6::GroupConvolution, opset6::Convolution>(node);
                }
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,21 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
                // Converts the PDPD 'conv2d' operator (implementation shared with
                // conv2d_utils).
                NamedOutputs conv2d(const NodeContext& node_context);
}
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,116 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/opsets/opset6.hpp>
#include "conv2d_utils.hpp"
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
ngraph::op::PadType get_auto_pad(const NodeContext& node)
{
// Default value means use explicitly provided padding values.
ngraph::op::PadType pad_type{ngraph::op::PadType::NOTSET};
auto padding_algorithm = node.get_attribute<std::string>("padding_algorithm");
static std::unordered_map<std::string, ngraph::op::PadType> auto_pad_values{
{"VALID", ngraph::op::PadType::VALID},
{"SAME", ngraph::op::PadType::SAME_UPPER},
{"NOTSET", ngraph::op::PadType::NOTSET},
};
const auto pad_val_it = auto_pad_values.find(padding_algorithm);
if (pad_val_it == auto_pad_values.end())
{
pad_type = ngraph::op::PadType::NOTSET;
}
else
{
pad_type = pad_val_it->second;
}
return pad_type;
}
std::pair<CoordinateDiff, CoordinateDiff> get_pads(const NodeContext& node,
const size_t kernel_rank)
{
CoordinateDiff pads(kernel_rank, 0);
auto pads_int32 = node.get_attribute<std::vector<int32_t>>("paddings");
pads = CoordinateDiff{std::begin(pads_int32), std::end(pads_int32)};
CoordinateDiff pads_begin;
CoordinateDiff pads_end;
if (pads.size() == kernel_rank * 2)
{
for (size_t i = 0; i < pads.size(); i++)
{
if (i & 0x01)
{
pads_end.push_back(pads[i]);
}
else
{
pads_begin.push_back(pads[i]);
}
}
return {pads_begin, pads_end};
}
else
{
// No paddings provided or only one side values provided, which means same
// padding at both begin and end of axis.
return {pads, pads};
}
}
                // Convenience overload: derives the spatial rank from the "Input"
                // tensor (rank minus batch and channel dims) and delegates.
                std::pair<CoordinateDiff, CoordinateDiff> get_pads(const NodeContext& node)
                {
                    const auto data_rank = node.get_ng_input("Input").get_partial_shape().rank();
                    PDPD_ASSERT(data_rank.get_length() > 2, "the rank of conv input must > 2");
                    const auto data_spatial_dims = data_rank.get_length() - 2;
                    return get_pads(node, data_spatial_dims);
                }
                // Reshapes filters [N, ...] into the grouped layout
                // [groups, N / groups, <remaining dims 1..4>] expected by
                // GroupConvolution. The target shape is computed in-graph
                // (ShapeOf + slices) so non-constant filter shapes also work.
                std::shared_ptr<Node> get_reshaped_filter(const Output<Node>& filters,
                                                          const int32_t groups)
                {
                    auto shape_of_filters = std::make_shared<opset6::ShapeOf>(filters);
                    // Slice out the output-channel count N (dim 0)...
                    auto num_begin = opset6::Constant::create(element::i64, Shape{1}, {0});
                    auto num_end = opset6::Constant::create(element::i64, Shape{1}, {1});
                    auto num_node = std::make_shared<opset6::StridedSlice>(shape_of_filters,
                                                                           num_begin,
                                                                           num_end,
                                                                           std::vector<int64_t>{0},
                                                                           std::vector<int64_t>{0});
                    // ...and the trailing dims 1..4.
                    auto hw_begin = opset6::Constant::create(element::i64, Shape{1}, {1});
                    auto hw_end = opset6::Constant::create(element::i64, Shape{1}, {4});
                    auto filter_hw_node =
                        std::make_shared<opset6::StridedSlice>(shape_of_filters,
                                                               hw_begin,
                                                               hw_end,
                                                               std::vector<int64_t>{0},
                                                               std::vector<int64_t>{0});
                    auto groups_node = opset6::Constant::create(element::i64, Shape{1}, {groups});
                    auto grouped_num_node = std::make_shared<opset6::Divide>(num_node, groups_node);
                    // Target shape = [groups, N / groups, dims 1..4].
                    auto target_filter_shape = std::make_shared<opset6::Concat>(
                        OutputVector{groups_node, grouped_num_node, filter_hw_node}, 0);
                    return std::make_shared<opset6::Reshape>(filters, target_filter_shape, false);
                }
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,71 @@
// Copyright (C) 2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
                // Maps the PDPD 'padding_algorithm' attribute to an auto-pad mode.
                ngraph::op::PadType get_auto_pad(const NodeContext& node);
                // Splits the 'paddings' attribute into (pads_begin, pads_end).
                std::pair<CoordinateDiff, CoordinateDiff> get_pads(const NodeContext& node);
                // Reshapes filters into the grouped layout for GroupConvolution.
                std::shared_ptr<Node> get_reshaped_filter(const Output<Node>& filters,
                                                          int32_t groups);
                // Shared conv2d lowering. T1 is the grouped-convolution op used when
                // 'groups' > 1, T2 the plain convolution op otherwise. Reads PDPD
                // inputs "Input"/"Filter" and produces "Output".
                template <typename T1, typename T2>
                NamedOutputs conv2d_base(const NodeContext& node)
                {
                    auto data = node.get_ng_input("Input");
                    auto filters = node.get_ng_input("Filter");
                    const auto strides = node.get_attribute<std::vector<int32_t>>("strides");
                    const auto dilations = node.get_attribute<std::vector<int32_t>>("dilations");
                    const auto auto_pad_type = get_auto_pad(node);
                    const auto paddings = get_pads(node);
                    const auto pads_begin = paddings.first;
                    const auto pads_end = paddings.second;
                    const auto groups = node.get_attribute<int32_t>("groups");
                    const auto data_format = node.get_attribute<std::string>("data_format");
                    // TODO Support Other data layout #55423
                    PDPD_ASSERT(data_format == "NCHW", "conv2d only supports NCHW now");
                    if (groups > 1)
                    {
                        // Grouped convolution expects filters pre-split per group.
                        const auto reshaped_filters = get_reshaped_filter(filters, groups);
                        return node.default_single_output_mapping(
                            {std::make_shared<T1>(
                                data,
                                reshaped_filters,
                                ngraph::Strides(strides.begin(), strides.end()),
                                pads_begin,
                                pads_end,
                                ngraph::Strides(dilations.begin(), dilations.end()),
                                auto_pad_type)},
                            {"Output"});
                    }
                    else
                    {
                        return node.default_single_output_mapping(
                            {std::make_shared<T2>(
                                data,
                                filters,
                                ngraph::Strides(strides.begin(), strides.end()),
                                pads_begin,
                                pads_end,
                                ngraph::Strides(dilations.begin(), dilations.end()),
                                auto_pad_type)},
                            {"Output"});
                    }
                }
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,102 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <map>
#include <ngraph/opsets/opset6.hpp>
#include "elementwise_ops.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
                // Shared lowering for PDPD binary elementwise ops (inputs "X", "Y").
                // When 'axis' already matches numpy-style alignment or ranks are
                // equal, relies on nGraph's implicit broadcasting; otherwise Y is
                // first reshaped to X's rank with 1s outside [axis, axis + y_rank).
                template <typename T>
                NamedOutputs elementwise_ops(const NodeContext& node)
                {
                    auto x = node.get_ng_input("X");
                    auto y = node.get_ng_input("Y");
                    auto axis = node.get_attribute<int>("axis");
                    PDPD_OP_VALIDATION_CHECK(node,
                                             x.get_partial_shape().rank().is_static(),
                                             "elementwise_ops: X rank must be static!");
                    PDPD_OP_VALIDATION_CHECK(node,
                                             y.get_partial_shape().rank().is_static(),
                                             "elementwise_ops: Y rank must be static!");
                    int64_t x_rank = x.get_partial_shape().rank().get_length();
                    int64_t y_rank = y.get_partial_shape().rank().get_length();
                    if ((axis == -1) || (axis == x_rank - 1) || (x_rank == y_rank))
                    {
                        return node.default_single_output_mapping({std::make_shared<T>(x, y)},
                                                                  {"Out"});
                    }
                    else
                    {
                        // This broadcast can be implemented by either ngraph::Reshape or
                        // ngraph::Broadcast. Since PDPD implicates y_shape is a subsequence of
                        // x_shape starting from axis, to use ngraph::Reshape like Paddle2ONNX,
                        // which is more friendly to PnP.
                        // NOTE(review): requires Y's dimensions to be static
                        // (get_length() below) - confirm upstream guarantees this.
                        auto broadcast_shape = std::vector<int64_t>(x_rank, 1);
                        PartialShape y_shape = y.get_partial_shape();
                        int32_t i = 0;
                        for (auto it = y_shape.begin(); it != y_shape.end(); ++i, ++it)
                            broadcast_shape[axis + i] = (*it).get_length();
                        auto reshape_node =
                            ngraph::opset6::Constant::create(ngraph::element::i64,
                                                             ngraph::Shape{broadcast_shape.size()},
                                                             broadcast_shape);
                        auto y_node =
                            std::make_shared<ngraph::opset6::Reshape>(y, reshape_node, false);
                        return node.default_single_output_mapping({std::make_shared<T>(x, y_node)},
                                                                  {"Out"});
                    }
                }
                // Thin per-op entry points over the shared elementwise template.
                NamedOutputs elementwise_add(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Add>(node_context);
                }
                NamedOutputs elementwise_sub(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Subtract>(node_context);
                }
                NamedOutputs elementwise_mul(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Multiply>(node_context);
                }
                NamedOutputs elementwise_div(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Divide>(node_context);
                }
                NamedOutputs elementwise_min(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Minimum>(node_context);
                }
                NamedOutputs elementwise_max(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Maximum>(node_context);
                }
                NamedOutputs elementwise_pow(const NodeContext& node_context)
                {
                    return elementwise_ops<ngraph::opset6::Power>(node_context);
                }
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
                // Binary elementwise PDPD ops (inputs "X", "Y" -> output "Out").
                NamedOutputs elementwise_add(const NodeContext& node_context);
                NamedOutputs elementwise_sub(const NodeContext& node_context);
                NamedOutputs elementwise_mul(const NodeContext& node_context);
                NamedOutputs elementwise_div(const NodeContext& node_context);
                NamedOutputs elementwise_min(const NodeContext& node_context);
                NamedOutputs elementwise_max(const NodeContext& node_context);
                NamedOutputs elementwise_pow(const NodeContext& node_context);
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,25 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "relu.hpp"
#include <ngraph/opsets/opset6.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Maps PaddlePaddle 'relu' onto opset6::Relu: one input "X", one output "Out".
NamedOutputs relu(const NodeContext& node)
{
    const auto input = node.get_ng_input("X");
    const auto relu_node = std::make_shared<ngraph::opset6::Relu>(input);
    return node.default_single_output_mapping({relu_node}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,21 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
NamedOutputs relu(const NodeContext& node);
}
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,62 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "scale.hpp"
#include <ngraph/builder/make_constant.hpp>
#include <ngraph/opsets/opset6.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PaddlePaddle 'scale':
//   bias_after_scale == true  ->  Out = X * scale + bias
//   bias_after_scale == false ->  Out = (X + bias) * scale
// The scale factor comes either from the optional "ScaleTensor" input
// (converted to the input element type if needed) or from the "scale"
// attribute; the output keeps the input's element type.
NamedOutputs scale(const NodeContext& node)
{
    const auto data = node.get_ng_input("X");
    const auto dtype = data.get_element_type();

    Output<Node> scale_value;
    if (node.has_ng_input("ScaleTensor"))
    {
        const auto scale_tensor = node.get_ng_input("ScaleTensor");
        if (scale_tensor.get_element_type() == dtype)
        {
            scale_value = scale_tensor;
        }
        else
        {
            // Align the tensor's type with the data so Multiply type-checks.
            scale_value = std::make_shared<opset6::Convert>(scale_tensor, dtype);
        }
    }
    else
    {
        scale_value =
            builder::make_constant(dtype, Shape{1}, node.get_attribute<float>("scale"));
    }

    const Output<Node> bias_value =
        builder::make_constant(dtype, Shape{1}, node.get_attribute<float>("bias"));

    std::shared_ptr<Node> result;
    if (node.get_attribute<bool>("bias_after_scale"))
    {
        // Out = X * scale + bias
        const auto scaled = std::make_shared<ngraph::opset6::Multiply>(data, scale_value);
        result = std::make_shared<ngraph::opset6::Add>(scaled, bias_value);
    }
    else
    {
        // Out = (X + bias) * scale
        const auto shifted = std::make_shared<ngraph::opset6::Add>(data, bias_value);
        result = std::make_shared<ngraph::opset6::Multiply>(shifted, scale_value);
    }
    return node.default_single_output_mapping({result}, {"Out"});
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
NamedOutputs scale(const NodeContext& node);
}
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,80 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "split.hpp"
#include <ngraph/opsets/opset7.hpp>
#include <paddlepaddle_frontend/utility.hpp>
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
// Converts PaddlePaddle 'split' to opset7 Split (equal parts, attribute
// "num" > 0) or VariadicSplit ("num" == 0, part sizes from the "sections"
// attribute or the "SectionsTensorList" input). The split axis comes from
// the optional "AxisTensor" input or the "axis" attribute (default -1).
NamedOutputs split(const NodeContext& node)
{
    using namespace ngraph;
    using namespace opset7;
    const auto& data = node.get_ng_input("X");
    Output<Node> axis;
    if (node.has_ng_input("AxisTensor"))
    {
        auto input = node.get_ng_input("AxisTensor");
        // NOTE(review): the Reshape pattern is a 1-element tensor holding 0
        // with special_zero=false, which targets shape [0] rather than a
        // scalar — presumably intended to squeeze AxisTensor to a scalar
        // axis; confirm against Split's axis requirements.
        auto zero_node = Constant::create(element::i32, {1}, {0});
        axis = std::make_shared<Reshape>(input, zero_node, false);
    }
    else
    {
        // Default axis is -1 (last dimension) when the attribute is absent.
        auto dim = -1;
        if (node.has_attribute<int32_t>("axis"))
        {
            dim = node.get_attribute<int32_t>("axis");
        }
        axis = std::make_shared<Constant>(ngraph::element::i32, Shape{}, dim);
    }
    auto num_or_sections = node.get_attribute<int32_t>("num");

    NamedOutputs named_outputs;
    std::vector<Output<Node>> split_outputs;
    if (num_or_sections == 0)
    {
        // Variadic split: explicit per-output sizes.
        Output<Node> sections_node;
        if (node.has_ng_input("SectionsTensorList"))
        {
            // Runtime-provided sizes: concatenate the scalar tensors into a 1-D vector.
            auto inputs = node.get_ng_inputs("SectionsTensorList");
            sections_node = std::make_shared<ngraph::opset7::Concat>(inputs, 0);
        }
        else
        {
            PDPD_ASSERT(node.has_attribute<std::vector<int32_t>>("sections"),
                        "split: num==0 && no sections is invalid.");
            auto sections = node.get_attribute<std::vector<int32_t>>("sections");
            sections_node =
                Constant::create(element::i32, {sections.size()}, sections);
        }
        split_outputs =
            std::make_shared<VariadicSplit>(data, axis, sections_node)->outputs();
    }
    else
    {
        // Equal split into 'num' parts along 'axis'.
        split_outputs =
            std::make_shared<Split>(data, axis, num_or_sections)->outputs();
    }

    // PDPD declares a single output port name ("Out") that carries all the
    // split results; collect every nGraph output under that one name.
    auto out_names = node.get_output_names();
    PDPD_ASSERT(out_names.size() == 1, "Unexpected number of outputs");

    auto it = std::find(out_names.begin(), out_names.end(), "Out");
    PDPD_ASSERT(it != out_names.end(), "Expected output not found");
    for (const auto& split_output : split_outputs)
    {
        named_outputs[*it].push_back(split_output);
    }
    return named_outputs;
}
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
namespace op
{
NamedOutputs split(const NodeContext& node);
} // namespace op
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,36 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op/conv2d.hpp"
#include "op/elementwise_ops.hpp"
#include "op/relu.hpp"
#include "op/scale.hpp"
#include "op/split.hpp"
#include "op_table.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
// Returns the registry mapping PaddlePaddle operator type strings (as they
// appear in the serialized ProgramDesc) to their nGraph converter functions.
// Fix: dropped the stray ';' that followed the function body (it was an empty
// declaration, flagged by -Wextra-semi / pedantic builds).
std::map<std::string, CreatorFunction> get_supported_ops()
{
    return {{"conv2d", op::conv2d},
            {"elementwise_add", op::elementwise_add},
            {"elementwise_div", op::elementwise_div},
            {"elementwise_max", op::elementwise_max},
            {"elementwise_min", op::elementwise_min},
            {"elementwise_mul", op::elementwise_mul},
            {"elementwise_pow", op::elementwise_pow},
            {"elementwise_sub", op::elementwise_sub},
            {"relu", op::relu},
            {"scale", op::scale},
            {"split", op::split}};
}
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,27 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <functional>
#include <map>
#include <string>
#include <ngraph/output_vector.hpp>
#include "node_context.hpp"
namespace ngraph
{
namespace frontend
{
namespace pdpd
{
using CreatorFunction = std::function<NamedOutputs(const NodeContext&)>;
std::map<std::string, CreatorFunction> get_supported_ops();
} // namespace pdpd
} // namespace frontend
} // namespace ngraph

View File

@ -0,0 +1,115 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <paddlepaddle_frontend/place.hpp>
#include "decoder.hpp"
#include "framework.pb.h"
using namespace ngraph;
using namespace frontend;
// A place is a model input iff this exact object appears in the
// InputModel's input list (identity comparison, not name comparison).
bool PlacePDPD::is_input() const
{
    const auto& inputs = m_input_model.get_inputs();
    return std::any_of(inputs.begin(), inputs.end(), [this](const Place::Ptr& place) {
        return place.get() == this;
    });
}
// A place is a model output iff this exact object appears in the
// InputModel's output list (identity comparison, not name comparison).
bool PlacePDPD::is_output() const
{
    const auto& outputs = m_input_model.get_outputs();
    return std::any_of(outputs.begin(), outputs.end(), [this](const Place::Ptr& place) {
        return place.get() == this;
    });
}
OpPlacePDPD::OpPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc)
: PlacePDPD(input_model, names)
, m_op_desc(op_desc)
{
}
OpPlacePDPD::OpPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::OpDesc>& op_desc)
: OpPlacePDPD(input_model, {}, op_desc)
{
}
TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
const std::vector<std::string>& names,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc)
: PlacePDPD(input_model, names)
, m_var_desc(var_desc)
{
const auto& var_type = var_desc->type();
if (var_type.type() == paddle::framework::proto::VarType::LOD_TENSOR)
{
const auto& tensor_desc = var_type.lod_tensor().tensor();
m_type = TYPE_MAP[tensor_desc.data_type()];
m_pshape = PartialShape(
std::vector<Dimension>(tensor_desc.dims().begin(), tensor_desc.dims().end()));
}
}
TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model,
const std::shared_ptr<paddle::framework::proto::VarDesc>& var_desc)
: TensorPlacePDPD(input_model, {var_desc->name()}, var_desc)
{
}
std::vector<Place::Ptr> TensorPlacePDPD::get_consuming_ports() const
{
std::vector<Place::Ptr> consuming_ports;
for (const auto& consuming_port : m_consuming_ports)
{
if (const auto& locked = consuming_port.lock())
{
consuming_ports.push_back(locked);
}
else
{
FRONT_END_THROW("Consuming Port has expired.");
}
}
return consuming_ports;
}
// Returns the single port that produces this tensor.
// Bug fix: the check used 'm_producing_ports.size() > 1', which threw
// "Only one producing port is supported." in the valid single-producer case
// and silently accepted multiple producers — the opposite of the stated
// contract. Require exactly one producer instead.
Place::Ptr TensorPlacePDPD::get_producing_port() const
{
    FRONT_END_GENERAL_CHECK(m_producing_ports.size() == 1,
                            "Only one producing port is supported.");
    if (const auto& producing_port = m_producing_ports[0].lock())
    {
        return producing_port;
    }
    FRONT_END_THROW("Producing Port has expired.");
}
// Resolves the weakly-held source tensor; throws if it has been destroyed.
std::shared_ptr<TensorPlacePDPD> InPortPlacePDPD::getSourceTensorPDPD() const
{
    auto tensor = m_source_tensor.lock();
    if (!tensor)
    {
        FRONT_END_THROW("Source Tensor has expired.");
    }
    return tensor;
}
// Resolves the weakly-held owning operation; throws if it has been destroyed.
std::shared_ptr<OpPlacePDPD> InPortPlacePDPD::getOp()
{
    auto op = m_op.lock();
    if (!op)
    {
        FRONT_END_THROW("Operation has expired.");
    }
    return op;
}
std::shared_ptr<TensorPlacePDPD> OutPortPlacePDPD::getTargetTensorPDPD() const
{
if (const auto& target_tensor = m_target_tensor.lock())
{
return target_tensor;
}
FRONT_END_THROW("Target Tensor has expired.");
}

View File

@ -0,0 +1,205 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
syntax = "proto2";
package paddle.framework.proto;
// Any incompatible changes to ProgramDesc and its dependencies should
// raise the version defined version.h.
//
// Serailization and Deserialization codes should be modified in a way
// that supports old versions following the version and compatibility policy.
message Version { optional int64 version = 1 [ default = 0 ]; }
enum AttrType {
INT = 0;
FLOAT = 1;
STRING = 2;
INTS = 3;
FLOATS = 4;
STRINGS = 5;
BOOLEAN = 6;
BOOLEANS = 7;
BLOCK = 8;
LONG = 9;
BLOCKS = 10;
LONGS = 11;
}
// OpDesc describes an instance of a C++ framework::OperatorBase
// derived class type.
message OpDesc {
message Attr {
required string name = 1;
required AttrType type = 2;
optional int32 i = 3;
optional float f = 4;
optional string s = 5;
repeated int32 ints = 6;
repeated float floats = 7;
repeated string strings = 8;
optional bool b = 10;
repeated bool bools = 11;
optional int32 block_idx = 12;
optional int64 l = 13;
repeated int32 blocks_idx = 14;
repeated int64 longs = 15;
};
message Var {
required string parameter = 1;
repeated string arguments = 2;
};
required string type = 3;
repeated Var inputs = 1;
repeated Var outputs = 2;
repeated Attr attrs = 4;
optional bool is_target = 5 [ default = false ];
};
// OpProto describes a C++ framework::OperatorBase derived class.
message OpProto {
// VarProto describes the C++ type framework::Variable.
message Var {
required string name = 1;
required string comment = 2;
optional bool duplicable = 3 [ default = false ];
optional bool intermediate = 4 [ default = false ];
optional bool dispensable = 5 [ default = false ];
}
// AttrProto describes the C++ type Attribute.
message Attr {
required string name = 1;
required AttrType type = 2;
required string comment = 3;
// If that attribute is generated, it means the Paddle third
// language binding has responsibility to fill that
// attribute. End-User should not set that attribute.
optional bool generated = 4 [ default = false ];
}
required string type = 1;
repeated Var inputs = 2;
repeated Var outputs = 3;
repeated Attr attrs = 4;
required string comment = 5;
}
message VarType {
enum Type {
// Pod Types
BOOL = 0;
INT16 = 1;
INT32 = 2;
INT64 = 3;
FP16 = 4;
FP32 = 5;
FP64 = 6;
// Tensor<size_t> is used in C++.
SIZE_T = 19;
UINT8 = 20;
INT8 = 21;
BF16 = 22;
COMPLEX64 = 23;
COMPLEX128 = 24;
// Other types that may need additional descriptions
LOD_TENSOR = 7;
SELECTED_ROWS = 8;
FEED_MINIBATCH = 9;
FETCH_LIST = 10;
STEP_SCOPES = 11;
LOD_RANK_TABLE = 12;
LOD_TENSOR_ARRAY = 13;
PLACE_LIST = 14;
READER = 15;
// Any runtime decided variable type is raw
// raw variables should manage their own allocations
// in operators like nccl_op
RAW = 17;
TUPLE = 18;
}
required Type type = 1;
message TensorDesc {
// Should only be PODType. Is enforced in C++
required Type data_type = 1;
repeated int64 dims = 2; // [UNK, 640, 480] is saved as [-1, 640, 480]
}
optional TensorDesc selected_rows = 2;
message LoDTensorDesc {
required TensorDesc tensor = 1;
optional int32 lod_level = 2 [ default = 0 ];
}
optional LoDTensorDesc lod_tensor = 3;
message LoDTensorArrayDesc {
required TensorDesc tensor = 1;
optional int32 lod_level = 2 [ default = 0 ];
}
optional LoDTensorArrayDesc tensor_array = 4;
message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; }
optional ReaderDesc reader = 5;
message Tuple { repeated Type element_type = 1; }
optional Tuple tuple = 7;
}
message VarDesc {
required string name = 1;
required VarType type = 2;
optional bool persistable = 3 [ default = false ];
// True if the variable is an input data and
// have to check the feed data shape and dtype
optional bool need_check_feed = 4 [ default = false ];
}
message BlockDesc {
required int32 idx = 1;
required int32 parent_idx = 2;
repeated VarDesc vars = 3;
repeated OpDesc ops = 4;
optional int32 forward_block_idx = 5 [ default = -1 ];
}
// In some cases, Paddle may perform operator definition iterations,
// and the operator uses OpVersionMap for compatibility testing.
message OpVersion { required int32 version = 1; }
message OpVersionMap {
message OpVersionPair {
required string op_name = 1;
required OpVersion op_version = 2;
}
repeated OpVersionPair pair = 1;
}
// Please refer to
// https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md
// for more details.
// TODO(panyx0718): A model can have multiple programs. Need a
// way to distinguish them. Maybe ID or name?
message ProgramDesc {
reserved 2, 3; // For backward compatibility.
repeated BlockDesc blocks = 1;
optional Version version = 4;
optional OpVersionMap op_version_map = 5;
}

View File

@ -507,7 +507,8 @@ if (NGRAPH_ONNX_EDITOR_ENABLE)
onnx/onnx_import_with_editor.in.cpp)
endif()
# SOURCE AND HEADERS FOR FRONTEND TESTING
# SOURCE FOR FRONTEND TESTING
file(GLOB FRONTEND_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/frontend/frontend_manager.cpp)
set(SRC ${FRONTEND_TESTS_SRC} ${SRC})
@ -515,6 +516,37 @@ file(GLOB FRONTEND_SHARED_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/frontend/shared/
file(GLOB FRONTEND_SHARED_TESTS_HDR ${CMAKE_CURRENT_SOURCE_DIR}/frontend/shared/include/*.hpp)
set(SRC ${FRONTEND_SHARED_TESTS_SRC} ${SRC})
# ---- PaddlePaddle FrontEnd testing ------
# PDPD unit tests need a Python 3 interpreter with the 'paddlepaddle' package
# installed, because the test models are (re)generated at build time.
# PDPD_PYTHON_OK gates both the test sources below and the model-generator
# targets defined later in this file.
if (NGRAPH_PDPD_FRONTEND_ENABLE)
    # NOTE: find_package(PythonInterp) is deprecated in modern CMake in favor
    # of find_package(Python3); kept here because PYTHON_EXECUTABLE is used by
    # the generator rules below.
    find_package (PythonInterp 3 REQUIRED)
    set(PDPD_PYTHON_OK TRUE)
    if(NOT PYTHON_EXECUTABLE)
        message(WARNING "Python3 is required to build the PDPD frontend unit tests")
        set(PDPD_PYTHON_OK FALSE)
    endif()

    if (PDPD_PYTHON_OK)
        execute_process(
            COMMAND ${PYTHON_EXECUTABLE} -m pip show paddlepaddle
            RESULT_VARIABLE PIP_EXIT_CODE
            OUTPUT_QUIET
        )
        # Fix: reference the variable by name. The previous form
        # 'if (NOT ${PIP_EXIT_CODE} EQUAL 0)' expands the value first and lets
        # if() dereference it again; it also breaks if the value is empty.
        if (NOT PIP_EXIT_CODE EQUAL 0)
            message(WARNING "Python paddlepaddle package is not installed. Please use \"pip install paddlepaddle==2.0.1\".")
            set(PDPD_PYTHON_OK FALSE)
        endif()
    endif()

    if (PDPD_PYTHON_OK)
        file(GLOB FRONTEND_PDPD_TESTS_SRC ${CMAKE_CURRENT_SOURCE_DIR}/frontend/paddlepaddle/*.cpp)
        set(SRC ${FRONTEND_PDPD_TESTS_SRC} ${SRC})
        set(TEST_PDPD_MODELS ${CMAKE_CURRENT_BINARY_DIR}/pdpd_test_models/)
        # NOTE(review): add_definitions is directory-scoped; a target-scoped
        # target_compile_definitions(unit-test ...) would be preferable, but
        # the unit-test target is created later in this file.
        add_definitions("-DTEST_PDPD_MODELS=\"${TEST_PDPD_MODELS}\"")
    endif()
endif()
# ---- End PaddlePaddle FrontEnd testing ------
add_clang_format_target(unit-test_clang FOR_SOURCES ${SRC} ${MULTI_TEST_SRC} ${FRONTEND_SHARED_TESTS_HDR})
foreach(BACKEND_NAME ${ACTIVE_BACKEND_LIST})
@ -605,3 +637,26 @@ target_link_libraries(unit-test PRIVATE frontend_manager)
add_subdirectory(frontend)
### END FRONTEND ###
# PaddlePaddle - test models generator.
# For each generate_*.py script, run it through gen_wrapper.py, which writes a
# '<script>_done.txt' mark file; that mark file is the custom command's OUTPUT,
# so models regenerate only when a script (or the wrapper) changes.
if (NGRAPH_PDPD_FRONTEND_ENABLE AND PDPD_PYTHON_OK)
    file(GLOB_RECURSE PDPD_GEN_SCRIPTS ${CMAKE_CURRENT_SOURCE_DIR}/files/paddlepaddle/gen_scripts/generate_*.py)
    set(OUT_FILES "")
    foreach(GEN_SCRIPT ${PDPD_GEN_SCRIPTS})
        get_filename_component(FILE_WE ${GEN_SCRIPT} NAME_WE)
        set(OUT_DONE_FILE ${TEST_PDPD_MODELS}/${FILE_WE}_done.txt)
        list(APPEND OUT_FILES ${OUT_DONE_FILE})
        # VERBATIM makes argument escaping platform-independent (recommended
        # for every add_custom_command).
        add_custom_command(OUTPUT ${OUT_DONE_FILE}
                COMMAND ${PYTHON_EXECUTABLE}
                        ${CMAKE_CURRENT_SOURCE_DIR}/files/paddlepaddle/gen_wrapper.py
                        ${GEN_SCRIPT}
                        ${TEST_PDPD_MODELS}
                        ${OUT_DONE_FILE}
                DEPENDS ${GEN_SCRIPT} ${CMAKE_CURRENT_SOURCE_DIR}/files/paddlepaddle/gen_wrapper.py
                COMMENT "Generating PDPD test models: ${FILE_WE}"
                VERBATIM
                )
    endforeach()
    add_custom_target(pdpd_test_models DEPENDS ${OUT_FILES})
    add_dependencies(unit-test pdpd_test_models)
    add_dependencies(unit-test paddlepaddle_ngraph_frontend)
endif()

View File

@ -0,0 +1,39 @@
import paddle
from paddle import fluid
import numpy as np
import os
import sys
paddle.enable_static()
inp_blob1 = np.random.randn(1, 1, 3, 3).astype(np.float32)
inp_blob2 = np.random.randn(1, 2, 3, 3).astype(np.float32)
x1 = fluid.data(name='inputX1', shape=[1, 1, 3, 3], dtype='float32')
x2 = fluid.data(name='inputX2', shape=[1, 2, 3, 3], dtype='float32')
conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1")
conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2")
add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1")
relu2a = fluid.layers.relu(add1, name="relu2a")
relu2b = fluid.layers.relu(add1, name="relu2b")
add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2")
relu3a = fluid.layers.relu(add2, name="relu3a")
relu3b = fluid.layers.relu(add2, name="relu3b")
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2}
var = [relu3a, relu3b]
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out"), list(inp_dict.keys()), var, exe,
model_filename="2in_2out.pdmodel", params_filename="2in_2out.pdiparams")

View File

@ -0,0 +1,39 @@
import paddle
from paddle import fluid
import numpy as np
import os
import sys
paddle.enable_static()
inp_blob1 = np.random.randn(1, 1, 3, 3).astype(np.float32)
inp_blob2 = np.random.randn(1, 2, 3, 3).astype(np.float32)
x1 = fluid.data(name='inputX1', shape=[-1, 1, 3, 3], dtype='float32')
x2 = fluid.data(name='inputX2', shape=[-1, 2, 3, 3], dtype='float32')
conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1")
conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2")
add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1")
relu2a = fluid.layers.relu(add1, name="relu2a")
relu2b = fluid.layers.relu(add1, name="relu2b")
add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2")
relu3a = fluid.layers.relu(add2, name="relu3a")
relu3b = fluid.layers.relu(add2, name="relu3b")
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2}
var = [relu3a, relu3b]
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out_dynbatch"), list(inp_dict.keys()), var, exe,
model_filename="2in_2out_dynbatch.pdmodel", params_filename="2in_2out_dynbatch.pdiparams")

View File

@ -0,0 +1,22 @@
# Generates the 'conv2d' PDPD test model used by the frontend unit tests:
# a single static-shape conv2d layer over a 1x3x4x4 float32 input.
# sys.argv[1] is the target directory supplied by gen_wrapper.py / CMake.
import paddle
from paddle import fluid
import numpy as np
import os
import sys
paddle.enable_static()
inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32)
x = fluid.data(name='x', shape=[1, 3, 4, 4], dtype='float32')
test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1),
                                 dilation=(1, 1), groups=1, bias_attr=False)
exe = fluid.Executor(fluid.CPUPlace())
# startup program initializes the conv weights before inference/saving
exe.run(fluid.default_startup_program())
inp_dict = {'x': inp_blob}
var = [test_layer]
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
# default filenames: '__model__' plus one file per parameter ("scattered" form)
fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d"), list(inp_dict.keys()), var, exe)

View File

@ -0,0 +1,25 @@
import paddle
from paddle import fluid
import numpy as np
import os
import sys
paddle.enable_static()
inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32)
x = fluid.data(name='xxx', shape=[1, 3, 4, 4], dtype='float32')
test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1),
dilation=(1, 1), groups=1, bias_attr=False)
relu = fluid.layers.relu(test_layer)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
inp_dict = {'xxx': inp_blob}
var = [relu]
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_relu"), list(inp_dict.keys()), var, exe,
model_filename="conv2d_relu.pdmodel", params_filename="conv2d_relu.pdiparams")

View File

@ -0,0 +1,22 @@
import paddle
from paddle import fluid
import numpy as np
import os
import sys
paddle.enable_static()
inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32)
x = fluid.data(name='x', shape=[1, 3, 4, 4], dtype='float32')
test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1),
dilation=(1, 1), groups=1, bias_attr=False)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
inp_dict = {'x': inp_blob}
var = [test_layer]
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_s"), list(inp_dict.keys()), var, exe,
model_filename="conv2d.pdmodel", params_filename="conv2d.pdiparams")

View File

@ -0,0 +1,39 @@
import paddle
from paddle import fluid
import numpy as np
import sys
import os
# it's better to use PYTHON_PATH
# import sys
# sys.path.append('/home/itikhonov/OpenVINO/openvino/bin/intel64/Debug/lib/python_api/python3.6/')
# from openvino.inference_engine import IECore
def create_multi_output_model():
paddle.enable_static()
# PDPD model creation and inference
num_splits = 20
inp_blob_1 = np.random.randn(2, num_splits, 4, 4).astype(np.float32)
x = fluid.data(name='x', shape=[2, num_splits, 4, 4], dtype='float32')
test_layer = fluid.layers.split(x, num_or_sections=num_splits, dim=1)
var = []
for i in range(num_splits//2):
add = fluid.layers.elementwise_add(test_layer[2*i], test_layer[2*i+1])
var.append(add)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
inp_dict = {'x': inp_blob_1}
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
fluid.io.save_inference_model(os.path.join(sys.argv[1], "multi_tensor_split"),
list(inp_dict.keys()), var, exe,
model_filename="multi_tensor_split.pdmodel",
params_filename="multi_tensor_split.pdiparams")
create_multi_output_model()

View File

@ -0,0 +1,38 @@
#
# relu paddle model generator
#
import numpy as np
from save_model import saveModel
import sys
def relu(name: str, x):
import paddle as pdpd
pdpd.enable_static()
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
out = pdpd.nn.functional.relu(node_x)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out],
inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def main():
data = np.array([-2, 0, 1]).astype('float32')
relu("relu", data)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,80 @@
import os
import numpy as np
import paddle as pdpd
#print numpy array like C structure
def print_alike(arr):
shape = arr.shape
rank = len(shape)
#print("shape: ", shape, "rank: %d" %(rank))
#for idx, value in np.ndenumerate(arr):
# print(idx, value)
def print_array(arr, end=' '):
shape = arr.shape
rank = len(arr.shape)
if rank > 1:
line = "{"
for i in range(arr.shape[0]):
line += print_array(arr[i,:], end="},\n" if i < arr.shape[0]-1 else "}")
line += end
return line
else:
line = "{"
for i in range(arr.shape[0]):
line += "{:.2f}".format(arr[i]) #str(arr[i])
line += ", " if i < shape[0]-1 else ' '
line += end
#print(line)
return line
print(print_array(arr, "}"))
def saveModel(name, exe, feedkeys:list, fetchlist:list, inputs:list, outputs:list, target_dir:str):
model_dir = os.path.join(target_dir, name)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
print("\n\n------------- %s -----------\n" % (name))
for i, input in enumerate(inputs):
print("INPUT %s :" % (feedkeys[i]), input.shape, input.dtype, "\n")
print_alike(input)
np.save(os.path.join(model_dir, "input{}".format(i)), input)
np.save(os.path.join(model_dir, "input{}.{}.{}".format(i, feedkeys[i], input.dtype)), input)
print("\n")
for i, output in enumerate(outputs):
print("OUTPUT %s :" % (fetchlist[i]),output.shape, output.dtype, "\n")
print_alike(output)
np.save(os.path.join(model_dir, "output{}".format(i)), output)
# composited model + scattered model
pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe)
pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams")
if __name__ == "__main__":
np.set_printoptions(precision=2)
np.set_printoptions(suppress=True)
#x = np.random.randn(2,3).astype(np.float32)
x = np.array([[[
[1, 2, 3],
[4, 5, 6]
],
[
[1, 2, 3],
[4, 5, 6]
]],
[[
[1, 2, 3],
[4, 5, 6]
],
[
[1, 2, 3],
[4, 5, 6]
]]]).astype(np.float32)
print_alike(x)

View File

@ -0,0 +1,20 @@
import os
import subprocess
import sys

# Wrapper around a single model-generation script, invoked by CMake as:
#   gen_wrapper.py <generator_script> <output_folder> <mark_file>
# On success it creates <mark_file>, which CMake uses as the custom command's
# OUTPUT for dependency tracking.
print(sys.argv)
if len(sys.argv) < 4:
    print("Script, output folder and mark file must be specified as arguments")
    exit(1)

gen_script = sys.argv[1]
out_folder = sys.argv[2]
mark_file = sys.argv[3]

print("Processing: {} ".format(gen_script))
# Bug fix: the return code was previously ignored, so the mark file was
# written even when generation failed — the build then treated the broken
# model as up-to-date forever. check=True raises CalledProcessError (non-zero
# exit) so CMake reruns the command next build.
subprocess.run([sys.executable, gen_script, out_folder], env=os.environ, check=True)

# Create mark file indicating that script was executed successfully
with open(mark_file, "w") as fp:
    pass

View File

@ -0,0 +1,28 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../shared/include/basic_api.hpp"
using namespace ngraph;
using namespace ngraph::frontend;
static const std::string PDPD = "pdpd";
using PDPDBasicTest = FrontEndBasicTest;
static const std::vector<std::string> models{
std::string("conv2d"),
std::string("conv2d_s/conv2d.pdmodel"),
std::string("conv2d_relu/conv2d_relu.pdmodel"),
std::string("2in_2out/2in_2out.pdmodel"),
std::string("multi_tensor_split/multi_tensor_split.pdmodel"),
std::string("2in_2out_dynbatch/2in_2out_dynbatch.pdmodel"),
};
INSTANTIATE_TEST_SUITE_P(PDPDBasicTest,
FrontEndBasicTest,
::testing::Combine(::testing::Values(PDPD),
::testing::Values(std::string(TEST_PDPD_MODELS)),
::testing::ValuesIn(models)),
FrontEndBasicTest::getTestCaseName);

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../shared/include/cut_specific_model.hpp"
using namespace ngraph;
using namespace ngraph::frontend;
static const auto PDPD = "pdpd";
using PDPDCutTest = FrontEndCutModelTest;
static CutModelParam getTestData_2in_2out()
{
CutModelParam res;
res.m_frontEndName = PDPD;
res.m_modelsPath = std::string(TEST_PDPD_MODELS);
res.m_modelName = "2in_2out/2in_2out.pdmodel";
res.m_oldInputs = {"inputX1", "inputX2"};
res.m_newInputs = {"add1.tmp_0"};
res.m_oldOutputs = {"save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"};
res.m_newOutputs = {"add2.tmp_0"};
res.m_tensorValueName = "conv2dX2.tmp_0";
res.m_tensorValue = {1, 2, 3, 4, 5, 6, 7, 8, 9};
res.m_op_before_name = "conv2dX2.tmp_0";
return res;
}
INSTANTIATE_TEST_SUITE_P(PDPDCutTest,
FrontEndCutModelTest,
::testing::Values(getTestData_2in_2out()),
FrontEndCutModelTest::getTestCaseName);

View File

@ -0,0 +1,29 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../shared/include/load_from.hpp"

using namespace ngraph;
using namespace ngraph::frontend;

static const auto PDPD = "pdpd";

// Fix: the alias and instantiation prefix were 'PDPDCutTest', copy-pasted
// from the cut-model test; renamed to describe the load-from suite.
using PDPDLoadTest = FrontEndLoadFromTest;

// Paths are relative to TEST_PDPD_MODELS; covers loading a model from a
// directory, from explicit file pairs, and from stream(s).
static LoadFromFEParam getTestData()
{
    LoadFromFEParam res;
    res.m_frontEndName = PDPD;
    res.m_modelsPath = std::string(TEST_PDPD_MODELS);
    res.m_file = "conv2d";
    res.m_files = {"2in_2out/2in_2out.pdmodel", "2in_2out/2in_2out.pdiparams"};
    res.m_stream = "relu/relu.pdmodel";
    res.m_streams = {"2in_2out/2in_2out.pdmodel", "2in_2out/2in_2out.pdiparams"};
    return res;
}

INSTANTIATE_TEST_SUITE_P(PDPDLoadTest,
                         FrontEndLoadFromTest,
                         ::testing::Values(getTestData()),
                         FrontEndLoadFromTest::getTestCaseName);

View File

@ -0,0 +1,74 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../shared/include/partial_shape.hpp"
using namespace ngraph;
using namespace ngraph::frontend;
static const auto PDPD = "pdpd";
using PDPDPartialShapeTest = FrontEndPartialShapeTest;
static PartShape getTestShape_2in_2out()
{
PartShape res;
res.m_modelName = "2in_2out/2in_2out.pdmodel";
res.m_tensorName = "inputX1";
res.m_oldPartialShape = PartialShape{1, 1, 3, 3};
res.m_newPartialShape = PartialShape{2, 1, 3, 3};
return res;
}
static PartShape getTestShape_2in_2out_dynbatch()
{
PartShape res;
res.m_modelName = "2in_2out_dynbatch/2in_2out_dynbatch.pdmodel";
res.m_tensorName = "inputX1";
res.m_oldPartialShape = PartialShape{Dimension::dynamic(), 1, 3, 3};
res.m_newPartialShape = PartialShape{2, 1, 3, 3};
return res;
}
static PartShape getTestShape_conv2d()
{
PartShape res;
res.m_modelName = "conv2d_s/conv2d.pdmodel";
res.m_tensorName = "x";
res.m_oldPartialShape = PartialShape{1, 3, 4, 4};
res.m_newPartialShape = PartialShape{1, 3, 8, 8};
return res;
}
static PartShape getTestShape_conv2d_setDynamicBatch()
{
PartShape res;
res.m_modelName = "conv2d_s/conv2d.pdmodel";
res.m_tensorName = "x";
res.m_oldPartialShape = PartialShape{1, 3, 4, 4};
res.m_newPartialShape = PartialShape{Dimension::dynamic(), 3, 8, 8};
return res;
}
static PartShape getTestShape_conv2d_relu()
{
PartShape res;
res.m_modelName = "conv2d_relu/conv2d_relu.pdmodel";
res.m_tensorName = "xxx";
res.m_oldPartialShape = PartialShape{1, 3, 4, 4};
res.m_newPartialShape = PartialShape{5, 3, 5, 5};
return res;
}
INSTANTIATE_TEST_SUITE_P(PDPDPartialShapeTest,
FrontEndPartialShapeTest,
::testing::Combine(::testing::Values(BaseFEParam{
PDPD, std::string(TEST_PDPD_MODELS)}),
::testing::ValuesIn(std::vector<PartShape>{
getTestShape_2in_2out(),
getTestShape_conv2d_relu(),
getTestShape_conv2d(),
getTestShape_conv2d_setDynamicBatch(),
getTestShape_2in_2out_dynbatch()})),
FrontEndPartialShapeTest::getTestCaseName);

View File

@ -0,0 +1,26 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../shared/include/set_element_type.hpp"
using namespace ngraph;
using namespace ngraph::frontend;
static const auto PDPD = "pdpd";
using PDPDCutTest = FrontEndElementTypeTest;
// Test data: single-ReLU PDPD model used for the element-type change check.
static SetTypeFEParam getTestData_relu()
{
    SetTypeFEParam param;
    param.m_modelName = "relu/relu.pdmodel";
    param.m_modelsPath = std::string(TEST_PDPD_MODELS);
    param.m_frontEndName = PDPD;
    return param;
}
// Runs the shared element-type test suite against the PDPD relu model.
INSTANTIATE_TEST_SUITE_P(PDPDCutTest,
                         FrontEndElementTypeTest,
                         ::testing::Values(getTestData_relu()),
                         FrontEndElementTypeTest::getTestCaseName);

View File

@ -0,0 +1,35 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_manager.hpp>
#include <gtest/gtest.h>
// Parameters for the "load model" test suite; one instance per front end.
struct LoadFromFEParam
{
    std::string m_frontEndName;          // framework name passed to load_by_framework
    std::string m_modelsPath;            // base directory prepended to each file below
    std::string m_file;                  // model file for the load_from_file test
    std::vector<std::string> m_files;    // model files for the load_from_files test
    std::string m_stream;                // model file opened as a stream
    std::vector<std::string> m_streams;  // model files opened as multiple streams
};
// Parameterized fixture checking that a front end can load a model from a
// single file, multiple files, one stream, and multiple streams.
class FrontEndLoadFromTest : public ::testing::TestWithParam<LoadFromFEParam>
{
public:
    LoadFromFEParam m_param;                         // current test parameters
    ngraph::frontend::FrontEndManager m_fem;         // locates available front ends
    ngraph::frontend::FrontEnd::Ptr m_frontEnd;      // front end under test
    ngraph::frontend::InputModel::Ptr m_inputModel;  // model loaded by a test body
    // Builds a gtest-compatible test case name from the front end name.
    static std::string getTestCaseName(const testing::TestParamInfo<LoadFromFEParam>& obj);
    void SetUp() override;
protected:
    void initParamTest();
};

View File

@ -26,8 +26,8 @@ struct PartShape
{
std::string m_modelName;
std::string m_tensorName;
std::vector<size_t> m_oldPartialShape;
std::vector<size_t> m_newPartialShape;
ngraph::PartialShape m_oldPartialShape;
ngraph::PartialShape m_newPartialShape;
};
using PartialShapeParam = std::tuple<BaseFEParam, PartShape>;

View File

@ -0,0 +1,33 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include <frontend_manager/frontend_manager.hpp>
#include <gtest/gtest.h>
// Parameters for the element-type modification test suite.
struct SetTypeFEParam
{
    std::string m_frontEndName;  // framework name passed to load_by_framework
    std::string m_modelsPath;    // base directory prepended to the model name
    std::string m_modelName;     // model file, relative to m_modelsPath
};
// Parameterized fixture verifying that an input's element type can be
// changed via the InputModel API before the model is converted.
class FrontEndElementTypeTest : public ::testing::TestWithParam<SetTypeFEParam>
{
public:
    SetTypeFEParam m_param;                          // current test parameters
    ngraph::frontend::FrontEndManager m_fem;         // locates available front ends
    ngraph::frontend::FrontEnd::Ptr m_frontEnd;      // front end under test
    ngraph::frontend::InputModel::Ptr m_inputModel;  // model loaded by doLoadFromFile
    // Builds a gtest-compatible test case name from front end and model names.
    static std::string getTestCaseName(const testing::TestParamInfo<SetTypeFEParam>& obj);
    void SetUp() override;
protected:
    void initParamTest();
    // Loads m_frontEnd and m_inputModel from m_param; asserts on failure.
    void doLoadFromFile();
};

View File

@ -5,6 +5,8 @@
#pragma once
#include <string>
#include "backend.hpp"
#include "ngraph/file_util.hpp"
// Helper functions
namespace FrontEndTestUtils
@ -27,4 +29,21 @@ namespace FrontEndTestUtils
}
return res;
}
// Sets an environment variable for the current process.
// Returns 0 on success and non-zero on failure, mirroring _putenv_s/setenv.
// NOTE(review): on POSIX an already-present value is kept (overwrite=0),
// while _putenv_s on Windows replaces it — confirm the asymmetry is intended.
inline int set_test_env(const char* name, const char* value)
{
#ifdef _WIN32
    return _putenv_s(name, value);
#elif defined(__linux) || defined(__APPLE__)
    return setenv(name, value, 0);
#else
    // Unsupported platform: report failure explicitly instead of falling
    // off the end of a non-void function (previously undefined behavior).
    (void)name;
    (void)value;
    return -1;
#endif
}
// Exports OV_FRONTEND_PATH pointing at the directory that contains the
// backend shared libraries — presumably consumed by FrontEndManager's
// plugin discovery (tests re-create the manager afterwards); confirm.
// An OV_FRONTEND_PATH already set by the user is preserved, because
// set_test_env uses overwrite=0 on POSIX.
inline void setupTestEnv()
{
    std::string fePath = ngraph::file_util::get_directory(
        ngraph::runtime::Backend::get_backend_shared_library_search_directory());
    set_test_env("OV_FRONTEND_PATH", fePath.c_str());
}
} // namespace FrontEndTestUtils

View File

@ -17,13 +17,15 @@ std::string FrontEndBasicTest::getTestCaseName(const testing::TestParamInfo<Basi
// Exports the front end search path first, then re-creates the manager so
// the new environment is seen, and finally reads the test parameters.
void FrontEndBasicTest::SetUp()
{
    FrontEndTestUtils::setupTestEnv();
    m_fem = FrontEndManager(); // re-initialize after setting up environment
    initParamTest();
}
void FrontEndBasicTest::initParamTest()
{
std::tie(m_feName, m_pathToModels, m_modelFile) = GetParam();
m_modelFile = std::string(TEST_FILES) + m_pathToModels + m_modelFile;
m_modelFile = m_pathToModels + m_modelFile;
}
void FrontEndBasicTest::doLoadFromFile()

View File

@ -26,14 +26,15 @@ std::string FrontEndCutModelTest::getTestCaseName(const testing::TestParamInfo<C
// Exports the front end search path first, then re-creates the manager so
// the new environment is seen, and finally reads the test parameters.
void FrontEndCutModelTest::SetUp()
{
    FrontEndTestUtils::setupTestEnv();
    m_fem = FrontEndManager(); // re-initialize after setting up environment
    initParamTest();
}
void FrontEndCutModelTest::initParamTest()
{
m_param = GetParam();
m_param.m_modelName = std::string(TEST_FILES) + m_param.m_modelsPath + m_param.m_modelName;
std::cout << "Model: " << m_param.m_modelName << std::endl;
m_param.m_modelName = m_param.m_modelsPath + m_param.m_modelName;
}
void FrontEndCutModelTest::doLoadFromFile()

View File

@ -0,0 +1,106 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../include/load_from.hpp"
#include <fstream>
#include "../include/utils.hpp"
using namespace ngraph;
using namespace ngraph::frontend;
// Converts the front end name into a gtest-compatible test case name.
std::string
    FrontEndLoadFromTest::getTestCaseName(const testing::TestParamInfo<LoadFromFEParam>& obj)
{
    return FrontEndTestUtils::fileToTestName(obj.param.m_frontEndName);
}
// Exports the front end search path, re-creates the manager so the new
// environment is seen, and caches the current test parameters.
void FrontEndLoadFromTest::SetUp()
{
    FrontEndTestUtils::setupTestEnv();
    m_fem = FrontEndManager(); // re-initialize after setting up environment
    m_param = GetParam();
}
///////////////////////////////////////////////////////////////////
// Loads a model from a single file and converts it to an nGraph function.
// (Removed the unused local `FrontEnd::Ptr fe`.)
TEST_P(FrontEndLoadFromTest, testLoadFromFile)
{
    std::vector<std::string> frontends;
    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
    ASSERT_NE(m_frontEnd, nullptr);

    ASSERT_NO_THROW(m_inputModel =
                        m_frontEnd->load_from_file(m_param.m_modelsPath + m_param.m_file));
    ASSERT_NE(m_inputModel, nullptr);

    std::shared_ptr<ngraph::Function> function;
    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
    ASSERT_NE(function, nullptr);
}
// Loads a model split across several files and converts it.
// (Removed the unused local `FrontEnd::Ptr fe`.)
TEST_P(FrontEndLoadFromTest, testLoadFromFiles)
{
    std::vector<std::string> frontends;
    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
    ASSERT_NE(m_frontEnd, nullptr);

    // Expand each file name into a full path under the models directory.
    auto dir_files = m_param.m_files;
    for (auto& file : dir_files)
    {
        file = m_param.m_modelsPath + file;
    }
    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_files(dir_files));
    ASSERT_NE(m_inputModel, nullptr);

    std::shared_ptr<ngraph::Function> function;
    // Guard conversion like the sibling tests do, so a throwing convert()
    // is reported as a test failure rather than an unhandled exception.
    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
    ASSERT_NE(function, nullptr);
}
// Loads a model from a single binary input stream and converts it.
// (Removed the unused local `FrontEnd::Ptr fe`; added an explicit check
// that the model file actually opened, for a clearer failure message.)
TEST_P(FrontEndLoadFromTest, testLoadFromStream)
{
    std::vector<std::string> frontends;
    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
    ASSERT_NE(m_frontEnd, nullptr);

    std::ifstream is(m_param.m_modelsPath + m_param.m_stream, std::ios::in | std::ifstream::binary);
    ASSERT_TRUE(is.is_open()) << "Cannot open model file: "
                              << m_param.m_modelsPath + m_param.m_stream;
    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_stream(is));
    ASSERT_NE(m_inputModel, nullptr);

    std::shared_ptr<ngraph::Function> function;
    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
    ASSERT_NE(function, nullptr);
}
// Loads a model from several binary input streams and converts it.
// (Removed the unused local `FrontEnd::Ptr fe`; added an open-check per
// stream for a clearer failure message on a missing file.)
TEST_P(FrontEndLoadFromTest, testLoadFromStreams)
{
    std::vector<std::string> frontends;
    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
    ASSERT_NE(m_frontEnd, nullptr);

    // is_vec keeps the streams alive while load_from_streams reads through
    // the raw pointers collected in is_ptr_vec.
    std::vector<std::shared_ptr<std::ifstream>> is_vec;
    std::vector<std::istream*> is_ptr_vec;
    for (const auto& file : m_param.m_streams)
    {
        const auto path = m_param.m_modelsPath + file;
        is_vec.push_back(std::make_shared<std::ifstream>(path,
                                                         std::ios::in | std::ifstream::binary));
        ASSERT_TRUE(is_vec.back()->is_open()) << "Cannot open model file: " << path;
        is_ptr_vec.push_back(is_vec.back().get());
    }
    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_streams(is_ptr_vec));
    ASSERT_NE(m_inputModel, nullptr);

    std::shared_ptr<ngraph::Function> function;
    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
    ASSERT_NE(function, nullptr);
}

View File

@ -17,22 +17,22 @@ std::string
std::string res = base.m_frontEndName + "_" + part.m_modelName + "_" + part.m_tensorName;
for (auto s : part.m_newPartialShape)
{
res += "_" + std::to_string(s);
res += "_" + (s.is_dynamic() ? "dyn" : std::to_string(s.get_length()));
}
return FrontEndTestUtils::fileToTestName(res);
}
// Exports the front end search path first, then re-creates the manager so
// the new environment is seen, and finally reads the test parameters.
void FrontEndPartialShapeTest::SetUp()
{
    FrontEndTestUtils::setupTestEnv();
    m_fem = FrontEndManager(); // re-initialize after setting up environment
    initParamTest();
}
void FrontEndPartialShapeTest::initParamTest()
{
std::tie(m_baseParam, m_partShape) = GetParam();
m_partShape.m_modelName =
std::string(TEST_FILES) + m_baseParam.m_modelsPath + m_partShape.m_modelName;
std::cout << "Model: " << m_partShape.m_modelName << std::endl;
m_partShape.m_modelName = m_baseParam.m_modelsPath + m_partShape.m_modelName;
}
void FrontEndPartialShapeTest::doLoadFromFile()
@ -59,12 +59,8 @@ TEST_P(FrontEndPartialShapeTest, testCheckOldPartialShape)
return node->get_friendly_name().find(m_partShape.m_tensorName) != std::string::npos;
});
ASSERT_NE(it, ops.end());
auto shape = (*it)->get_output_partial_shape(0).get_shape();
ASSERT_EQ(shape.size(), m_partShape.m_oldPartialShape.size());
for (std::size_t i = 0; i < shape.size(); i++)
{
EXPECT_EQ(shape.at(i), m_partShape.m_oldPartialShape.at(i));
}
auto shape = (*it)->get_output_partial_shape(0);
ASSERT_EQ(shape, m_partShape.m_oldPartialShape);
}
TEST_P(FrontEndPartialShapeTest, testSetNewPartialShape)
@ -83,10 +79,6 @@ TEST_P(FrontEndPartialShapeTest, testSetNewPartialShape)
return node->get_friendly_name().find(m_partShape.m_tensorName) != std::string::npos;
});
ASSERT_NE(it, ops.end());
auto shape = (*it)->get_output_partial_shape(0).get_shape();
ASSERT_EQ(shape.size(), m_partShape.m_newPartialShape.size());
for (std::size_t i = 0; i < shape.size(); i++)
{
EXPECT_EQ(shape.at(i), m_partShape.m_newPartialShape.at(i));
}
auto shape = (*it)->get_output_partial_shape(0);
ASSERT_EQ(shape, m_partShape.m_newPartialShape);
}

View File

@ -0,0 +1,62 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "../include/set_element_type.hpp"
#include "../include/utils.hpp"
using namespace ngraph;
using namespace ngraph::frontend;
// Builds a gtest-compatible test case name from front end and model names.
std::string
    FrontEndElementTypeTest::getTestCaseName(const testing::TestParamInfo<SetTypeFEParam>& obj)
{
    const auto rawName = obj.param.m_frontEndName + "_" + obj.param.m_modelName;
    return FrontEndTestUtils::fileToTestName(rawName);
}
// Exports the front end search path first, then re-creates the manager so
// the new environment is seen, and finally reads the test parameters.
void FrontEndElementTypeTest::SetUp()
{
    FrontEndTestUtils::setupTestEnv();
    m_fem = FrontEndManager(); // re-initialize after setting up environment
    initParamTest();
}
// Copies the current test parameters and expands the model name into a
// full path under the models directory.
void FrontEndElementTypeTest::initParamTest()
{
    m_param = GetParam();
    const auto fullModelPath = m_param.m_modelsPath + m_param.m_modelName;
    m_param.m_modelName = fullModelPath;
}
// Loads the front end and the model given by the test parameters, failing
// the test on any error; fills m_frontEnd and m_inputModel.
// (Removed the unused local `FrontEnd::Ptr fe`.)
void FrontEndElementTypeTest::doLoadFromFile()
{
    std::vector<std::string> frontends;
    ASSERT_NO_THROW(frontends = m_fem.get_available_front_ends());
    ASSERT_NO_THROW(m_frontEnd = m_fem.load_by_framework(m_param.m_frontEndName));
    ASSERT_NE(m_frontEnd, nullptr);
    ASSERT_NO_THROW(m_inputModel = m_frontEnd->load_from_file(m_param.m_modelName));
    ASSERT_NE(m_inputModel, nullptr);
}
///////////////////////////////////////////////////////////////////
// Changes the first input's element type to f16 and verifies that the
// converted function's corresponding node carries the new type.
TEST_P(FrontEndElementTypeTest, testSetElementType)
{
    ASSERT_NO_THROW(doLoadFromFile());
    Place::Ptr place;
    ASSERT_NO_THROW(place = m_inputModel->get_inputs()[0]);
    ASSERT_NE(place, nullptr);
    auto name = place->get_names()[0];

    ASSERT_NO_THROW(m_inputModel->set_element_type(place, element::f16));

    std::shared_ptr<ngraph::Function> function;
    // Guard conversion and null-check the result before dereferencing it
    // (previously a throwing or null convert() would crash the test body).
    ASSERT_NO_THROW(function = m_frontEnd->convert(m_inputModel));
    ASSERT_NE(function, nullptr);

    auto ops = function->get_ordered_ops();
    // Locate the node whose friendly name contains the input's name.
    auto it = std::find_if(ops.begin(), ops.end(), [&](const std::shared_ptr<ngraph::Node>& node) {
        return node->get_friendly_name().find(name) != std::string::npos;
    });
    ASSERT_NE(it, ops.end());
    EXPECT_EQ((*it)->get_output_element_type(0), element::f16);
}