Paddle FrontEnd Refactoring (#9157)
* Refactor PaddlePaddle FrontEnd
* Codestyle
* FrontEnd, InputModel, Place base classes -> abstract, renamed model file
* Fix unit tests
* fix unit tests
* ngraph:: to ov::
* Rename frontends dir to frontend
* Rename paddlepaddle to paddle; pdpd to paddle
* add missing file
* codestyle
* Remove local change
* paddlepaddle -> paddle for azure configs and .md files
* fix package name, fix config files
* Fix win build
* Revert Broadcast/AutoBroadcast changes
* codestyle
* fix FrontEnd class
* fix ngraph_cpp_api.config
* codestyle
* merge master
* fix build
* refactoring; revert broadcast/autobroadcast changes
* codestyle
* fix MacOS config
Parent: 2daafec53d
Commit: 0bbda24186
@@ -116,8 +116,8 @@ jobs:
 python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/wheel/requirements-dev.txt
 # For running Python API tests
 python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/src/requirements-dev.txt
-# For running PaddlePaddle frontend unit tests
-python3 -m pip install -r $(REPO_DIR)/src/core/tests/frontend/paddlepaddle/requirements_dev.txt
+# For running Paddle frontend unit tests
+python3 -m pip install -r $(REPO_DIR)/src/core/tests/frontend/paddle/requirements_dev.txt
 # For running ONNX frontend unit tests
 python3 -m pip install -r $(REPO_DIR)/src/core/tests/requirements_test_onnx.txt
 # For MO unit tests
@@ -248,8 +248,8 @@ jobs:
 displayName: 'OV Core UT'
 continueOnError: false

-- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/paddlepaddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-PaddlePaddle.xml
-displayName: 'PaddlePaddle Frontend UT'
+- script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/paddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-Paddle.xml
+displayName: 'Paddle Frontend UT'
 continueOnError: false

 - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/tensorflow_tests --gtest_print_time=1 --gtest_output=xml:TEST-Tensorflow.xml
@@ -109,8 +109,8 @@ jobs:
 python -m pip install --upgrade pip
 rem For running Python API tests
 python -m pip install -r $(REPO_DIR)\inference-engine\ie_bridges\python\src\requirements-dev.txt
-rem For running PaddlePaddle frontend unit tests
-python -m pip install -r $(REPO_DIR)\src\core\tests\frontend\paddlepaddle\requirements_dev.txt
+rem For running Paddle frontend unit tests
+python -m pip install -r $(REPO_DIR)\src\core\tests\frontend\paddle\requirements_dev.txt
 rem For running ONNX frontend unit tests
 python -m pip install -r $(REPO_DIR)\src\core\tests\requirements_test_onnx.txt
 rem For MO unit tests
@@ -207,8 +207,8 @@ jobs:
 displayName: 'OV Core UT'
 continueOnError: false

-- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\paddlepaddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-PaddlePaddle.xml
-displayName: 'PaddlePaddle Frontend UT'
+- script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\paddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-Paddle.xml
+displayName: 'Paddle Frontend UT'
 continueOnError: false

 - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\tensorflow_tests --gtest_print_time=1 --gtest_output=xml:TEST-Tensorflow.xml
@@ -126,8 +126,8 @@ function(ie_add_plugin)
 if(TARGET ov_onnx_frontend)
 add_dependencies(${IE_PLUGIN_NAME} ov_onnx_frontend)
 endif()
-if(TARGET ov_paddlepaddle_frontend)
-add_dependencies(${IE_PLUGIN_NAME} ov_paddlepaddle_frontend)
+if(TARGET ov_paddle_frontend)
+add_dependencies(${IE_PLUGIN_NAME} ov_paddle_frontend)
 endif()
 if(TARGET ov_tensorflow_frontend)
 add_dependencies(${IE_PLUGIN_NAME} ov_tensorflow_frontend)
@@ -163,11 +163,11 @@ else()
 endif()

 ie_dependent_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ON "protoc_available" OFF)
-ie_dependent_option(ENABLE_OV_PDPD_FRONTEND "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
+ie_dependent_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
 ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON)
 ie_dependent_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON "protoc_available" OFF)
 ie_dependent_option(ENABLE_SYSTEM_PROTOBUF "Use system protobuf" OFF
-"ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PDPD_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF)
+"ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF)
 ie_dependent_option(ENABLE_OV_CORE_UNIT_TESTS "Enables OpenVINO core unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
 ie_dependent_option(ENABLE_OV_CORE_BACKEND_UNIT_TESTS "Control the building of unit tests using backends" ON
 "ENABLE_OV_CORE_UNIT_TESTS" OFF)
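Note for downstream build scripts: the option rename above is not backward compatible, so CMake code that still checks the old flag needs updating. A minimal sketch, assuming a consumer target named `my_plugin` (a placeholder, not part of this change):

```cmake
# Hedged sketch: after this refactoring the old ENABLE_OV_PDPD_FRONTEND option
# and the ov_paddlepaddle_frontend target no longer exist; guard Paddle-specific
# logic on the renamed option and target instead.
# "my_plugin" is a hypothetical target used only for illustration.
if(ENABLE_OV_PADDLE_FRONTEND AND TARGET ov_paddle_frontend)
    add_dependencies(my_plugin ov_paddle_frontend)
endif()
```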
@@ -11,7 +11,7 @@
 #
 # * `Runtime`: OpenVINO C++ and C Core & Inference Runtime, frontend common
 # * `ONNX`: OpenVINO ONNX frontend
-# * `PaddlePaddle`: OpenVINO PaddlePaddle frontend
+# * `Paddle`: OpenVINO Paddle frontend
 #
 # If no components are specified, `Runtime` component is provided:
 #
@@ -43,8 +43,8 @@
 # `openvino::frontend::onnx`
 # ONNX FrontEnd target (optional)
 #
-# `openvino::frontend::paddlepaddle`
-# PaddlePaddle FrontEnd target (optional)
+# `openvino::frontend::paddle`
+# Paddle FrontEnd target (optional)
 #
 # `openvino::frontend::tensorflow`
 # TensorFlow FrontEnd target (optional)
@@ -63,8 +63,8 @@
 # `OpenVINO_Frontend_ONNX_FOUND`
 # OpenVINO ONNX frontend is available
 #
-# `OpenVINO_Frontend_PaddlePaddle_FOUND`
-# OpenVINO PaddlePaddle frontend is available
+# `OpenVINO_Frontend_Paddle_FOUND`
+# OpenVINO Paddle frontend is available
 #
 # `OpenVINO_Frontend_TensorFlow_FOUND`
 # OpenVINO TensorFlow frontend is available
@@ -193,12 +193,12 @@ endif()
 set(${CMAKE_FIND_PACKAGE_NAME}_Runtime_FOUND ON)

 set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @ENABLE_OV_ONNX_FRONTEND@)
-set(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND @ENABLE_OV_PDPD_FRONTEND@)
+set(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND @ENABLE_OV_PADDLE_FRONTEND@)
 set(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND @ENABLE_OV_TF_FRONTEND@)
 set(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND @ENABLE_OV_IR_FRONTEND@)

 set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_ONNX_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND})
-set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_PaddlePaddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND})
+set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_Paddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND})
 set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_TensorFlow_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND})
 set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_IR_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND})

@@ -212,7 +212,7 @@ endif()
 #

 foreach(target openvino::runtime openvino::runtime::c
-openvino::frontend::onnx openvino::frontend::paddlepaddle openvino::frontend::tensorflow)
+openvino::frontend::onnx openvino::frontend::paddle openvino::frontend::tensorflow)
 if(TARGET ${target} AND _ov_as_external_package)
 _ov_target_no_deprecation_error(${target})
 endif()
@@ -230,6 +230,6 @@ if(_need_package_name_reset)
 unset(_need_package_name_reset)
 endif()

-unset(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND)
+unset(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND)
 unset(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND)
 unset(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND)
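For external projects, the component and target names documented above change with this commit. A minimal consumer sketch, using the component and target names from the config comments above; the `paddle_reader` executable is hypothetical:

```cmake
# Hedged sketch of a consumer CMakeLists.txt after the rename.
find_package(OpenVINO REQUIRED COMPONENTS Runtime Paddle)

if(NOT OpenVINO_Frontend_Paddle_FOUND)
    message(FATAL_ERROR "This OpenVINO build was configured without the Paddle frontend")
endif()

add_executable(paddle_reader main.cpp)
target_link_libraries(paddle_reader PRIVATE
    openvino::runtime             # unchanged runtime target
    openvino::frontend::paddle)   # was openvino::frontend::paddlepaddle
```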
@@ -29,8 +29,8 @@
 # ngraph_ov_onnx_frontend_FOUND - True if the system has ov_onnx_frontend library
 # ngraph::ov_onnx_frontend - ONNX FrontEnd target (optional)
 #
-# ngraph_paddlepaddle_frontend_FOUND - True if the system has PDPD frontend
-# ngraph::ov_paddlepaddle_frontend - nGraph PDPD frontend (optional)
+# ngraph_paddle_frontend_FOUND - True if the system has Paddle frontend
+# ngraph::ov_paddle_frontend - nGraph Paddle frontend (optional)
 #

 @PACKAGE_INIT@
@@ -56,10 +56,10 @@ if(TARGET openvino::frontend::onnx AND NOT TARGET ngraph::ov_onnx_frontend)
 INTERFACE_LINK_LIBRARIES openvino::frontend::onnx)
 endif()

-if(TARGET openvino::frontend::paddlepaddle AND NOT TARGET ngraph::ov_paddlepaddle_frontend)
-add_library(ngraph::ov_paddlepaddle_frontend INTERFACE IMPORTED)
-set_target_properties(ngraph::ov_paddlepaddle_frontend PROPERTIES
-INTERFACE_LINK_LIBRARIES openvino::frontend::paddlepaddle)
+if(TARGET openvino::frontend::paddle AND NOT TARGET ngraph::ov_paddle_frontend)
+add_library(ngraph::ov_paddle_frontend INTERFACE IMPORTED)
+set_target_properties(ngraph::ov_paddle_frontend PROPERTIES
+INTERFACE_LINK_LIBRARIES openvino::frontend::paddle)
 endif()

 if(TARGET openvino::frontend::tensorflow AND NOT TARGET ngraph::ov_tensorflow_frontend)
@@ -85,7 +85,7 @@ if(ngraph_onnx_importer_FOUND)
 endif()
 endif()

-set(ngraph_paddlepaddle_frontend_FOUND ${OpenVINO_Frontend_PaddlePaddle_FOUND})
+set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND})
 set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND})
 set(ngraph_ir_frontend_FOUND ${OpenVINO_Frontend_IR_FOUND})
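The ngraphConfig.cmake change keeps the legacy find_package(ngraph) path working by aliasing the renamed target. A hedged sketch of what an old consumer now resolves to; `legacy_app` is a hypothetical target:

```cmake
# Legacy consumers keep using the ngraph:: namespace; the INTERFACE library
# created above forwards to openvino::frontend::paddle.
find_package(ngraph REQUIRED)

if(ngraph_paddle_frontend_FOUND)
    target_link_libraries(legacy_app PRIVATE ngraph::ov_paddle_frontend)
endif()
```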
@@ -136,8 +136,8 @@ if(ENABLE_TESTS)
 add_dependencies(test_model_zoo test_pip_prerequsites)
 endif()

-if (ENABLE_OV_PDPD_FRONTEND AND ENABLE_OV_CORE_UNIT_TESTS)
-add_dependencies(test_model_zoo paddlepaddle_test_models)
+if (ENABLE_OV_PADDLE_FRONTEND AND ENABLE_OV_CORE_UNIT_TESTS)
+add_dependencies(test_model_zoo paddle_test_models)
 endif()

 install(DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo"
@@ -46,11 +46,11 @@ This library contains the classes to:
 Starting from 2022.1 release, OpenVINO Runtime introduced a concept of frontend plugins. Such plugins can be automatically dynamically loaded by OpenVINO Runtime dynamically depending on file format:
 * Unix* OS:
 - `libov_ir_frontend.so` to read a network from IR
-- `libov_paddlepaddle_frontend.so` to read a network from PaddlePaddle model format
+- `libov_paddle_frontend.so` to read a network from PaddlePaddle model format
 - `libov_onnx_frontend.so` to read a network from ONNX model format
 * Windows* OS:
 - `ov_ir_frontend.dll` to read a network from IR
-- `ov_paddlepaddle_frontend.dll` to read a network from PaddlePaddle model format
+- `ov_paddle_frontend.dll` to read a network from PaddlePaddle model format
 - `ov_onnx_frontend.dll` to read a network from ONNX model format

 ### Device-specific Plugin Libraries ###
docs/doxygen/ngraph_cpp_api.config (new file, 39 lines)
@@ -0,0 +1,39 @@
# Doxyfile 1.8.18

# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").

@INCLUDE = "@IE_CONFIG_BUILD@"

EXCLUDE_SYMBOLS =

PREDEFINED =

EXCLUDE_PATTERNS = */python/*

FILE_PATTERNS = *.cpp \
*.c \
*.hpp \
*.h

LAYOUT_FILE = "@NGRAPH_CPP_LAYOUT_BUILD@"

INPUT = "@CORE_DIR@/core/include/" \
"@FRONTENDS_DIR@/onnx/frontend/include/" \
"@FRONTENDS_DIR@/paddle/frontend/include/"

HTML_OUTPUT = "@NGRAPH_CPP_OUTPUT@"

GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ngraph_cpp_api.tag"

WARN_LOGFILE = "@DOCS_BUILD_DIR@/ngraph_cpp_api.log"
@@ -3,8 +3,8 @@
 #
 add_subdirectory(shared)

-if (ENABLE_OV_PDPD_FRONTEND)
-add_subdirectory(paddlepaddle)
+if (ENABLE_OV_PADDLE_FRONTEND)
+add_subdirectory(paddle)
 endif()

 if (ENABLE_OV_ONNX_FRONTEND)
@@ -2,7 +2,7 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-set(TARGET_NAME "paddlepaddle_tests")
+set(TARGET_NAME "paddle_tests")

 file(GLOB_RECURSE SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)

@@ -18,7 +18,7 @@ install(TARGETS ${TARGET_NAME}
 EXCLUDE_FROM_ALL)

 # Test model generating
-ie_check_pip_package(paddlepaddle WARNING)
+ie_check_pip_package(paddlepaddle WARNING paddle)

 set(TEST_PADDLE_MODELS_DIRNAME test_model_zoo/paddle_test_models)
 target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_PADDLE_MODELS_DIRNAME=\"${TEST_PADDLE_MODELS_DIRNAME}/\")
@@ -39,7 +39,7 @@ if (paddlepaddle_FOUND)
 ${TEST_PADDLE_MODELS}
 DEPENDS ${PADDLE_ALL_SCRIPTS}
 )
-add_custom_target(paddlepaddle_test_models DEPENDS ${OUT_FILE})
+add_custom_target(paddle_test_models DEPENDS ${OUT_FILE})

 install(DIRECTORY ${TEST_PADDLE_MODELS}
 DESTINATION tests/${TEST_PADDLE_MODELS_DIRNAME}
@@ -51,11 +51,11 @@ else()
 COMMAND ${CMAKE_COMMAND}
 -E cmake_echo_color --red "Warning: Unable to generate PaddlePaddle test models. Running '${TARGET_NAME}' will likely fail"
 )
-add_custom_target(paddlepaddle_test_models DEPENDS unable_build_paddle_models.txt)
+add_custom_target(paddle_test_models DEPENDS unable_build_paddle_models.txt)
 endif()

-add_dependencies(${TARGET_NAME} paddlepaddle_test_models)
-add_dependencies(${TARGET_NAME} ov_paddlepaddle_frontend)
+add_dependencies(${TARGET_NAME} paddle_test_models)
+add_dependencies(${TARGET_NAME} ov_paddle_frontend)

 # Fuzzy tests for PaddlePaddle use IE_CPU engine
 if (ENABLE_MKL_DNN)
@ -9,7 +9,7 @@
|
||||
using namespace ngraph;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDBasicTest = FrontEndBasicTest;
|
||||
using PaddleBasicTest = FrontEndBasicTest;
|
||||
|
||||
static const std::vector<std::string> models{
|
||||
std::string("conv2d"),
|
||||
@ -20,7 +20,7 @@ static const std::vector<std::string> models{
|
||||
std::string("2in_2out_dynbatch/2in_2out_dynbatch.pdmodel"),
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDBasicTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleBasicTest,
|
||||
FrontEndBasicTest,
|
||||
::testing::Combine(::testing::Values(PADDLE_FE),
|
||||
::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)),
|
@ -9,7 +9,7 @@
|
||||
using namespace ov;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDConvertModelTest = FrontEndConvertModelTest;
|
||||
using PaddleConvertModelTest = FrontEndConvertModelTest;
|
||||
|
||||
static const std::vector<std::string> models{
|
||||
std::string("conv2d"),
|
||||
@ -21,7 +21,7 @@ static const std::vector<std::string> models{
|
||||
std::string("pool2d_dyn_hw/pool2d_dyn_hw.pdmodel"),
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDConvertModelTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleConvertModelTest,
|
||||
FrontEndConvertModelTest,
|
||||
::testing::Combine(::testing::Values(PADDLE_FE),
|
||||
::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)),
|
@ -9,7 +9,7 @@
|
||||
using namespace ngraph;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDCutTest = FrontEndCutModelTest;
|
||||
using PaddleCutTest = FrontEndCutModelTest;
|
||||
|
||||
static CutModelParam getTestData_2in_2out() {
|
||||
CutModelParam res;
|
||||
@ -26,7 +26,7 @@ static CutModelParam getTestData_2in_2out() {
|
||||
return res;
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDCutTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleCutTest,
|
||||
FrontEndCutModelTest,
|
||||
::testing::Values(getTestData_2in_2out()),
|
||||
FrontEndCutModelTest::getTestCaseName);
|
@ -7,7 +7,7 @@
|
||||
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDJsonConfigTest = FrontEndJsonConfigTest;
|
||||
using PaddleJsonConfigTest = FrontEndJsonConfigTest;
|
||||
|
||||
static JsonConfigFEParam getTestData() {
|
||||
JsonConfigFEParam res;
|
||||
@ -17,7 +17,7 @@ static JsonConfigFEParam getTestData() {
|
||||
return res;
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDJsonConfigTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleJsonConfigTest,
|
||||
FrontEndJsonConfigTest,
|
||||
::testing::Values(getTestData()),
|
||||
FrontEndJsonConfigTest::getTestCaseName);
|
@ -9,7 +9,7 @@
|
||||
using namespace ngraph;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDCutTest = FrontEndLoadFromTest;
|
||||
using PaddleCutTest = FrontEndLoadFromTest;
|
||||
|
||||
static LoadFromFEParam getTestData() {
|
||||
LoadFromFEParam res;
|
||||
@ -22,7 +22,7 @@ static LoadFromFEParam getTestData() {
|
||||
return res;
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDCutTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleCutTest,
|
||||
FrontEndLoadFromTest,
|
||||
::testing::Values(getTestData()),
|
||||
FrontEndLoadFromTest::getTestCaseName);
|
@ -18,7 +18,7 @@ using namespace InferenceEngine;
|
||||
using namespace ngraph;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDFuzzyOpTest = FrontEndFuzzyOpTest;
|
||||
using PaddleFuzzyOpTest = FrontEndFuzzyOpTest;
|
||||
|
||||
static const std::vector<std::string> models{std::string("argmax"),
|
||||
std::string("argmax1"),
|
||||
@ -64,7 +64,7 @@ static const std::vector<std::string> models{std::string("argmax"),
|
||||
std::string("conv2d_strides_no_padding"),
|
||||
std::string("conv2d_strides_padding"),
|
||||
std::string("conv2d_transpose_dilation_assymetric_pads_strides"),
|
||||
// conv2d_transpose_SAME_padding(PDPD outputs wrong results),
|
||||
// conv2d_transpose_SAME_padding(Paddle outputs wrong results),
|
||||
std::string("conv2d_transpose_strides_assymetric_padding"),
|
||||
std::string("conv2d_transpose_strides_no_padding"),
|
||||
std::string("conv2d_transpose_strides_padding"),
|
||||
@ -261,9 +261,9 @@ static const std::vector<std::string> models{std::string("argmax"),
|
||||
// std::string("yolo_box_scale_xy"),
|
||||
std::string("yolo_box_uneven_wh")};
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDFuzzyOpTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleFuzzyOpTest,
|
||||
FrontEndFuzzyOpTest,
|
||||
::testing::Combine(::testing::Values(PADDLE_FE),
|
||||
::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)),
|
||||
::testing::ValuesIn(models)),
|
||||
PDPDFuzzyOpTest::getTestCaseName);
|
||||
PaddleFuzzyOpTest::getTestCaseName);
|
@ -9,7 +9,7 @@
|
||||
using namespace ngraph;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDPartialShapeTest = FrontEndPartialShapeTest;
|
||||
using PaddlePartialShapeTest = FrontEndPartialShapeTest;
|
||||
|
||||
static PartShape getTestShape_2in_2out() {
|
||||
PartShape res;
|
||||
@ -57,7 +57,7 @@ static PartShape getTestShape_conv2d_relu() {
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(
|
||||
PDPDPartialShapeTest,
|
||||
PaddlePartialShapeTest,
|
||||
FrontEndPartialShapeTest,
|
||||
::testing::Combine(::testing::Values(BaseFEParam{PADDLE_FE, std::string(TEST_PADDLE_MODELS_DIRNAME)}),
|
||||
::testing::ValuesIn(std::vector<PartShape>{getTestShape_2in_2out(),
|
@ -53,7 +53,7 @@ std::vector<std::string> tensor_names = {
|
||||
"save_infer_model/scale_5.tmp_1",
|
||||
};
|
||||
|
||||
TEST(PDPD_Places, check_tensor_names) {
|
||||
TEST(Paddle_Places, check_tensor_names) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -67,7 +67,7 @@ TEST(PDPD_Places, check_tensor_names) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_input_outputs) {
|
||||
TEST(Paddle_Places, check_input_outputs) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -101,7 +101,7 @@ TEST(PDPD_Places, check_input_outputs) {
|
||||
}
|
||||
|
||||
// all existed in the model ops have "Out" port
|
||||
TEST(PDPD_Places, check_out_port_of_all_ops) {
|
||||
TEST(Paddle_Places, check_out_port_of_all_ops) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -124,7 +124,7 @@ TEST(PDPD_Places, check_out_port_of_all_ops) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_in_out_ports_of_model_outputs) {
|
||||
TEST(Paddle_Places, check_in_out_ports_of_model_outputs) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -159,7 +159,7 @@ TEST(PDPD_Places, check_in_out_ports_of_model_outputs) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_source_target_tensors_of_model_outputs) {
|
||||
TEST(Paddle_Places, check_source_target_tensors_of_model_outputs) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -194,7 +194,7 @@ TEST(PDPD_Places, check_source_target_tensors_of_model_outputs) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_producing_consuming_ops_of_model_outputs) {
|
||||
TEST(Paddle_Places, check_producing_consuming_ops_of_model_outputs) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -230,7 +230,7 @@ TEST(PDPD_Places, check_producing_consuming_ops_of_model_outputs) {
|
||||
}
|
||||
|
||||
// check data flow [ output port -> tensor -> input port ]
|
||||
TEST(PDPD_Places, check_data_flow) {
|
||||
TEST(Paddle_Places, check_data_flow) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -270,7 +270,7 @@ TEST(PDPD_Places, check_data_flow) {
|
||||
// -> input_port_2
|
||||
// -> input_port_N]
|
||||
// input_port, input_port_2, ... input_port_N are equal data
|
||||
TEST(PDPD_Places, check_tensor_to_multiple_ports) {
|
||||
TEST(Paddle_Places, check_tensor_to_multiple_ports) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -297,7 +297,7 @@ TEST(PDPD_Places, check_tensor_to_multiple_ports) {
|
||||
}
|
||||
|
||||
// consuming ops should be equal for tensor place and producing output port
|
||||
TEST(PDPD_Places, check_consuming_ops) {
|
||||
TEST(Paddle_Places, check_consuming_ops) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -339,7 +339,7 @@ TEST(PDPD_Places, check_consuming_ops) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_consuming_ops_2) {
|
||||
TEST(Paddle_Places, check_consuming_ops_2) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -379,7 +379,7 @@ TEST(PDPD_Places, check_consuming_ops_2) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_producing_ops) {
|
||||
TEST(Paddle_Places, check_producing_ops) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -402,7 +402,7 @@ TEST(PDPD_Places, check_producing_ops) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_input_output_ports_dy_idx) {
|
||||
TEST(Paddle_Places, check_input_output_ports_dy_idx) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
||||
@ -429,7 +429,7 @@ TEST(PDPD_Places, check_input_output_ports_dy_idx) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST(PDPD_Places, check_ops_tensors_by_idx) {
|
||||
TEST(Paddle_Places, check_ops_tensors_by_idx) {
|
||||
FrontEndTestUtils::setupTestEnv();
|
||||
auto fem = FrontEndManager();
|
||||
FrontEnd::Ptr frontend;
|
@ -9,7 +9,7 @@
|
||||
using namespace ngraph;
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDCutTest = FrontEndElementTypeTest;
|
||||
using PaddleCutTest = FrontEndElementTypeTest;
|
||||
|
||||
static SetTypeFEParam getTestData_relu() {
|
||||
SetTypeFEParam res;
|
||||
@ -19,7 +19,7 @@ static SetTypeFEParam getTestData_relu() {
|
||||
return res;
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDCutTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleCutTest,
|
||||
FrontEndElementTypeTest,
|
||||
::testing::Values(getTestData_relu()),
|
||||
FrontEndElementTypeTest::getTestCaseName);
|
@ -8,7 +8,7 @@
|
||||
|
||||
using namespace ov::frontend;
|
||||
|
||||
using PDPDTelemetryTest = FrontEndTelemetryTest;
|
||||
using PaddleTelemetryTest = FrontEndTelemetryTest;
|
||||
|
||||
static TelemetryFEParam getTestData() {
|
||||
TelemetryFEParam res;
|
||||
@ -22,7 +22,7 @@ static TelemetryFEParam getTestData() {
|
||||
return res;
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_SUITE_P(PDPDTelemetryTest,
|
||||
INSTANTIATE_TEST_SUITE_P(PaddleTelemetryTest,
|
||||
FrontEndTelemetryTest,
|
||||
::testing::Values(getTestData()),
|
||||
FrontEndTelemetryTest::getTestCaseName);
|
@ -33,7 +33,7 @@ exe = fluid.Executor(fluid.CPUPlace())
|
||||
exe.run(fluid.default_startup_program())
|
||||
inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2}
|
||||
var = [relu3a, relu3b]
|
||||
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
|
||||
fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out"), list(inp_dict.keys()), var, exe,
|
||||
model_filename="2in_2out.pdmodel", params_filename="2in_2out.pdiparams")
|
@ -33,7 +33,7 @@ exe = fluid.Executor(fluid.CPUPlace())
|
||||
exe.run(fluid.default_startup_program())
|
||||
inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2}
|
||||
var = [relu3a, relu3b]
|
||||
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
|
||||
fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out_dynbatch"), list(inp_dict.keys()), var, exe,
|
||||
model_filename="2in_2out_dynbatch.pdmodel", params_filename="2in_2out_dynbatch.pdiparams")
|
@ -0,0 +1,60 @@
|
||||
#
|
||||
# pool2d paddle model generator
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
data_type = 'float32'
|
||||
|
||||
|
||||
def paddle_argmax(name : str, x, axis):
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = paddle.argmax(x=node_x, axis=axis)
|
||||
out = paddle.cast(out, np.float32)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
def paddle_argmax1(name : str, x):
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = paddle.argmax(x=node_x)
|
||||
out = paddle.cast(out, np.float32)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
def main():
|
||||
data = np.random.random([3,5,7,2]).astype("float32")
|
||||
axis = 0
|
||||
paddle_argmax("argmax", data, axis)
|
||||
paddle_argmax1("argmax1", data)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -3,20 +3,20 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def pdpd_assign_value(name, test_x):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=test_x.shape, dtype=test_x.dtype if test_x.dtype != np.bool else np.int32)
|
||||
node_x = pdpd.cast(node_x, dtype=test_x.dtype)
|
||||
const_value = pdpd.assign(test_x, output=None)
|
||||
result = pdpd.cast(pdpd.concat([node_x, const_value], 0), dtype=np.float32)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
def paddle_assign_value(name, test_x):
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=test_x.shape, dtype=test_x.dtype if test_x.dtype != np.bool else np.int32)
|
||||
node_x = paddle.cast(node_x, dtype=test_x.dtype)
|
||||
const_value = paddle.assign(test_x, output=None)
|
||||
result = paddle.cast(paddle.concat([node_x, const_value], 0), dtype=np.float32)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
if test_x.dtype == np.bool:
|
||||
test_x = test_x.astype(np.int32)
|
||||
|
||||
@ -49,7 +49,7 @@ def compare():
|
||||
}
|
||||
]
|
||||
for test in test_cases:
|
||||
pdpd_assign_value(test['name'], test['input'])
|
||||
paddle_assign_value(test['name'], test['input'])
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
@ -7,14 +7,14 @@ import sys
|
||||
|
||||
|
||||
def batch_norm1(name : str, x, scale, bias, mean, var, data_layout):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
scale_attr = pdpd.ParamAttr(name="scale1", initializer=pdpd.nn.initializer.Assign(scale))
|
||||
bias_attr = pdpd.ParamAttr(name="bias1", initializer=pdpd.nn.initializer.Assign(bias))
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
scale_attr = paddle.ParamAttr(name="scale1", initializer=paddle.nn.initializer.Assign(scale))
|
||||
bias_attr = paddle.ParamAttr(name="bias1", initializer=paddle.nn.initializer.Assign(bias))
|
||||
|
||||
out = pdpd.static.nn.batch_norm(node_x, epsilon=1e-5,
|
||||
out = paddle.static.nn.batch_norm(node_x, epsilon=1e-5,
|
||||
param_attr=scale_attr,
|
||||
bias_attr=bias_attr,
|
||||
moving_mean_name="bn_mean1",
|
||||
@ -22,12 +22,12 @@ def batch_norm1(name : str, x, scale, bias, mean, var, data_layout):
|
||||
use_global_stats=True,
|
||||
data_layout=data_layout)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
pdpd.static.global_scope().var("bn_mean1").get_tensor().set(mean, pdpd.CPUPlace())
|
||||
pdpd.static.global_scope().var("bn_variance1").get_tensor().set(var, pdpd.CPUPlace())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
paddle.static.global_scope().var("bn_mean1").get_tensor().set(mean, paddle.CPUPlace())
|
||||
paddle.static.global_scope().var("bn_variance1").get_tensor().set(var, paddle.CPUPlace())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -38,14 +38,14 @@ def batch_norm1(name : str, x, scale, bias, mean, var, data_layout):
|
||||
return outs[0]
|
||||
|
||||
def batch_norm2(name : str, x, scale, bias, mean, var, data_layout):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
scale_attr = pdpd.ParamAttr(name="scale2", initializer=pdpd.nn.initializer.Assign(scale))
|
||||
bias_attr = pdpd.ParamAttr(name="bias2", initializer=pdpd.nn.initializer.Assign(bias))
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
scale_attr = paddle.ParamAttr(name="scale2", initializer=paddle.nn.initializer.Assign(scale))
|
||||
bias_attr = paddle.ParamAttr(name="bias2", initializer=paddle.nn.initializer.Assign(bias))
|
||||
|
||||
out = pdpd.static.nn.batch_norm(node_x, epsilon=1e-5,
|
||||
out = paddle.static.nn.batch_norm(node_x, epsilon=1e-5,
|
||||
param_attr=scale_attr,
|
||||
bias_attr=bias_attr,
|
||||
moving_mean_name="bn_mean2",
|
||||
@ -53,12 +53,12 @@ def batch_norm2(name : str, x, scale, bias, mean, var, data_layout):
|
||||
use_global_stats=True,
|
||||
data_layout=data_layout)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
pdpd.static.global_scope().var("bn_mean2").get_tensor().set(mean, pdpd.CPUPlace())
|
||||
pdpd.static.global_scope().var("bn_variance2").get_tensor().set(var, pdpd.CPUPlace())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
paddle.static.global_scope().var("bn_mean2").get_tensor().set(mean, paddle.CPUPlace())
|
||||
paddle.static.global_scope().var("bn_variance2").get_tensor().set(var, paddle.CPUPlace())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -69,7 +69,7 @@ def batch_norm2(name : str, x, scale, bias, mean, var, data_layout):
|
||||
return outs[0]
|
||||
|
||||
def main():
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
data = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
|
||||
# data layout is NCHW
|
||||
scale = np.array([1.0, 1.5]).astype(np.float32)
|
@ -3,19 +3,19 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def pdpd_bmm(x1, x2):
|
||||
import paddle as pdpd
|
||||
def paddle_bmm(x1, x2):
|
||||
import paddle
|
||||
|
||||
pdpd.enable_static()
|
||||
node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
|
||||
node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
|
||||
bmm_node = pdpd.bmm(node_x1, node_x2)
|
||||
result = pdpd.static.nn.batch_norm(bmm_node, use_global_stats=True)
|
||||
paddle.enable_static()
|
||||
node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
|
||||
node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
|
||||
bmm_node = paddle.bmm(node_x1, node_x2)
|
||||
result = paddle.static.nn.batch_norm(bmm_node, use_global_stats=True)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x1': x1, 'x2': x2},
|
||||
@ -36,4 +36,4 @@ if __name__ == "__main__":
|
||||
[30., 31., 32., 33., 34.,]]]).astype(np.float32)
|
||||
|
||||
input2 = np.ones([1, 5, 7]).astype('float32')
|
||||
pdpd_result = pdpd_bmm(input1, input2)
|
||||
paddle_result = paddle_bmm(input1, input2)
|
@ -6,17 +6,17 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
def clip(name: str, x, min, max):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = pdpd.fluid.layers.clip(node_x, min=min, max=max)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = paddle.fluid.layers.clip(node_x, min=min, max=max)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -17,6 +17,6 @@ exe = fluid.Executor(fluid.CPUPlace())
|
||||
exe.run(fluid.default_startup_program())
|
||||
inp_dict = {'x': inp_blob}
|
||||
var = [test_layer]
|
||||
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
|
||||
fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d"), list(inp_dict.keys()), var, exe)
|
@ -1,31 +1,31 @@
|
||||
from save_model import saveModel
|
||||
import numpy as np
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
|
||||
def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog):
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(start_prog)
|
||||
outs = exe.run(
|
||||
feed={'x': input_x},
|
||||
fetch_list=fetch_list,
|
||||
program=main_prog)
|
||||
|
||||
with pdpd.static.program_guard(main_prog, start_prog):
|
||||
with paddle.static.program_guard(main_prog, start_prog):
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x],
|
||||
outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
|
||||
def pdpd_conv2d(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True):
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
data = pdpd.static.data(name='x', shape=input_shape, dtype='float32')
|
||||
weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel))
|
||||
conv2d = pdpd.static.nn.conv2d(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4],
|
||||
def paddle_conv2d(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True):
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
data = paddle.static.data(name='x', shape=input_shape, dtype='float32')
|
||||
weight_attr = paddle.ParamAttr(name="conv2d_weight", initializer=paddle.nn.initializer.Assign(kernel))
|
||||
conv2d = paddle.static.nn.conv2d(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4],
|
||||
padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn)
|
||||
run_and_save_model(input_x, name, data, conv2d, main_program, startup_program)
|
||||
|
||||
@ -135,7 +135,7 @@ if __name__ == "__main__":
|
||||
]
|
||||
for test in test_cases:
|
||||
|
||||
pdpd_conv2d(test['input_x'], test['name'], test["input_shape"],
|
||||
paddle_conv2d(test['input_x'], test['name'], test["input_shape"],
|
||||
test['kernel'], test['dilation'],
|
||||
test['padding'],
|
||||
test['stride'],
|
@ -19,7 +19,7 @@ exe = fluid.Executor(fluid.CPUPlace())
|
||||
exe.run(fluid.default_startup_program())
|
||||
inp_dict = {'xxx': inp_blob}
|
||||
var = [relu]
|
||||
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
|
||||
fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_relu"), list(inp_dict.keys()), var, exe,
|
||||
model_filename="conv2d_relu.pdmodel", params_filename="conv2d_relu.pdiparams")
|
@ -16,7 +16,7 @@ exe = fluid.Executor(fluid.CPUPlace())
|
||||
exe.run(fluid.default_startup_program())
|
||||
inp_dict = {'x': inp_blob}
|
||||
var = [test_layer]
|
||||
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
|
||||
fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_s"), list(inp_dict.keys()), var, exe,
|
||||
model_filename="conv2d.pdmodel", params_filename="conv2d.pdiparams")
|
@ -1,30 +1,30 @@
|
||||
import numpy as np
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog):
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(start_prog)
|
||||
outs = exe.run(
|
||||
feed={'x': input_x},
|
||||
fetch_list=fetch_list,
|
||||
program=main_prog)
|
||||
with pdpd.static.program_guard(main_prog, start_prog):
|
||||
with paddle.static.program_guard(main_prog, start_prog):
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x],
|
||||
outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
|
||||
def pdpd_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True):
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
data = pdpd.static.data(name='x', shape=input_shape, dtype='float32')
|
||||
weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel))
|
||||
conv2d = pdpd.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4],
|
||||
def paddle_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True):
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
data = paddle.static.data(name='x', shape=input_shape, dtype='float32')
|
||||
weight_attr = paddle.ParamAttr(name="conv2d_weight", initializer=paddle.nn.initializer.Assign(kernel))
|
||||
conv2d = paddle.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4],
|
||||
padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn)
|
||||
run_and_save_model(input_x, name, data, conv2d, main_program, startup_program)
|
||||
|
||||
@ -134,7 +134,7 @@ if __name__ == "__main__":
|
||||
]
|
||||
for test in test_cases:
|
||||
|
||||
pdpd_conv2d_transpose(test['input_x'], test['name'], test["input_shape"],
|
||||
paddle_conv2d_transpose(test['input_x'], test['name'], test["input_shape"],
|
||||
test['kernel'], test['dilation'],
|
||||
test['padding'],
|
||||
test['stride'],
|
@ -3,23 +3,23 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
data_type = 'float32'
|
||||
|
||||
def cumsum(name:str, x, axis, dtype=None):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.cumsum(data, axis, dtype=dtype)
|
||||
out = pdpd.cast(out, np.float32)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
data = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.cumsum(data, axis, dtype=dtype)
|
||||
out = paddle.cast(out, np.float32)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -0,0 +1,47 @@
|
||||
#
|
||||
# pool2d paddle model generator
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def paddle_dropout(name : str, x, p, paddle_attrs):
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = paddle.nn.functional.dropout(x=node_x, p=p, training=paddle_attrs['training'], mode=paddle_attrs['mode'])
|
||||
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x],
|
||||
outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
|
||||
def main():
|
||||
p=0.5
|
||||
data = np.random.random(size=(3, 10, 3, 7)).astype('float32')
|
||||
paddle_attrs = {
|
||||
'training' : False,
|
||||
'mode' : "downscale_in_infer"
|
||||
}
|
||||
paddle_attrs2 = {
|
||||
'training' : False,
|
||||
'mode' : "upscale_in_train"
|
||||
}
|
||||
paddle_dropout("dropout", data, p, paddle_attrs)
|
||||
paddle_dropout("dropout_upscale_in_train", data, p, paddle_attrs2)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -7,19 +7,19 @@ from save_model import saveModel
|
||||
|
||||
|
||||
def elementwise_add(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
|
||||
node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_add(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype)
|
||||
node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_add(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
||||
@ -29,19 +29,19 @@ def elementwise_add(name : str, x, y, axis, in_dtype):
|
||||
|
||||
|
||||
def elementwise_sub(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype)
|
||||
node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_sub(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype)
|
||||
node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_sub(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
||||
@ -51,19 +51,19 @@ def elementwise_sub(name : str, x, y, axis, in_dtype):
|
||||
|
||||
|
||||
def elementwise_div(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_div(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_div(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
||||
@ -73,19 +73,19 @@ def elementwise_div(name : str, x, y, axis, in_dtype):
|
||||
|
||||
|
||||
def elementwise_mul(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_mul(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_mul(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
||||
@ -95,19 +95,19 @@ def elementwise_mul(name : str, x, y, axis, in_dtype):
|
||||
|
||||
|
||||
def elementwise_min(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_min(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_min(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
||||
@ -117,19 +117,19 @@ def elementwise_min(name : str, x, y, axis, in_dtype):
|
||||
|
||||
|
||||
def elementwise_max(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_max(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_max(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
||||
@ -139,19 +139,19 @@ def elementwise_max(name : str, x, y, axis, in_dtype):
|
||||
|
||||
|
||||
def elementwise_pow(name : str, x, y, axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = pdpd.fluid.layers.nn.elementwise_pow(node_x, node_y, axis=axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype)
|
||||
out = paddle.fluid.layers.nn.elementwise_pow(node_x, node_y, axis=axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
fetch_list=[out])
|
@ -97,10 +97,10 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar
ng_result = ngraph_embedding(ids, vocab_embeddings, vocab_size, embedding_dim, padding_idx, sparse)

ng_result = list(ng_result.values())[0]
pdpd_result = list(outputs.values())[0]
paddle_result = list(outputs.values())[0]

match = np.all(np.isclose(
pdpd_result, ng_result, rtol=1e-4, atol=1e-5))
paddle_result, ng_result, rtol=1e-4, atol=1e-5))

prefix_color = '\n\033[92m' if match else '\n\033[91m'
print(prefix_color +
@ -111,11 +111,11 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar
np.set_printoptions(suppress=True)

print(prefix_color +
'pdpd_result: {}'.format(pdpd_result) + '\033[0m\n')
'paddle_result: {}'.format(paddle_result) + '\033[0m\n')
print(prefix_color +
'ng_result: {}'.format(ng_result) + '\033[0m\n')

raise ValueError(name + ': OV result does not match PDPD!')
raise ValueError(name + ': OV result does not match paddle!')

return outputs
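The embedding hunk above only renames the Python-side variable (pdpd_result to paddle_result); the comparison logic is unchanged. For reference, a minimal sketch of that check, with the ngraph_embedding helper and the outputs dict assumed to come from the surrounding file rather than this diff:

import numpy as np

def results_match(paddle_outputs: dict, ng_outputs: dict) -> bool:
    # Take the first fetched tensor from each side and compare element-wise,
    # using the same tolerances as the generator (rtol=1e-4, atol=1e-5).
    paddle_result = list(paddle_outputs.values())[0]
    ng_result = list(ng_outputs.values())[0]
    return bool(np.all(np.isclose(paddle_result, ng_result, rtol=1e-4, atol=1e-5)))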
@ -7,19 +7,19 @@ import sys


def equal(name : str, x, y):
import paddle as pdpd
pdpd.enable_static()
import paddle
paddle.enable_static()

node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
node_y = pdpd.static.data(name='y', shape=y.shape, dtype='float32')
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')

out = pdpd.equal(node_x, node_y)
out = pdpd.cast(out, np.float32)
out = paddle.equal(node_x, node_y)
out = paddle.cast(out, np.float32)

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x, 'y': y},
@ -32,7 +32,7 @@ def equal(name : str, x, y):


def main():
import paddle as pdpd
import paddle
data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32)
@ -7,16 +7,16 @@ import sys


def exp(name: str, x):
import paddle as pdpd
pdpd.enable_static()
import paddle
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)
out = pdpd.fluid.layers.exp(x=node_x)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
out = paddle.fluid.layers.exp(x=node_x)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
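Every generator touched by this rename follows the same static-graph skeleton, which is why the pdpd-to-paddle substitution is purely mechanical. A condensed sketch of that shared pattern, assuming a generic single-input op passed in as build_op (a hypothetical parameter) and the saveModel helper from save_model.py:

import sys
import numpy as np
import paddle
from save_model import saveModel

def generate(name, x, build_op):
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = build_op(node_x)  # e.g. lambda t: paddle.fluid.layers.exp(x=t)
        exe = paddle.static.Executor(paddle.static.cpu_places(1)[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())
        outs = exe.run(feed={'x': x}, fetch_list=[out])
        saveModel(name, exe, feedkeys=['x'], fetchlist=[out],
                  inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
    return outs[0]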
@ -3,23 +3,23 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys

data_type = 'float32'


def expand_v2(name:str, x, shape:list):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
out = pdpd.expand(node_x, shape=shape, name='expand_v2')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
out = paddle.expand(node_x, shape=shape, name='expand_v2')

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -32,22 +32,22 @@ def expand_v2(name:str, x, shape:list):


def expand_v2_tensor(name:str, x, out_shape, use_tensor_in_list):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
if use_tensor_in_list:
out_shape[0] = pdpd.assign(np.array((out_shape[0],)).astype('int32'))
out = pdpd.expand(node_x, shape=out_shape, name='expand_v2')
out_shape[0] = paddle.assign(np.array((out_shape[0],)).astype('int32'))
out = paddle.expand(node_x, shape=out_shape, name='expand_v2')
else:
out_shape = np.array(out_shape).astype('int32')
node_shape = pdpd.assign(out_shape, output=None)
out = pdpd.expand(node_x, shape=node_shape, name='expand_v2')
node_shape = paddle.assign(out_shape, output=None)
out = paddle.expand(node_x, shape=node_shape, name='expand_v2')

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -3,23 +3,23 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys

data_type = 'float32'

def fill_any_like(name:str, x, value, dtype=None):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.full_like(data, value, dtype=dtype)
out = pdpd.cast(out, np.float32)
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
data = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
out = paddle.full_like(data, value, dtype=dtype)
out = paddle.cast(out, np.float32)

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -0,0 +1,97 @@
#
# fill_const paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle
import sys


def fill_constant(name : str, shape : list, dtype, value):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
x1 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant')
x2 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant')
out = paddle.add(paddle.cast(x1, np.float32), paddle.cast(x2, np.float32))
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
fetch_list=[out])

saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]


def fill_constant_tensor(name : str, shape : list, dtype, value):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_value = paddle.static.data(name='value', shape=[1], dtype=dtype)
x1 = paddle.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1')
out = paddle.cast(x1, np.float32)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={"value": value},
fetch_list=[out])

saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]


def fill_constant_shape_tensor(name : str, shape, dtype, value):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_shape = paddle.fluid.layers.fill_constant(shape=[2], value=shape, dtype='int32', name='shape')
x1 = paddle.fluid.layers.fill_constant(shape=node_shape, value=value, dtype=dtype, name='fill_constant')
out = paddle.cast(x1, np.float32)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
fetch_list=[out])

saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]


def fill_constant_shape_tensor_list(name : str, shape: list, dtype, value):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_shape = paddle.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape')
x1 = paddle.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant')
out = paddle.cast(x1, np.float32)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
fetch_list=[out])

saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]


def main():
fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03)
fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2)
fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4)
fill_constant_tensor("fill_constant_tensor", [2, 3, 4], 'float32', 0.05)
fill_constant_shape_tensor("fill_constant_shape_tensor", 2, 'float32', 0.05)
fill_constant_shape_tensor_list("fill_constant_shape_tensor_list", 2, 'float32', 0.05)


if __name__ == "__main__":
main()
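Like the other generators, the new fill_constant script writes its inference models under the directory passed as sys.argv[1]. The saved artefacts can be sanity-checked from Python before the gtest side consumes them; a hedged sketch using the fluid inference-model API that also appears later in this diff (the directory name and on-disk layout depend on the saveModel helper, so treat the path as illustrative):

import paddle
from paddle import fluid

paddle.enable_static()
exe = fluid.Executor(fluid.CPUPlace())
# 'fill_constant' is the generator's `name` argument; the exact path is an assumption.
program, feed_names, fetch_targets = fluid.io.load_inference_model('fill_constant', exe)
outs = exe.run(program, feed={}, fetch_list=fetch_targets)
print(outs[0])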
@ -3,24 +3,24 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
data_type = 'float32'
|
||||
|
||||
def fill_constant_batch_size_like(name : str, x, shape, dtype, value, input_dim_idx=0, output_dim_idx=0):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
like = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
like = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \
|
||||
value=value, dtype=dtype, \
|
||||
output_dim_idx=output_dim_idx, input_dim_idx=input_dim_idx)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -6,18 +6,18 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
def generate_flatten_contiguous_range(name : str, x, start_axis, stop_axis, in_dtype):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
out = pdpd.flatten(node_x, start_axis, stop_axis)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
out = paddle.flatten(node_x, start_axis, stop_axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
@ -3,22 +3,22 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
data_type = 'float32'
|
||||
|
||||
def gelu(name:str, x, approximate=False):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.fluid.layers.gelu(data, approximate=approximate)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
data = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.fluid.layers.gelu(data, approximate=approximate)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -3,27 +3,27 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
|
||||
def greater_equal(name : str, x, y, data_type, cast_to_fp32=False):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='input_x', shape=x.shape, dtype=data_type)
|
||||
node_y = pdpd.static.data(name='input_y', shape=y.shape, dtype=data_type)
|
||||
out = pdpd.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal')
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='input_x', shape=x.shape, dtype=data_type)
|
||||
node_y = paddle.static.data(name='input_y', shape=y.shape, dtype=data_type)
|
||||
out = paddle.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal')
|
||||
# FuzzyTest framework doesn't support boolean so cast to fp32/int32
|
||||
|
||||
if cast_to_fp32:
|
||||
data_type = "float32"
|
||||
|
||||
out = pdpd.cast(out, data_type)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.cast(out, data_type)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'input_x': x, 'input_y': y},
|
@ -3,21 +3,21 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
|
||||
def hard_sigmoid(name: str, x, slope: float = 0.2, offset: float = 0.5, data_type='float32'):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.fluid.layers.hard_sigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid')
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.fluid.layers.hard_sigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid')
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -3,21 +3,21 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
|
||||
def hard_swish(name: str, x, threshold=6.0, scale=6.0, offset=3.0, data_type='float32'):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = pdpd.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish')
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = paddle.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish')
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -1,38 +1,38 @@
|
||||
import numpy as np
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
from paddle.nn.functional import interpolate
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
|
||||
def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog):
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(start_prog)
|
||||
outs = exe.run(
|
||||
feed={'x': input_x},
|
||||
fetch_list=fetch_list,
|
||||
program=main_prog)
|
||||
|
||||
with pdpd.static.program_guard(main_prog, start_prog):
|
||||
with paddle.static.program_guard(main_prog, start_prog):
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x],
|
||||
outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs
|
||||
|
||||
|
||||
def pdpd_interpolate(x, sizes=None, scale_factor=None, mode='nearest', align_corners=True,
|
||||
def paddle_interpolate(x, sizes=None, scale_factor=None, mode='nearest', align_corners=True,
|
||||
align_mode=0, data_format='NCHW', name=None):
|
||||
pdpd.enable_static()
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
paddle.enable_static()
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
interp = interpolate(node_x, size=sizes, scale_factor=scale_factor,
|
||||
mode=mode, align_corners=align_corners, align_mode=align_mode,
|
||||
data_format=data_format, name=name)
|
||||
out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
outs = run_and_save_model(x, name, node_x, out, main_program, startup_program)
|
||||
return outs[0]
|
||||
|
||||
@ -50,7 +50,7 @@ def resize_upsample_bilinear():
|
||||
{'name': 'bilinear_upsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, [64, 64], None, mode='bilinear', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, [64, 64], None, mode='bilinear', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
|
||||
@ -67,7 +67,7 @@ def resize_downsample_bilinear():
|
||||
{'name': 'bilinear_downsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='bilinear', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data_28, [2, 4], None, mode='bilinear', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
def resize_upsample_nearest():
|
||||
@ -84,7 +84,7 @@ def resize_upsample_nearest():
|
||||
]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, test['size'], None, mode='nearest', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, test['size'], None, mode='nearest', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
|
||||
@ -97,7 +97,7 @@ def resize_downsample_nearest():
|
||||
]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data_64, test['size'], None, mode='nearest', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data_64, test['size'], None, mode='nearest', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
|
||||
@ -109,20 +109,20 @@ def nearest_upsample_tensor_size():
|
||||
[13, 14, 15, 16]
|
||||
]]], dtype=np.float32)
|
||||
sizes = np.array([8, 8], dtype=np.int32)
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
test_case = [{'name': 'nearest_upsample_tensor_size', 'align_corners': False, 'align_mode': 0}]
|
||||
for test in test_case:
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
interp = interpolate(node_x, size=node_sizes, scale_factor=None,
|
||||
mode='nearest', align_corners=test['align_corners'], align_mode=test['align_mode'],
|
||||
data_format='NCHW', name=test['name'])
|
||||
out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
outs = exe.run(
|
||||
feed={'x': data, 'sizes': sizes},
|
||||
@ -143,17 +143,17 @@ def bilinear_upsample_tensor_size():
|
||||
test_case = [{'name': 'bilinear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}]
|
||||
|
||||
for test in test_case:
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
interp = interpolate(node_x, size=node_sizes, scale_factor=None,
|
||||
mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'],
|
||||
data_format='NCHW', name=test['name'])
|
||||
out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
outs = exe.run(
|
||||
feed={'x': data, 'sizes': sizes},
|
||||
@ -174,7 +174,7 @@ def bilinear_upsample_scales():
|
||||
{'name': 'bilinear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, None, 2, mode='bilinear', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, None, 2, mode='bilinear', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
# trilinear
|
||||
@ -196,7 +196,7 @@ def resize_upsample_trilinear():
|
||||
{'name': 'trilinear_upsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, [4, 64, 64], None, mode='TRILINEAR', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, [4, 64, 64], None, mode='TRILINEAR', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCDHW', name=test['name'])
|
||||
|
||||
|
||||
@ -218,7 +218,7 @@ def resize_downsample_trilinear():
|
||||
{'name': 'trilinear_downsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data_28, [2, 2, 4], None, mode='TRILINEAR', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data_28, [2, 2, 4], None, mode='TRILINEAR', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCDHW', name=test['name'])
|
||||
|
||||
def trilinear_upsample_tensor_size():
|
||||
@ -233,17 +233,17 @@ def trilinear_upsample_tensor_size():
|
||||
test_case = [{'name': 'trilinear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}]
|
||||
|
||||
for test in test_case:
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
interp = interpolate(node_x, size=node_sizes, scale_factor=None,
|
||||
mode='TRILINEAR', align_corners=test['align_corners'], align_mode=test['align_mode'],
|
||||
data_format='NCDHW', name=test['name'])
|
||||
out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
outs = exe.run(
|
||||
feed={'x': data, 'sizes': sizes},
|
||||
@ -263,7 +263,7 @@ def trilinear_upsample_scales():
|
||||
{'name': 'trilinear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [1, 2, 2]}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, None, 3, mode='TRILINEAR', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, None, 3, mode='TRILINEAR', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCDHW', name=test['name'])
|
||||
|
||||
|
||||
@ -280,7 +280,7 @@ def resize_upsample_bicubic():
|
||||
{'name': 'bicubic_upsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, [6, 6], None, mode='bicubic', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, [6, 6], None, mode='bicubic', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
|
||||
@ -297,7 +297,7 @@ def resize_downsample_bicubic():
|
||||
{'name': 'bicubic_downsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='bicubic', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data_28, [2, 4], None, mode='bicubic', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
def bicubic_upsample_tensor_size():
|
||||
@ -312,17 +312,17 @@ def bicubic_upsample_tensor_size():
|
||||
test_case = [{'name': 'bicubic_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}]
|
||||
|
||||
for test in test_case:
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
interp = interpolate(node_x, size=node_sizes, scale_factor=None,
|
||||
mode='bicubic', align_corners=test['align_corners'], align_mode=test['align_mode'],
|
||||
data_format='NCHW', name=test['name'])
|
||||
out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
outs = exe.run(
|
||||
feed={'x': data, 'sizes': sizes},
|
||||
@ -342,7 +342,7 @@ def bicubic_upsample_scales():
|
||||
{'name': 'bicubic_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, None, 2, mode='bicubic', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, None, 2, mode='bicubic', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCHW', name=test['name'])
|
||||
|
||||
# linear
|
||||
@ -356,7 +356,7 @@ def resize_upsample_linear():
|
||||
{'name': 'linear_upsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, [6,], None, mode='linear', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, [6,], None, mode='linear', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCW', name=test['name'])
|
||||
|
||||
|
||||
@ -371,7 +371,7 @@ def resize_downsample_linear():
|
||||
{'name': 'linear_downsample_true_0', 'align_corners': True, 'align_mode': 0}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data_28, [4,], None, mode='linear', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data_28, [4,], None, mode='linear', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCW', name=test['name'])
|
||||
|
||||
def linear_upsample_tensor_size():
|
||||
@ -383,17 +383,17 @@ def linear_upsample_tensor_size():
|
||||
test_case = [{'name': 'linear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}]
|
||||
|
||||
for test in test_case:
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32')
|
||||
node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32')
|
||||
interp = interpolate(node_x, size=node_sizes, scale_factor=None,
|
||||
mode='linear', align_corners=test['align_corners'], align_mode=test['align_mode'],
|
||||
data_format='NCW', name=test['name'])
|
||||
out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
outs = exe.run(
|
||||
feed={'x': data, 'sizes': sizes},
|
||||
@ -410,7 +410,7 @@ def linear_upsample_scales():
|
||||
{'name': 'linear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}]
|
||||
|
||||
for test in test_case:
|
||||
pdpd_result = pdpd_interpolate(data, None, 2, mode='linear', align_corners=test['align_corners'],
|
||||
paddle_result = paddle_interpolate(data, None, 2, mode='linear', align_corners=test['align_corners'],
|
||||
align_mode=test['align_mode'], data_format='NCW', name=test['name'])
|
||||
|
||||
if __name__ == "__main__":
|
@ -4,23 +4,23 @@
|
||||
import numpy as np
|
||||
from paddle.fluid import param_attr
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
data_type = 'float32'
|
||||
|
||||
def layer_norm(name:str, x, begin_norm_axis, scale=True, shift=True, param_attr=None, bias_attr=None):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.static.nn.layer_norm(input=data, scale=scale, shift=shift,\
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
data = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.static.nn.layer_norm(input=data, scale=scale, shift=shift,\
|
||||
begin_norm_axis=begin_norm_axis, param_attr=param_attr, bias_attr=bias_attr)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -33,8 +33,8 @@ def layer_norm(name:str, x, begin_norm_axis, scale=True, shift=True, param_attr=
|
||||
def main():
|
||||
x = np.random.rand(8, 24, 32).astype(data_type)
|
||||
random_data = np.random.rand(24 * 32).astype(data_type)
|
||||
attr = pdpd.ParamAttr(
|
||||
initializer=pdpd.fluid.initializer.NumpyArrayInitializer(random_data))
|
||||
attr = paddle.ParamAttr(
|
||||
initializer=paddle.fluid.initializer.NumpyArrayInitializer(random_data))
|
||||
layer_norm("layer_norm", x, begin_norm_axis=1, param_attr=attr, bias_attr=attr)
|
||||
layer_norm("layer_norm_noscale", x, scale=False, begin_norm_axis=2)
|
||||
layer_norm("layer_norm_noshift", x, shift=False, begin_norm_axis=1)
|
@ -3,21 +3,21 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
|
||||
def leaky_relu(name: str, x, alpha: float = 0.02, data_type='float32'):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu')
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu')
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -3,21 +3,21 @@
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
import sys
|
||||
|
||||
|
||||
def log(name: str, x, data_type='float32'):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = pdpd.fluid.layers.log(node_x, name='log')
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = paddle.fluid.layers.log(node_x, name='log')
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -7,20 +7,20 @@ import sys
|
||||
|
||||
|
||||
def equal_logical_not(name : str, x, y):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
node_y = pdpd.static.data(name='y', shape=y.shape, dtype='float32')
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')
|
||||
|
||||
out = pdpd.equal(node_x, node_y)
|
||||
out = pdpd.logical_not(out)
|
||||
out = pdpd.cast(out, np.float32)
|
||||
out = paddle.equal(node_x, node_y)
|
||||
out = paddle.logical_not(out)
|
||||
out = paddle.cast(out, np.float32)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'y': y},
|
||||
@ -33,7 +33,7 @@ def equal_logical_not(name : str, x, y):
|
||||
|
||||
|
||||
def main():
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
|
||||
data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32)
|
||||
|
@ -3,19 +3,19 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
def matmul(name, x1, x2, x_transpose=False, y_transpose=False):
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
|
||||
pdpd.enable_static()
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
|
||||
node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
|
||||
result = pdpd.matmul(node_x1, node_x2, x_transpose, y_transpose)
|
||||
#result = pdpd.static.nn.batch_norm(mul_node, use_global_stats=True)
|
||||
paddle.enable_static()
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
|
||||
node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
|
||||
result = paddle.matmul(node_x1, node_x2, x_transpose, y_transpose)
|
||||
#result = paddle.static.nn.batch_norm(mul_node, use_global_stats=True)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x1': x1, 'x2': x2},
|
@ -2,20 +2,20 @@ import numpy as np
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
|
||||
def pdpd_matmul(name, x1, x2, x_transpose=False, y_transpose=False):
|
||||
import paddle as pdpd
|
||||
def paddle_matmul(name, x1, x2, x_transpose=False, y_transpose=False):
|
||||
import paddle
|
||||
|
||||
pdpd.enable_static()
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
|
||||
node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
|
||||
mul_node = pdpd.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose)
|
||||
result = pdpd.static.nn.batch_norm(mul_node, use_global_stats=True)
|
||||
paddle.enable_static()
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
|
||||
node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
|
||||
mul_node = paddle.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose)
|
||||
result = paddle.static.nn.batch_norm(mul_node, use_global_stats=True)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x1': x1, 'x2': x2},
|
||||
@ -44,6 +44,6 @@ if __name__ == "__main__":
|
||||
input_2x3 = np.array([[1, 2, 3],
|
||||
[4, 5, 6]]).astype(np.float32)
|
||||
|
||||
pdpd_matmul("matmul_xt", input_2x5, input_2x3, x_transpose=True, y_transpose=False)
|
||||
pdpd_matmul("matmul_yt", input_2x3, input_5x3, x_transpose=False, y_transpose=True)
|
||||
pdpd_matmul("matmul_xt_yt", input_2x5, input_5x2, x_transpose=True, y_transpose=True)
|
||||
paddle_matmul("matmul_xt", input_2x5, input_2x3, x_transpose=True, y_transpose=False)
|
||||
paddle_matmul("matmul_yt", input_2x3, input_5x3, x_transpose=False, y_transpose=True)
|
||||
paddle_matmul("matmul_xt_yt", input_2x5, input_5x2, x_transpose=True, y_transpose=True)
|
@ -12,7 +12,7 @@ import os
|
||||
def create_multi_output_model():
|
||||
paddle.enable_static()
|
||||
|
||||
# PDPD model creation and inference
|
||||
# paddle model creation and inference
|
||||
num_splits = 20
|
||||
inp_blob_1 = np.random.randn(2, num_splits, 4, 4).astype(np.float32)
|
||||
|
||||
@ -27,7 +27,7 @@ def create_multi_output_model():
|
||||
exe = fluid.Executor(fluid.CPUPlace())
|
||||
exe.run(fluid.default_startup_program())
|
||||
inp_dict = {'x': inp_blob_1}
|
||||
res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
|
||||
|
||||
fluid.io.save_inference_model(os.path.join(sys.argv[1], "multi_tensor_split"),
|
||||
list(inp_dict.keys()), var, exe,
|
@ -6,24 +6,24 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
def pad3d(name : str, x, in_dtype, pad, data_format, mode, value = 0):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype)
|
||||
|
||||
if mode == 'constant':
|
||||
pad_constant = pdpd.nn.Pad3D(padding=pad, mode=mode, value=value, data_format=data_format)
|
||||
pad_constant = paddle.nn.Pad3D(padding=pad, mode=mode, value=value, data_format=data_format)
|
||||
out = pad_constant(node_x)
|
||||
else:
|
||||
pad_other_mode = pdpd.nn.Pad3D(padding=pad, mode=mode, data_format=data_format)
|
||||
pad_other_mode = paddle.nn.Pad3D(padding=pad, mode=mode, data_format=data_format)
|
||||
out = pad_other_mode(node_x)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
@ -0,0 +1,53 @@
import numpy as np
from save_model import saveModel
import sys


def paddle_rnn_lstm(input_size, hidden_size, layers, direction):
import paddle
paddle.enable_static()
main_program = paddle.static.Program()
startup_program = paddle.static.Program()

num_of_directions = 1 if direction == 'forward' else 2
with paddle.static.program_guard(main_program, startup_program):

rnn = paddle.nn.LSTM(input_size, hidden_size, layers, direction, name="lstm")

data = paddle.static.data(name='x', shape=[4, 3, input_size], dtype='float32')
prev_h = paddle.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_1")
prev_c = paddle.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_2")

y, (h, c) = rnn(data, (prev_h, prev_c))
relu_1 = paddle.nn.functional.relu(c, name="relu_1")
relu_2 = paddle.nn.functional.relu(c, name="relu_2")
relu_3 = paddle.nn.functional.relu(c, name="relu_3")

cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
exe.run(startup_program)

outs = exe.run(
feed={'x': np.ones([4, 3, input_size]).astype(np.float32)},
fetch_list=[y, h, c],
program=main_program)
saveModel("place_test_model", exe, feedkeys=['x'],
fetchlist=[y, h, c, relu_1, relu_2, relu_3],
inputs=[np.ones([4, 3, input_size]).astype(np.float32)],
outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1])
return outs[0]


if __name__ == "__main__":

testCases = [
{
'input_size': 2,
'hidden_size': 2,
'layers': 1,
'direction': 'forward',
},
]

for test in testCases:
paddle_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction'])
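The place_test_model generator relies on the saveModel helper imported from save_model.py, which this diff does not show. Judging only from how it is called here and from the fluid.io.save_inference_model usage visible elsewhere in this commit, it presumably persists the program plus the sample inputs and outputs for the C++ unit tests. A rough, assumption-laden sketch of such a helper (names, layout, and the .npy convention are guesses, not the actual implementation):

import os
import numpy as np
from paddle import fluid

def save_model_sketch(name, exe, feedkeys, fetchlist, inputs, outputs, target_dir):
    # Assumed behaviour: write the inference program to <target_dir>/<name> and
    # keep the reference inputs/outputs next to it for the frontend unit tests.
    model_dir = os.path.join(target_dir, name)
    fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe)
    for i, arr in enumerate(inputs):
        np.save(os.path.join(model_dir, 'input{}.npy'.format(i)), arr)
    for i, arr in enumerate(outputs):
        np.save(os.path.join(model_dir, 'output{}.npy'.format(i)), arr)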
@ -8,12 +8,12 @@ from save_model import saveModel
|
||||
data_type = 'float32'
|
||||
|
||||
def pool2d(name : str, x, attrs : dict):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = pdpd.fluid.layers.pool2d(node_x,
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = paddle.fluid.layers.pool2d(node_x,
|
||||
pool_size=attrs['pool_size'],
|
||||
pool_type=attrs['pool_type'],
|
||||
pool_stride=attrs['pool_stride'],
|
||||
@ -23,10 +23,10 @@ def pool2d(name : str, x, attrs : dict):
|
||||
exclusive=attrs['exclusive'],
|
||||
data_format=attrs['data_format'])
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -37,21 +37,21 @@ def pool2d(name : str, x, attrs : dict):
|
||||
return outs[0]
|
||||
|
||||
def adaptive_pool2d(name : str, x, attrs : dict):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = pdpd.fluid.layers.adaptive_pool2d(
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = paddle.fluid.layers.adaptive_pool2d(
|
||||
input=node_x,
|
||||
pool_size=attrs['pool_size'],
|
||||
pool_type=attrs['pool_type'],
|
||||
require_index=attrs['require_index'])
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -74,7 +74,7 @@ def main():
for i, pooling_type in enumerate(pooling_types):
# example 1:
# ceil_mode = False
pdpd_attrs = {
paddle_attrs = {
# input=data_NCHW, # shape: [2, 3, 8, 8]
'pool_size' : [3,3],
'pool_type' : pooling_type,
@ -86,12 +86,12 @@ def main():
'data_format' : "NCHW"
}
# shape of out_1: [2, 3, 4, 3]
pool2d(pooling_type+'Pool_test1', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test1', data_NCHW, paddle_attrs)

# Cecilia: there is a bug of PaddlePaddle in this case.
# example 2:
# ceil_mode = True (different from example 1)
pdpd_attrs = {
paddle_attrs = {
#input=data_NCHW,
'pool_size':[3,3],
'pool_type' : pooling_type,
@ -103,11 +103,11 @@ def main():
'data_format':"NCHW"
}
# shape of out_2: [2, 3, 4, 4] which is different from out_1
pool2d(pooling_type+'Pool_test2', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test2', data_NCHW, paddle_attrs)

# example 3:
# pool_padding = "SAME" (different from example 1)
pdpd_attrs = {
paddle_attrs = {
#input=data_NCHW,
'pool_size':[3,3],
'pool_type' : pooling_type,
@ -119,11 +119,11 @@ def main():
'data_format':"NCHW"
}
# shape of out_3: [2, 3, 3, 3] which is different from out_1
pool2d(pooling_type+'Pool_test3', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test3', data_NCHW, paddle_attrs)

# example 4:
# pool_padding = "VALID" (different from example 1)
pdpd_attrs = {
paddle_attrs = {
#input=data_NCHW,
'pool_size':[3,3],
'pool_type' : pooling_type,
@ -135,12 +135,12 @@ def main():
'data_format':"NCHW"
}
# shape of out_4: [2, 3, 2, 2] which is different from out_1
pool2d(pooling_type+'Pool_test4', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test4', data_NCHW, paddle_attrs)

# example 5:
# global_pooling = True (different from example 1)
# It will be set pool_size = [8,8] and pool_padding = [0,0] actually.
pdpd_attrs = {
paddle_attrs = {
#input=data_NCHW,
'pool_size':[3,3],
'pool_type' : pooling_type,
@ -152,11 +152,11 @@ def main():
'data_format':"NCHW"
}
# shape of out_5: [2, 3, 1, 1] which is different from out_1
pool2d(pooling_type+'Pool_test5', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test5', data_NCHW, paddle_attrs)

# example 6:
# data_format = "NHWC" (different from example 1)
pdpd_attrs = {
paddle_attrs = {
#input=data_NHWC, # shape: [2, 8, 8, 3]
'pool_size':[3,3],
'pool_type' : pooling_type,
@ -168,11 +168,11 @@ def main():
'data_format':"NHWC"
}
# shape of out_6: [2, 4, 3, 3] which is different from out_1
pool2d(pooling_type+'Pool_test6', data_NHWC, pdpd_attrs)
pool2d(pooling_type+'Pool_test6', data_NHWC, paddle_attrs)

# example 7:
# pool_size is [9, 9]
pdpd_attrs = {
paddle_attrs = {
#input=data_NCHW,
'pool_size':[9,9],
'pool_type' : pooling_type,
@ -183,11 +183,11 @@ def main():
'exclusive':True,
'data_format':"NCHW"
}
pool2d(pooling_type+'Pool_test7', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test7', data_NCHW, paddle_attrs)

# example 8:
# pool_padding size is 1
pdpd_attrs = {
paddle_attrs = {
'pool_size':[3,3],
'pool_type' : pooling_type,
'pool_stride' : [3,3],
@ -197,7 +197,7 @@ def main():
'exclusive':True,
'data_format':"NCHW"
}
pool2d(pooling_type+'Pool_test8', data_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test8', data_NCHW, paddle_attrs)

#input data for test9 and test10
N_data1, C_data1, H_data1, W_data1 = 2, 3, 8, 8
@ -205,7 +205,7 @@ def main():
data1_NCHW = data1.reshape(N_data1, C_data1, H_data1, W_data1)
# example 9:
# pool_padding size is 4: [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]
pdpd_attrs = {
paddle_attrs = {
'pool_size':[3,3],
'pool_type' : pooling_type,
'pool_stride' : [3,3],
@ -215,11 +215,11 @@ def main():
'exclusive':True,
'data_format':"NCHW"
}
pool2d(pooling_type+'Pool_test9', data1_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test9', data1_NCHW, paddle_attrs)

# example 10:
# input=data_NCHW and pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]
pdpd_attrs = {
paddle_attrs = {
'pool_size':[3,3],
'pool_type' : pooling_type,
'pool_stride' : [3,3],
@ -229,11 +229,11 @@ def main():
'exclusive':True,
'data_format':"NCHW"
}
pool2d(pooling_type+'Pool_test10', data1_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test10', data1_NCHW, paddle_attrs)

# example 11:
# input=data_NCHW and poolsize is the multiply by width & height. pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]
pdpd_attrs = {
paddle_attrs = {
'pool_size': 9,
'pool_type' : pooling_type,
'pool_stride' : [3,3],
@ -243,17 +243,17 @@ def main():
'exclusive':True,
'data_format':"NCHW"
}
pool2d(pooling_type+'Pool_test11', data1_NCHW, pdpd_attrs)
pool2d(pooling_type+'Pool_test11', data1_NCHW, paddle_attrs)


# adaptive_pool2d
for i, pooling_type in enumerate(pooling_types):
pdpd_attrs = {
paddle_attrs = {
'pool_size': [3,3],
'pool_type': pooling_type,
'require_index': False
}
adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, pdpd_attrs)
adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, paddle_attrs)


if __name__ == "__main__":
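An illustrative aside, not part of the commit: the out_1 / out_2 shape comments above follow the standard pooling output-size rule. A minimal sketch, assuming a 3x3 window, stride 2 and zero padding on the 8x8 input (the exact attribute values sit in the folded hunk lines), shows why ceil_mode flips a dimension from 3 to 4:

```python
import math

# assumed attribute values: input size, kernel, stride, padding
h, k, s, p = 8, 3, 2, 0
print(math.floor((h + 2 * p - k) / s) + 1)  # ceil_mode=False -> 3
print(math.ceil((h + 2 * p - k) / s) + 1)   # ceil_mode=True  -> 4
```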
@ -3,24 +3,24 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys


def pdpd_pow(name : str, x, y, data_type):
pdpd.enable_static()
def paddle_pow(name : str, x, y, data_type):
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
out = pdpd.fluid.layers.pow(node_x, y, name='pow')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
out = paddle.fluid.layers.pow(node_x, y, name='pow')
#FuzzyTest supports int32 & float32
if data_type == "int64":
out = pdpd.cast(out, "float32")
out = pdpd.cast(out, "float32")
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
out = paddle.cast(out, "float32")
out = paddle.cast(out, "float32")
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -31,19 +31,19 @@ def pdpd_pow(name : str, x, y, data_type):
return outs[0]


def pdpd_pow_tensor(name : str, x, y, data_type):
pdpd.enable_static()
def paddle_pow_tensor(name : str, x, y, data_type):
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
node_y = pdpd.static.data(name='y', shape=y.shape, dtype=data_type)
out = pdpd.fluid.layers.pow(node_x, node_y, name='pow')
out = pdpd.cast(out, "float32")
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
node_y = paddle.static.data(name='y', shape=y.shape, dtype=data_type)
out = paddle.fluid.layers.pow(node_x, node_y, name='pow')
out = paddle.cast(out, "float32")

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x, 'y': y},
@ -84,11 +84,11 @@ def main():
]

for test in test_cases:
pdpd_pow("pow_" + test['name'], test['x'], test['y'], test['dtype'])
paddle_pow("pow_" + test['name'], test['x'], test['y'], test['dtype'])

x = np.array([0, 1, 2, -10]).astype("float32")
y = np.array([2.0]).astype("float32")
pdpd_pow_tensor("pow_y_tensor", x, y, 'float32')
paddle_pow_tensor("pow_y_tensor", x, y, 'float32')


if __name__ == "__main__":
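An illustrative aside, not part of the commit: the renamed paddle_pow generator keeps the float32 cast because the fuzzy test harness only compares FP32 outputs. A quick eager-mode reference of what the exported graph computes might look like this (input values mirror main(), the int64 dtype is an assumption to exercise the cast branch):

```python
import numpy as np

x = np.array([0, 1, 2, -10], dtype=np.int64)  # assumed dtype for the int64 branch
y = 2.0
ref = np.power(x, y).astype(np.float32)       # pow followed by the float32 cast
print(ref)                                    # [  0.   1.   4. 100.]
```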
@ -7,16 +7,16 @@ import sys


def prior_box(name: str, input_data, image_data, attrs: dict):
import paddle as pdpd
pdpd.enable_static()
import paddle
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
Input = pdpd.static.data(
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
Input = paddle.static.data(
name='Input', shape=input_data.shape, dtype=input_data.dtype)
Image = pdpd.static.data(
Image = paddle.static.data(
name='Image', shape=image_data.shape, dtype=image_data.dtype)

box, var = pdpd.fluid.layers.prior_box(Input,
box, var = paddle.fluid.layers.prior_box(Input,
Image,
min_sizes=attrs['min_sizes'],
max_sizes=attrs['max_sizes'],
@ -29,10 +29,10 @@ def prior_box(name: str, input_data, image_data, attrs: dict):
name=None,
min_max_aspect_ratios_order=attrs['min_max_aspect_ratios_order'])

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'Input': input_data, 'Image': image_data},
@ -121,4 +121,4 @@ if __name__ == "__main__":
image_h)).astype('float32')

for item in prior_box_attrs_list:
pred_pdpd = prior_box(item['name'], input_data, image_data, item)
pred_paddle = prior_box(item['name'], input_data, image_data, item)
@ -6,21 +6,21 @@ from save_model import saveModel
import sys


def pdpd_range(name : str, x, start, end, step, out_type):
import paddle as pdpd
pdpd.enable_static()
def paddle_range(name : str, x, start, end, step, out_type):
import paddle
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
# Range op only support fill_constant input, since dynamic op is not supported in ov
out = pdpd.fluid.layers.range(start, end, step, out_type)
out = pdpd.cast(out, np.float32)
out = pdpd.add(node_x, out)
#out = pdpd.cast(out, np.float32)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
out = paddle.fluid.layers.range(start, end, step, out_type)
out = paddle.cast(out, np.float32)
out = paddle.add(node_x, out)
#out = paddle.cast(out, np.float32)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -38,7 +38,7 @@ def main():
data = np.random.random([1, 5]).astype("float32")
out_type = ["float32", "int32", "int64"]
for i, dtype in enumerate(out_type):
pdpd_range("range"+str(i), data, start, end, step, dtype)
paddle_range("range"+str(i), data, start, end, step, dtype)


if __name__ == "__main__":
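An illustrative aside, not part of the commit: the exported range graphs add the generated sequence to the 'x' placeholder so the Range op is not folded into a constant. In eager terms the model computes roughly the following (start/end/step values are assumptions; main() only fixes the [1, 5] input shape):

```python
import numpy as np

start, end, step = 1.0, 11.0, 2.0                          # assumed values giving 5 elements
x = np.random.random([1, 5]).astype("float32")
ref = x + np.arange(start, end, step).astype(np.float32)   # broadcast add, shape stays (1, 5)
print(ref.shape)
```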
@ -7,16 +7,16 @@ import sys


def relu(name: str, x):
import paddle as pdpd
pdpd.enable_static()
import paddle
paddle.enable_static()

node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
out = pdpd.nn.functional.relu(node_x)
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
out = paddle.nn.functional.relu(node_x)

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -3,21 +3,21 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys


def relu6(name: str, x, threshold: float = 6.0, data_type='float32'):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
out = pdpd.fluid.layers.relu6(node_x, threshold=threshold, name='relu6')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
out = paddle.fluid.layers.relu6(node_x, threshold=threshold, name='relu6')

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -9,17 +9,17 @@ data_type = 'float32'
|
||||
|
||||
|
||||
def reshape(name : str, x, out_shape):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = pdpd.fluid.layers.reshape(x=node_x, shape=out_shape)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -32,24 +32,24 @@ def reshape(name : str, x, out_shape):
|
||||
|
||||
|
||||
def reshape_tensor(name : str, x, out_shape, use_tensor_in_list):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
|
||||
if use_tensor_in_list:
|
||||
out_shape[0] = pdpd.assign(np.array((out_shape[0],)).astype('int32'))
|
||||
out = pdpd.fluid.layers.reshape(x=node_x, shape=out_shape)
|
||||
out_shape[0] = paddle.assign(np.array((out_shape[0],)).astype('int32'))
|
||||
out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
|
||||
else:
|
||||
out_shape = np.array(out_shape).astype('int32')
|
||||
node_shape = pdpd.assign(out_shape)
|
||||
out = pdpd.fluid.layers.reshape(x=node_x, shape=node_shape)
|
||||
node_shape = paddle.assign(out_shape)
|
||||
out = paddle.fluid.layers.reshape(x=node_x, shape=node_shape)
|
||||
|
||||
out = pdpd.pow(out, 1)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
out = paddle.pow(out, 1)
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -3,32 +3,32 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def pdpd_rnn_lstm(input_size, hidden_size, layers, direction, seq_len):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
def paddle_rnn_lstm(input_size, hidden_size, layers, direction, seq_len):
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
main_program = paddle.static.Program()
|
||||
startup_program = paddle.static.Program()
|
||||
|
||||
num_of_directions = 1 if direction == 'forward' else 2
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
with paddle.static.program_guard(main_program, startup_program):
|
||||
|
||||
rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction)
|
||||
rnn = paddle.nn.LSTM(input_size, hidden_size, layers, direction)
|
||||
|
||||
data = pdpd.static.data(
|
||||
data = paddle.static.data(
|
||||
name='x', shape=[4, 3, input_size], dtype='float32')
|
||||
prev_h = pdpd.ones(
|
||||
prev_h = paddle.ones(
|
||||
shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32)
|
||||
prev_c = pdpd.ones(
|
||||
prev_c = paddle.ones(
|
||||
shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32)
|
||||
|
||||
if seq_len:
|
||||
seq_lengths = pdpd.static.data(name='sl', shape=[4], dtype='int32')
|
||||
seq_lengths = paddle.static.data(name='sl', shape=[4], dtype='int32')
|
||||
y, (h, c) = rnn(data, (prev_h, prev_c), seq_lengths)
|
||||
else:
|
||||
y, (h, c) = rnn(data, (prev_h, prev_c))
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
|
||||
if seq_len:
|
||||
@ -99,5 +99,5 @@ if __name__ == "__main__":
|
||||
]
|
||||
|
||||
for test in testCases:
|
||||
pdpd_rnn_lstm(test['input_size'], test['hidden_size'],
|
||||
paddle_rnn_lstm(test['input_size'], test['hidden_size'],
|
||||
test['layers'], test['direction'], test['seq_len'])
|
@ -0,0 +1,93 @@
#
# pool2d paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def paddle_scale(name : str, x, scale, bias, attrs : dict, data_type):
import paddle
paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
out = paddle.scale(x=node_x, scale=scale, bias=bias,
bias_after_scale=attrs['bias_after_scale'])
#FuzzyTest only support FP32 now, so cast result to fp32
out = paddle.cast(out, "float32")
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
fetch_list=[out])

saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]


def paddle_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type):
import paddle as paddle
paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
node_scale = paddle.static.data(name='scale', shape=[1], dtype='float32')
out = paddle.scale(x=node_x, scale=node_scale, bias=bias,
bias_after_scale=attrs['bias_after_scale'])
#FuzzyTest only support FP32 now, so cast result to fp32
out = paddle.cast(out, "float32")
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x, 'scale': scale},
fetch_list=[out])

saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1])

return outs[0]

def main():
scale = 2.0
bias = 1.0
data = np.random.random([2, 3]).astype("float32")

test_cases = [
"float32",
"int32",
"int64"
]

paddle_attrs = {
'bias_after_scale': True,
}
paddle_scale_tensor("scale_tensor_bias_after", data, scale, bias, paddle_attrs, 'float32')

paddle_attrs = {
'bias_after_scale': False,
}
paddle_scale_tensor("scale_tensor_bias_before", data, scale, bias, paddle_attrs, 'float32')

for test in test_cases:
data = np.random.random([2, 3]).astype(test)
paddle_attrs = {
'bias_after_scale': True,
}
paddle_scale("scale_bias_after_" + test, data, scale, bias, paddle_attrs, test)

paddle_attrs = {
'bias_after_scale': False,
}
paddle_scale("scale_bias_before_" + test, data, scale, bias, paddle_attrs, test)


if __name__ == "__main__":
main()
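An illustrative aside, not part of the commit: these generators are normally driven by the test build, which passes the dump directory as the first command-line argument. A manual run might look like the sketch below; the script file name and output directory are assumptions.

```python
import subprocess
import sys

target_dir = "test_models/paddle"  # assumed dump location
subprocess.run([sys.executable, "generate_scale.py", target_dir], check=True)
# Judging from the saveModel() calls above, each model should land under
# <target_dir>/<name>/ as <name>.pdmodel and <name>.pdiparams plus input*.npy / output*.npy.
```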
@ -6,18 +6,18 @@ from save_model import saveModel
import sys


def pdpd_shape(name : str, x):
import paddle as pdpd
pdpd.enable_static()
def paddle_shape(name : str, x):
import paddle
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
out = pdpd.shape(node_x)
out = pdpd.cast(out, np.float32)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
out = paddle.shape(node_x)
out = paddle.cast(out, np.float32)
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -31,7 +31,7 @@ def pdpd_shape(name : str, x):
def main():

data = np.random.random(size=(2, 3)).astype('float32')
pdpd_shape("shape", data)
paddle_shape("shape", data)


if __name__ == "__main__":
@ -3,21 +3,21 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys


def sigmoid(name: str, x, data_type):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
out = pdpd.fluid.layers.sigmoid(node_x, name='sigmoid')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
out = paddle.fluid.layers.sigmoid(node_x, name='sigmoid')

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -5,7 +5,7 @@ import sys
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
|
||||
from save_model import exportModel
|
||||
from save_model import saveModel
|
||||
@ -13,16 +13,16 @@ from save_model import saveModel
|
||||
data_type = 'float32'
|
||||
|
||||
def slice(name : str, x, axes : list, start : list, end : list):
|
||||
pdpd.enable_static()
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
|
||||
out = paddle.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -34,14 +34,14 @@ def slice(name : str, x, axes : list, start : list, end : list):
|
||||
|
||||
|
||||
def slice_dyn(test_shape=[2,8,10,10]):
|
||||
pdpd.disable_static()
|
||||
paddle.disable_static()
|
||||
|
||||
data = pdpd.rand(shape=test_shape, dtype='float32')
|
||||
data = paddle.rand(shape=test_shape, dtype='float32')
|
||||
|
||||
'''
|
||||
slice w/ decrease_axis
|
||||
'''
|
||||
@pdpd.jit.to_static
|
||||
@paddle.jit.to_static
|
||||
def test_slice_decrease_axis(x):
|
||||
return x[0, 1:3, :, 5]
|
||||
exportModel('slice_decrease_axis', test_slice_decrease_axis, [data], target_dir=sys.argv[1]) # output shape (2, 10)
|
||||
@ -49,15 +49,15 @@ def slice_dyn(test_shape=[2,8,10,10]):
|
||||
'''
|
||||
slice w/o decrease_axis
|
||||
'''
|
||||
@pdpd.jit.to_static
|
||||
@paddle.jit.to_static
|
||||
def test_slice(x):
|
||||
return pdpd.slice(x, axes=[0,1,3], starts=[0,1,5], ends=[1,3,6])
|
||||
return paddle.slice(x, axes=[0,1,3], starts=[0,1,5], ends=[1,3,6])
|
||||
# exportModel('slice_dyn', test_slice, [data], target_dir=sys.argv[1]) # output shape (1, 2, 10, 1) # disable it by default as this kind of test model already there. It's for comparsion only.
|
||||
|
||||
'''
|
||||
slice w/ decrease_axis of all dims
|
||||
'''
|
||||
@pdpd.jit.to_static
|
||||
@paddle.jit.to_static
|
||||
def test_slice_decrease_axis_all(x):
|
||||
return x[0, 0, 0, 0]
|
||||
exportModel('slice_decrease_axis_all', test_slice_decrease_axis_all, [data], target_dir=sys.argv[1]) # output shape (1,)
|
||||
@ -65,23 +65,23 @@ def slice_dyn(test_shape=[2,8,10,10]):
|
||||
'''
|
||||
slice w/o decrease_axis of all dims
|
||||
'''
|
||||
@pdpd.jit.to_static
|
||||
@paddle.jit.to_static
|
||||
def test_slice_alldim(x):
|
||||
return pdpd.slice(x, axes=[0,1,2,3], starts=[0,0,0,0], ends=[1,1,1,1])
|
||||
return paddle.slice(x, axes=[0,1,2,3], starts=[0,0,0,0], ends=[1,1,1,1])
|
||||
# exportModel('slice_alldim', test_slice_alldim, [data], target_dir=sys.argv[1]) # output shape (1, 1, 1, 1) # disable it by default as this kind of test model already there. It's for comparsion only.
|
||||
|
||||
'''
|
||||
a test case simulating the last reshape2 of ocrnet which accepts slice (with decrease_axes in all dims) as its parents.
|
||||
'''
|
||||
def slice_reshape(B=1, C=256, H=16, W=32):
|
||||
pdpd.disable_static()
|
||||
paddle.disable_static()
|
||||
|
||||
data = pdpd.rand(shape=[B, C, H*W], dtype='float32')
|
||||
data = paddle.rand(shape=[B, C, H*W], dtype='float32')
|
||||
|
||||
@pdpd.jit.to_static
|
||||
@paddle.jit.to_static
|
||||
def test_model(x):
|
||||
x2 = pdpd.assign([-1, -1, 16, 32]).astype('int32')
|
||||
node_reshape = pdpd.reshape(x, [0, 256, x2[2], x2[3]])
|
||||
x2 = paddle.assign([-1, -1, 16, 32]).astype('int32')
|
||||
node_reshape = paddle.reshape(x, [0, 256, x2[2], x2[3]])
|
||||
return node_reshape
|
||||
exportModel('slice_reshape', test_model, [data], target_dir=sys.argv[1])
|
||||
|
@ -7,16 +7,16 @@ from save_model import saveModel


def softmax(name: str, x, axis):
import paddle as pdpd
pdpd.enable_static()
import paddle
paddle.enable_static()

node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
out = pdpd.nn.functional.softmax(x=node_x, axis=axis)
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
out = paddle.nn.functional.softmax(x=node_x, axis=axis)

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -7,16 +7,16 @@ from save_model import saveModel


def softplus(name: str, x, beta, threshold):
import paddle as pdpd
pdpd.enable_static()
import paddle
paddle.enable_static()

node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
out = pdpd.nn.functional.softplus(x=node_x, beta=beta, threshold=threshold)
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
out = paddle.nn.functional.softplus(x=node_x, beta=beta, threshold=threshold)

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -7,17 +7,17 @@ import sys
|
||||
|
||||
|
||||
def split(name : str, x, attrs : dict):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis'])
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis'])
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -31,18 +31,18 @@ def split(name : str, x, attrs : dict):
|
||||
|
||||
|
||||
def split_dim_tensor(name : str, x, attrs : dict, dim):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
dim_node = pdpd.assign(dim)
|
||||
out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
dim_node = paddle.assign(dim)
|
||||
out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -56,19 +56,19 @@ def split_dim_tensor(name : str, x, attrs : dict, dim):
|
||||
|
||||
|
||||
def split_test_list_tensor(name : str, x, attrs : dict):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
section = attrs['num_or_sections']
|
||||
section[0] = pdpd.assign(np.array((section[0],)).astype('int32'))
|
||||
out = pdpd.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis'])
|
||||
section[0] = paddle.assign(np.array((section[0],)).astype('int32'))
|
||||
out = paddle.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis'])
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
@ -91,12 +91,12 @@ def main():
|
||||
for t in data_types:
|
||||
for s in num_or_sections:
|
||||
for i in axes:
|
||||
pdpd_attrs = {
|
||||
paddle_attrs = {
|
||||
'num_or_sections': s,
|
||||
'axis': i
|
||||
}
|
||||
data_NCHW = np.random.rand(3,9,5).astype(t)
|
||||
split("split_test{}".format(idx), data_NCHW, pdpd_attrs)
|
||||
split("split_test{}".format(idx), data_NCHW, paddle_attrs)
|
||||
idx+=1
|
||||
|
||||
split("split_test_list", data_NCHW, {
|
@ -3,22 +3,22 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys

data_type = 'float32'

def squeeze(name : str, x, axes : list):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.fluid.layers.squeeze(node_x, axes=axes, name='squeeze')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
out = paddle.fluid.layers.squeeze(node_x, axes=axes, name='squeeze')

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -7,26 +7,26 @@ import sys
|
||||
|
||||
|
||||
def stack(axis, input1, input2, input3):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
data1 = pdpd.static.data(
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
data1 = paddle.static.data(
|
||||
'data1', shape=input1.shape, dtype=input1.dtype)
|
||||
data2 = pdpd.static.data(
|
||||
data2 = paddle.static.data(
|
||||
'data2', shape=input2.shape, dtype=input2.dtype)
|
||||
data3 = pdpd.static.data(
|
||||
data3 = paddle.static.data(
|
||||
'data3', shape=input3.shape, dtype=input3.dtype)
|
||||
|
||||
if (axis == None):
|
||||
out = pdpd.paddle.stack([data1, data2, data3])
|
||||
out = paddle.paddle.stack([data1, data2, data3])
|
||||
else:
|
||||
out = pdpd.paddle.stack([data1, data2, data3], axis)
|
||||
out = paddle.paddle.stack([data1, data2, data3], axis)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={"data1": input1,
|
@ -3,22 +3,22 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys

data_type = 'float32'

def tanh(name:str, x):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.tanh(data)
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
data = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
out = paddle.tanh(data)

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -3,22 +3,22 @@
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import paddle
import sys

data_type = 'float32'

def unsqueeze(name : str, x, axes : list):
pdpd.enable_static()
paddle.enable_static()

with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
out = pdpd.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze')
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type)
out = paddle.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze')

cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
cpu = paddle.static.cpu_places(1)
exe = paddle.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
exe.run(paddle.static.default_startup_program())

outs = exe.run(
feed={'x': x},
@ -7,7 +7,7 @@ import sys
|
||||
|
||||
import os
|
||||
import numpy as np
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
|
||||
|
||||
# print numpy array like C structure
|
||||
@ -60,22 +60,22 @@ def saveModel(name, exe, feedkeys: list, fetchlist: list, inputs: list, outputs:
|
||||
np.save(os.path.join(model_dir, "output{}".format(i)), output)
|
||||
|
||||
# composited model + scattered model
|
||||
pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe)
|
||||
pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name + ".pdmodel",
|
||||
paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe)
|
||||
paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name + ".pdmodel",
|
||||
params_filename=name + ".pdiparams")
|
||||
|
||||
|
||||
def relu(name: str, x):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = pdpd.nn.functional.relu(node_x)
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = paddle.nn.functional.relu(node_x)
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
@ -6,13 +6,13 @@ from save_model import saveModel
|
||||
import sys
|
||||
|
||||
def yolo_box(name : str, x, img_size, attrs : dict):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
import paddle
|
||||
paddle.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype)
|
||||
boxes, scores = pdpd.vision.ops.yolo_box(node_x,
|
||||
with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
|
||||
node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
|
||||
node_img_size = paddle.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype)
|
||||
boxes, scores = paddle.vision.ops.yolo_box(node_x,
|
||||
node_img_size,
|
||||
anchors=attrs['anchors'],
|
||||
class_num=attrs['class_num'],
|
||||
@ -22,10 +22,10 @@ def yolo_box(name : str, x, img_size, attrs : dict):
|
||||
name=None,
|
||||
scale_x_y=attrs['scale_x_y'])
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
cpu = paddle.static.cpu_places(1)
|
||||
exe = paddle.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
exe.run(paddle.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x, 'img_size': img_size},
|
||||
@ -41,7 +41,7 @@ def yolo_box(name : str, x, img_size, attrs : dict):
|
||||
|
||||
def TEST1():
|
||||
# yolo_box
|
||||
pdpd_attrs = {
|
||||
paddle_attrs = {
|
||||
'name': "yolo_box_default",
|
||||
'anchors': [10, 13, 16, 30, 33, 23],
|
||||
'class_num': 2,
|
||||
@ -51,7 +51,7 @@ def TEST1():
|
||||
'scale_x_y': 1.0
|
||||
}
|
||||
|
||||
pdpd_attrs_clip_box = {
|
||||
paddle_attrs_clip_box = {
|
||||
'name': "yolo_box_clip_box",
|
||||
'anchors': [10, 13, 16, 30, 33, 23],
|
||||
'class_num': 2,
|
||||
@ -61,7 +61,7 @@ def TEST1():
|
||||
'scale_x_y': 1.0
|
||||
}
|
||||
|
||||
pdpd_attrs_scale_xy = {
|
||||
paddle_attrs_scale_xy = {
|
||||
'name': "yolo_box_scale_xy",
|
||||
'anchors': [10, 13, 16, 30, 33, 23],
|
||||
'class_num': 2,
|
||||
@ -71,23 +71,23 @@ def TEST1():
|
||||
'scale_x_y': 1.2
|
||||
}
|
||||
|
||||
pdpd_attrs_list = [pdpd_attrs, pdpd_attrs_clip_box, pdpd_attrs_scale_xy]
|
||||
paddle_attrs_list = [paddle_attrs, paddle_attrs_clip_box, paddle_attrs_scale_xy]
|
||||
|
||||
N = 32
|
||||
num_anchors = int(len(pdpd_attrs['anchors'])//2)
|
||||
x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13)
|
||||
num_anchors = int(len(paddle_attrs['anchors'])//2)
|
||||
x_shape = (N, num_anchors * (5 + paddle_attrs['class_num']), 13, 13)
|
||||
imgsize_shape = (N, 2)
|
||||
|
||||
data = np.random.random(x_shape).astype('float32')
|
||||
data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32')
|
||||
|
||||
for item in pdpd_attrs_list:
|
||||
pred_pdpd = yolo_box(item['name'], data, data_ImSize, item)
|
||||
for item in paddle_attrs_list:
|
||||
pred_paddle = yolo_box(item['name'], data, data_ImSize, item)
|
||||
|
||||
|
||||
def TEST2():
|
||||
# yolo_box uneven spatial width and height
|
||||
pdpd_attrs = {
|
||||
paddle_attrs = {
|
||||
'name': "yolo_box_uneven_wh",
|
||||
'anchors': [10, 13, 16, 30, 33, 23],
|
||||
'class_num': 2,
|
||||
@ -100,14 +100,14 @@ def TEST2():
|
||||
N = 16
|
||||
SPATIAL_WIDTH = 13
|
||||
SPATIAL_HEIGHT = 9
|
||||
num_anchors = int(len(pdpd_attrs['anchors'])//2)
|
||||
x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH)
|
||||
num_anchors = int(len(paddle_attrs['anchors'])//2)
|
||||
x_shape = (N, num_anchors * (5 + paddle_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH)
|
||||
imgsize_shape = (N, 2)
|
||||
|
||||
data = np.random.random(x_shape).astype('float32')
|
||||
data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32')
|
||||
|
||||
pred_pdpd = yolo_box(pdpd_attrs['name'], data, data_ImSize, pdpd_attrs)
|
||||
pred_paddle = yolo_box(paddle_attrs['name'], data, data_ImSize, paddle_attrs)
|
||||
|
||||
if __name__ == "__main__":
|
||||
TEST1()
|
@ -1,7 +1,7 @@
|
||||
import os
|
||||
import sys
|
||||
import numpy as np
|
||||
import paddle as pdpd
|
||||
import paddle
|
||||
|
||||
|
||||
#print numpy array like C structure
|
||||
@ -52,8 +52,8 @@ def saveModel(name, exe, feedkeys:list, fetchlist:list, inputs:list, outputs:lis
|
||||
np.save(os.path.join(model_dir, "output{}".format(i)), output)
|
||||
|
||||
# composited model + scattered model
|
||||
pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe)
|
||||
pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams")
|
||||
paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe)
|
||||
paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams")
|
||||
|
||||
|
||||
'''
|
||||
@ -70,17 +70,17 @@ def exportModel(name, dyn_func, input_data:list, target_dir:str):
|
||||
for idx, data in enumerate(input_data):
|
||||
input_name = 'input{}'.format(idx)
|
||||
input_specs.append(
|
||||
pdpd.static.InputSpec(shape=data.shape, dtype=data.dtype, name=input_name)
|
||||
paddle.static.InputSpec(shape=data.shape, dtype=data.dtype, name=input_name)
|
||||
)
|
||||
|
||||
# dump input
|
||||
np.save(os.path.join(model_dir, "input{}".format(idx)), data)
|
||||
|
||||
pdpd.jit.save(dyn_func, save_path, input_specs)
|
||||
paddle.jit.save(dyn_func, save_path, input_specs)
|
||||
print('saved exported model to {}'.format(save_path))
|
||||
|
||||
# infer
|
||||
model = pdpd.jit.load(save_path)
|
||||
model = paddle.jit.load(save_path)
|
||||
|
||||
result = model(*[input[:] for input in input_data])
|
||||
|
@ -1,60 +0,0 @@
|
||||
#
|
||||
# pool2d paddle model generator
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
data_type = 'float32'
|
||||
|
||||
|
||||
def pdpd_argmax(name : str, x, axis):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = pdpd.argmax(x=node_x, axis=axis)
|
||||
out = pdpd.cast(out, np.float32)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
def pdpd_argmax1(name : str, x):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = pdpd.argmax(x=node_x)
|
||||
out = pdpd.cast(out, np.float32)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
def main():
|
||||
data = np.random.random([3,5,7,2]).astype("float32")
|
||||
axis = 0
|
||||
pdpd_argmax("argmax", data, axis)
|
||||
pdpd_argmax1("argmax1", data)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -1,47 +0,0 @@
|
||||
#
|
||||
# pool2d paddle model generator
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def pdpd_dropout(name : str, x, p, pdpd_attrs):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
|
||||
out = pdpd.nn.functional.dropout(x=node_x, p=p, training=pdpd_attrs['training'], mode=pdpd_attrs['mode'])
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': x},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x],
|
||||
outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
|
||||
def main():
|
||||
p=0.5
|
||||
data = np.random.random(size=(3, 10, 3, 7)).astype('float32')
|
||||
pdpd_attrs = {
|
||||
'training' : False,
|
||||
'mode' : "downscale_in_infer"
|
||||
}
|
||||
pdpd_attrs2 = {
|
||||
'training' : False,
|
||||
'mode' : "upscale_in_train"
|
||||
}
|
||||
pdpd_dropout("dropout", data, p, pdpd_attrs)
|
||||
pdpd_dropout("dropout_upscale_in_train", data, p, pdpd_attrs2)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -1,97 +0,0 @@
|
||||
#
|
||||
# fill_const paddle model generator
|
||||
#
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import paddle as pdpd
|
||||
import sys
|
||||
|
||||
|
||||
def fill_constant(name : str, shape : list, dtype, value):
|
||||
pdpd.enable_static()
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant')
|
||||
x2 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant')
|
||||
out = pdpd.add(pdpd.cast(x1, np.float32), pdpd.cast(x2, np.float32))
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
|
||||
def fill_constant_tensor(name : str, shape : list, dtype, value):
|
||||
pdpd.enable_static()
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_value = pdpd.static.data(name='value', shape=[1], dtype=dtype)
|
||||
x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1')
|
||||
out = pdpd.cast(x1, np.float32)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
feed={"value": value},
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
|
||||
def fill_constant_shape_tensor(name : str, shape, dtype, value):
|
||||
pdpd.enable_static()
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_shape = pdpd.fluid.layers.fill_constant(shape=[2], value=shape, dtype='int32', name='shape')
|
||||
x1 = pdpd.fluid.layers.fill_constant(shape=node_shape, value=value, dtype=dtype, name='fill_constant')
|
||||
out = pdpd.cast(x1, np.float32)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
|
||||
def fill_constant_shape_tensor_list(name : str, shape: list, dtype, value):
|
||||
pdpd.enable_static()
|
||||
with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
|
||||
node_shape = pdpd.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape')
|
||||
x1 = pdpd.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant')
|
||||
out = pdpd.cast(x1, np.float32)
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
# startup program will call initializer to initialize the parameters.
|
||||
exe.run(pdpd.static.default_startup_program())
|
||||
|
||||
outs = exe.run(
|
||||
fetch_list=[out])
|
||||
|
||||
saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])
|
||||
|
||||
return outs[0]
|
||||
|
||||
|
||||
def main():
|
||||
fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03)
|
||||
fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2)
|
||||
fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4)
|
||||
fill_constant_tensor("fill_constant_tensor", [2, 3, 4], 'float32', 0.05)
|
||||
fill_constant_shape_tensor("fill_constant_shape_tensor", 2, 'float32', 0.05)
|
||||
fill_constant_shape_tensor_list("fill_constant_shape_tensor_list", 2, 'float32', 0.05)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@ -1,53 +0,0 @@
|
||||
import numpy as np
|
||||
from save_model import saveModel
|
||||
import sys
|
||||
|
||||
|
||||
def pdpd_rnn_lstm(input_size, hidden_size, layers, direction):
|
||||
import paddle as pdpd
|
||||
pdpd.enable_static()
|
||||
main_program = pdpd.static.Program()
|
||||
startup_program = pdpd.static.Program()
|
||||
|
||||
num_of_directions = 1 if direction == 'forward' else 2
|
||||
with pdpd.static.program_guard(main_program, startup_program):
|
||||
|
||||
rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction, name="lstm")
|
||||
|
||||
data = pdpd.static.data(name='x', shape=[4, 3, input_size], dtype='float32')
|
||||
prev_h = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_1")
|
||||
prev_c = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_2")
|
||||
|
||||
y, (h, c) = rnn(data, (prev_h, prev_c))
|
||||
relu_1 = pdpd.nn.functional.relu(c, name="relu_1")
|
||||
relu_2 = pdpd.nn.functional.relu(c, name="relu_2")
|
||||
relu_3 = pdpd.nn.functional.relu(c, name="relu_3")
|
||||
|
||||
cpu = pdpd.static.cpu_places(1)
|
||||
exe = pdpd.static.Executor(cpu[0])
|
||||
exe.run(startup_program)
|
||||
|
||||
outs = exe.run(
|
||||
feed={'x': np.ones([4, 3, input_size]).astype(np.float32)},
|
||||
fetch_list=[y, h, c],
|
||||
program=main_program)
|
||||
saveModel("place_test_model", exe, feedkeys=['x'],
|
||||
fetchlist=[y, h, c, relu_1, relu_2, relu_3],
|
||||
inputs=[np.ones([4, 3, input_size]).astype(np.float32)],
|
||||
outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1])
|
||||
return outs[0]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
testCases = [
|
||||
{
|
||||
'input_size': 2,
|
||||
'hidden_size': 2,
|
||||
'layers': 1,
|
||||
'direction': 'forward',
|
||||
},
|
||||
]
|
||||
|
||||
for test in testCases:
|
||||
pdpd_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction'])
|
@ -1,93 +0,0 @@
#
# scale paddle model generator
#
import numpy as np
import sys
from save_model import saveModel


def pdpd_scale(name : str, x, scale, bias, attrs : dict, data_type):
    import paddle as pdpd
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
        out = pdpd.scale(x=node_x, scale=scale, bias=bias,
                         bias_after_scale=attrs['bias_after_scale'])
        # FuzzyTest only supports FP32 now, so cast the result to fp32
        out = pdpd.cast(out, "float32")
        cpu = pdpd.static.cpu_places(1)
        exe = pdpd.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(pdpd.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def pdpd_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type):
    import paddle as pdpd
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type)
        node_scale = pdpd.static.data(name='scale', shape=[1], dtype='float32')
        out = pdpd.scale(x=node_x, scale=node_scale, bias=bias,
                         bias_after_scale=attrs['bias_after_scale'])
        # FuzzyTest only supports FP32 now, so cast the result to fp32
        out = pdpd.cast(out, "float32")
        cpu = pdpd.static.cpu_places(1)
        exe = pdpd.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(pdpd.static.default_startup_program())

        outs = exe.run(
            feed={'x': x, 'scale': scale},
            fetch_list=[out])

        saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]


def main():
    scale = 2.0
    bias = 1.0
    data = np.random.random([2, 3]).astype("float32")

    test_cases = [
        "float32",
        "int32",
        "int64"
    ]

    pdpd_attrs = {
        'bias_after_scale': True,
    }
    pdpd_scale_tensor("scale_tensor_bias_after", data, scale, bias, pdpd_attrs, 'float32')

    pdpd_attrs = {
        'bias_after_scale': False,
    }
    pdpd_scale_tensor("scale_tensor_bias_before", data, scale, bias, pdpd_attrs, 'float32')

    for test in test_cases:
        data = np.random.random([2, 3]).astype(test)
        pdpd_attrs = {
            'bias_after_scale': True,
        }
        pdpd_scale("scale_bias_after_" + test, data, scale, bias, pdpd_attrs, test)

        pdpd_attrs = {
            'bias_after_scale': False,
        }
        pdpd_scale("scale_bias_before_" + test, data, scale, bias, pdpd_attrs, test)


if __name__ == "__main__":
    main()
@ -12,8 +12,8 @@ if(ENABLE_OV_ONNX_FRONTEND)
    add_subdirectory(onnx)
endif()

if(ENABLE_OV_PDPD_FRONTEND)
    add_subdirectory(paddlepaddle)
if(ENABLE_OV_PADDLE_FRONTEND)
    add_subdirectory(paddle)
endif()

if(ENABLE_OV_IR_FRONTEND)
@ -7,8 +7,9 @@
#include <openvino/util/env_util.hpp>
#include <openvino/util/file_util.hpp>

#include "ngraph/except.hpp"
#include "openvino/frontend/exception.hpp"
#include "openvino/frontend/place.hpp"
#include "openvino/util/env_util.hpp"
#include "plugin_loader.hpp"
#include "utils.hpp"

@ -32,7 +33,7 @@ public:
        {"ir", "ir"},
        {"onnx", "onnx"},
        {"tf", "tensorflow"},
        {"paddle", "paddlepaddle"},
        {"paddle", "paddle"},
    };
    auto it = predefined_frontends.find(framework);
    std::lock_guard<std::mutex> guard(m_loading_mutex);
@ -119,7 +120,7 @@ private:
        {".xml", {"ir", "ir"}},
        {".onnx", {"onnx", "onnx"}},
        {".pb", {"tf", "tensorflow"}},
        {".pdmodel", {"paddle", "paddlepaddle"}},
        {".pdmodel", {"paddle", "paddle"}},
    };

    // List of prioritized frontends.
@ -127,7 +128,7 @@ private:
        {"ir", "ir"},
        {"onnx", "onnx"},
        {"tf", "tensorflow"},
        {"paddle", "paddlepaddle"},
        {"paddle", "paddle"},
    };
    if (variants.empty()) {
        return nullptr;
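The hunks above only change the plugin name that the frontend manager maps the public framework identifier to: callers keep using "paddle", while the plugin library it resolves to is now also named "paddle" instead of "paddlepaddle". A minimal sketch of how a caller picks up the renamed frontend, assuming the C++ FrontEndManager API of this revision (the model path is illustrative):

// Minimal sketch, not part of the diff: resolving the Paddle frontend by its
// framework name. load_by_model() would reach the same frontend through the
// ".pdmodel" entry of the extension map shown above.
#include <openvino/frontend/manager.hpp>

int main() {
    ov::frontend::FrontEndManager manager;
    auto frontend = manager.load_by_framework("paddle");   // short name, unchanged
    auto input_model = frontend->load("model.pdmodel");    // illustrative path
    auto converted = frontend->convert(input_model);       // converted ov::Model
    return converted ? 0 : 1;
}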
@ -45,7 +45,7 @@ void load_static_plugins(std::vector<PluginInfo>& res) {
        {"ir", "ir"},
        {"onnx", "onnx"},
        {"tf", "tensorflow"},
        {"paddle", "paddlepaddle"},
        {"paddle", "paddle"},
    };
    auto it = predefined_frontends.find(factory.m_name);
    if (it != predefined_frontends.end()) {
@ -2,8 +2,8 @@
# SPDX-License-Identifier: Apache-2.0
#

# TODO: Add LINKABLE_FRONTEND option when tensorflow frontend directory is moved to openvino folder
ov_add_frontend(NAME paddlepaddle
ov_add_frontend(NAME paddle
                LINKABLE_FRONTEND
                PROTOBUF_LITE
                FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format"
                LINK_LIBRARIES inference_engine_transformations)
@ -6,18 +6,21 @@

#include <openvino/frontend/extension/decoder_transformation.hpp>
#include <openvino/frontend/extension/telemetry.hpp>
#include <openvino/frontend/manager.hpp>
#include <openvino/frontend/frontend.hpp>
#include <openvino/frontend/input_model.hpp>

#include "exceptions.hpp"
#include "model.hpp"
#include "openvino/frontend/paddle/visibility.hpp"

namespace ov {
namespace frontend {
class OpPlacePDPD;
namespace paddle {

class PDPD_API FrontEndPDPD : public FrontEnd {
class OpPlace;

class PADDLE_API FrontEnd : public ov::frontend::FrontEnd {
public:
    FrontEndPDPD() = default;
    FrontEnd() = default;

    /// \brief Completely convert the remaining, not converted part of a function.
    /// \param partiallyConverted partially converted OV Model
@ -52,7 +55,7 @@ public:
    void add_extension(const std::shared_ptr<ov::Extension>& extension) override;

protected:
    /// \brief Check if FrontEndPDPD can recognize model from given parts
    /// \brief Check if FrontEnd can recognize model from given parts
    /// \param params Can be path to folder which contains __model__ file or path to
    /// .pdmodel file
    /// \return InputModel::Ptr
@ -67,12 +70,13 @@ protected:

private:
    static std::shared_ptr<Model> convert_each_node(
        const std::shared_ptr<InputModelPDPD>& model,
        const std::shared_ptr<InputModel>& frontend_model,
        std::function<std::map<std::string, OutputVector>(const std::map<std::string, Output<Node>>&,
                                                          const std::shared_ptr<OpPlacePDPD>&)> func);
                                                          const std::shared_ptr<OpPlace>&)> func);
    std::shared_ptr<TelemetryExtension> m_telemetry;
    std::vector<std::shared_ptr<DecoderTransformationExtension>> m_transformation_extensions;
};

} // namespace paddle
} // namespace frontend
} // namespace ov
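The effect of this hunk at a call site, as a short sketch (the installed header path openvino/frontend/paddle/frontend.hpp is an assumption, not shown in the diff): the class formerly exposed as ov::frontend::FrontEndPDPD now lives in the ov::frontend::paddle namespace and remains usable through the common base interface.

// Minimal sketch, illustrative only.
#include <memory>
#include <openvino/frontend/paddle/frontend.hpp>   // assumed install location of this header

int main() {
    // was: auto fe = std::make_shared<ov::frontend::FrontEndPDPD>();
    auto paddle_fe = std::make_shared<ov::frontend::paddle::FrontEnd>();
    // The renamed class still derives from the generic frontend interface.
    std::shared_ptr<ov::frontend::FrontEnd> base = paddle_fe;
    return base ? 0 : 1;
}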
@ -0,0 +1,20 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "openvino/frontend/exception.hpp"

#ifdef OPENVINO_STATIC_LIBRARY
#    define PADDLE_API
#    define PADDLE_C_API
#else
#    ifdef ov_paddle_frontend_EXPORTS
#        define PADDLE_API   OPENVINO_CORE_EXPORTS
#        define PADDLE_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS
#    else
#        define PADDLE_API   OPENVINO_CORE_IMPORTS
#        define PADDLE_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS
#    endif  // ov_paddle_frontend_EXPORTS
#endif  // OPENVINO_STATIC_LIBRARY
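How the new macros are meant to be consumed, as a hedged sketch (only PADDLE_API and PADDLE_C_API come from the header above; the class and function names below are hypothetical): PADDLE_API marks C++ symbols exported from the ov_paddle_frontend library, PADDLE_C_API marks C-linkage entry points, and both collapse to nothing in a static build.

#include "openvino/frontend/paddle/visibility.hpp"

// Hypothetical exported C++ symbol of the paddle frontend library.
class PADDLE_API ExampleExportedHelper {
public:
    void run();
};

// Hypothetical C-linkage entry point; OPENVINO_EXTERN_C is folded into PADDLE_C_API.
PADDLE_C_API void example_plugin_entry_point();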