diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index 90c9aa11d17..383a22feb07 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -116,8 +116,8 @@ jobs: python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/wheel/requirements-dev.txt # For running Python API tests python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/src/requirements-dev.txt - # For running PaddlePaddle frontend unit tests - python3 -m pip install -r $(REPO_DIR)/src/core/tests/frontend/paddlepaddle/requirements_dev.txt + # For running Paddle frontend unit tests + python3 -m pip install -r $(REPO_DIR)/src/core/tests/frontend/paddle/requirements_dev.txt # For running ONNX frontend unit tests python3 -m pip install -r $(REPO_DIR)/src/core/tests/requirements_test_onnx.txt # For MO unit tests @@ -248,8 +248,8 @@ jobs: displayName: 'OV Core UT' continueOnError: false - - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/paddlepaddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-PaddlePaddle.xml - displayName: 'PaddlePaddle Frontend UT' + - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/paddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-Paddle.xml + displayName: 'Paddle Frontend UT' continueOnError: false - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/tensorflow_tests --gtest_print_time=1 --gtest_output=xml:TEST-Tensorflow.xml diff --git a/.ci/azure/windows.yml b/.ci/azure/windows.yml index 81e2b47a622..7363d058c0f 100644 --- a/.ci/azure/windows.yml +++ b/.ci/azure/windows.yml @@ -109,8 +109,8 @@ jobs: python -m pip install --upgrade pip rem For running Python API tests python -m pip install -r $(REPO_DIR)\inference-engine\ie_bridges\python\src\requirements-dev.txt - rem For running PaddlePaddle frontend unit tests - python -m pip install -r $(REPO_DIR)\src\core\tests\frontend\paddlepaddle\requirements_dev.txt + rem For running Paddle frontend unit tests + python -m pip install -r $(REPO_DIR)\src\core\tests\frontend\paddle\requirements_dev.txt rem For running ONNX frontend unit tests python -m pip install -r $(REPO_DIR)\src\core\tests\requirements_test_onnx.txt rem For MO unit tests @@ -207,8 +207,8 @@ jobs: displayName: 'OV Core UT' continueOnError: false - - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\paddlepaddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-PaddlePaddle.xml - displayName: 'PaddlePaddle Frontend UT' + - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\paddle_tests --gtest_print_time=1 --gtest_output=xml:TEST-Paddle.xml + displayName: 'Paddle Frontend UT' continueOnError: false - script: call $(SETUPVARS) && $(INSTALL_TEST_DIR)\tensorflow_tests --gtest_print_time=1 --gtest_output=xml:TEST-Tensorflow.xml diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index e1bdb66ce1d..6068d4e5af0 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -126,8 +126,8 @@ function(ie_add_plugin) if(TARGET ov_onnx_frontend) add_dependencies(${IE_PLUGIN_NAME} ov_onnx_frontend) endif() - if(TARGET ov_paddlepaddle_frontend) - add_dependencies(${IE_PLUGIN_NAME} ov_paddlepaddle_frontend) + if(TARGET ov_paddle_frontend) + add_dependencies(${IE_PLUGIN_NAME} ov_paddle_frontend) endif() if(TARGET ov_tensorflow_frontend) add_dependencies(${IE_PLUGIN_NAME} ov_tensorflow_frontend) diff --git a/cmake/features.cmake b/cmake/features.cmake index 1d0353f9fbd..8ceefe75d85 100644 --- a/cmake/features.cmake +++ b/cmake/features.cmake @@ -163,11 +163,11 @@ else() 
endif() ie_dependent_option(ENABLE_OV_ONNX_FRONTEND "Enable ONNX FrontEnd" ON "protoc_available" OFF) -ie_dependent_option(ENABLE_OV_PDPD_FRONTEND "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF) +ie_dependent_option(ENABLE_OV_PADDLE_FRONTEND "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF) ie_option(ENABLE_OV_IR_FRONTEND "Enable IR FrontEnd" ON) ie_dependent_option(ENABLE_OV_TF_FRONTEND "Enable TensorFlow FrontEnd" ON "protoc_available" OFF) ie_dependent_option(ENABLE_SYSTEM_PROTOBUF "Use system protobuf" OFF - "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PDPD_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF) + "ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_TF_FRONTEND;BUILD_SHARED_LIBS" OFF) ie_dependent_option(ENABLE_OV_CORE_UNIT_TESTS "Enables OpenVINO core unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF) ie_dependent_option(ENABLE_OV_CORE_BACKEND_UNIT_TESTS "Control the building of unit tests using backends" ON "ENABLE_OV_CORE_UNIT_TESTS" OFF) diff --git a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in index fd6ad5d66a0..5ab40ad9153 100644 --- a/cmake/templates/OpenVINOConfig.cmake.in +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -11,7 +11,7 @@ # # * `Runtime`: OpenVINO C++ and C Core & Inference Runtime, frontend common # * `ONNX`: OpenVINO ONNX frontend -# * `PaddlePaddle`: OpenVINO PaddlePaddle frontend +# * `Paddle`: OpenVINO Paddle frontend # # If no components are specified, `Runtime` component is provided: # @@ -43,8 +43,8 @@ # `openvino::frontend::onnx` # ONNX FrontEnd target (optional) # -# `openvino::frontend::paddlepaddle` -# PaddlePaddle FrontEnd target (optional) +# `openvino::frontend::paddle` +# Paddle FrontEnd target (optional) # # `openvino::frontend::tensorflow` # TensorFlow FrontEnd target (optional) @@ -63,8 +63,8 @@ # `OpenVINO_Frontend_ONNX_FOUND` # OpenVINO ONNX frontend is available # -# `OpenVINO_Frontend_PaddlePaddle_FOUND` -# OpenVINO PaddlePaddle frontend is available +# `OpenVINO_Frontend_Paddle_FOUND` +# OpenVINO Paddle frontend is available # # `OpenVINO_Frontend_TensorFlow_FOUND` # OpenVINO TensorFlow frontend is available @@ -193,12 +193,12 @@ endif() set(${CMAKE_FIND_PACKAGE_NAME}_Runtime_FOUND ON) set(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND @ENABLE_OV_ONNX_FRONTEND@) -set(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND @ENABLE_OV_PDPD_FRONTEND@) +set(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND @ENABLE_OV_PADDLE_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND @ENABLE_OV_TF_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND @ENABLE_OV_IR_FRONTEND@) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_ONNX_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND}) -set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_PaddlePaddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND}) +set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_Paddle_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND}) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_TensorFlow_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND}) set(${CMAKE_FIND_PACKAGE_NAME}_Frontend_IR_FOUND ${${CMAKE_FIND_PACKAGE_NAME}_IR_FOUND}) @@ -212,7 +212,7 @@ endif() # foreach(target openvino::runtime openvino::runtime::c - openvino::frontend::onnx openvino::frontend::paddlepaddle openvino::frontend::tensorflow) + openvino::frontend::onnx openvino::frontend::paddle openvino::frontend::tensorflow) if(TARGET ${target} AND _ov_as_external_package) _ov_target_no_deprecation_error(${target}) endif() @@ -230,6 +230,6 @@ if(_need_package_name_reset) unset(_need_package_name_reset) endif() 
-unset(${CMAKE_FIND_PACKAGE_NAME}_PaddlePaddle_FOUND) +unset(${CMAKE_FIND_PACKAGE_NAME}_Paddle_FOUND) unset(${CMAKE_FIND_PACKAGE_NAME}_ONNX_FOUND) unset(${CMAKE_FIND_PACKAGE_NAME}_TensorFlow_FOUND) diff --git a/cmake/templates/ngraphConfig.cmake.in b/cmake/templates/ngraphConfig.cmake.in index a67c3f3c473..f9fd12cb0f7 100644 --- a/cmake/templates/ngraphConfig.cmake.in +++ b/cmake/templates/ngraphConfig.cmake.in @@ -29,8 +29,8 @@ # ngraph_ov_onnx_frontend_FOUND - True if the system has ov_onnx_frontend library # ngraph::ov_onnx_frontend - ONNX FrontEnd target (optional) # -# ngraph_paddlepaddle_frontend_FOUND - True if the system has PDPD frontend -# ngraph::ov_paddlepaddle_frontend - nGraph PDPD frontend (optional) +# ngraph_paddle_frontend_FOUND - True if the system has Paddle frontend +# ngraph::ov_paddle_frontend - nGraph Paddle frontend (optional) # @PACKAGE_INIT@ @@ -56,10 +56,10 @@ if(TARGET openvino::frontend::onnx AND NOT TARGET ngraph::ov_onnx_frontend) INTERFACE_LINK_LIBRARIES openvino::frontend::onnx) endif() -if(TARGET openvino::frontend::paddlepaddle AND NOT TARGET ngraph::ov_paddlepaddle_frontend) - add_library(ngraph::ov_paddlepaddle_frontend INTERFACE IMPORTED) - set_target_properties(ngraph::ov_paddlepaddle_frontend PROPERTIES - INTERFACE_LINK_LIBRARIES openvino::frontend::paddlepaddle) +if(TARGET openvino::frontend::paddle AND NOT TARGET ngraph::ov_paddle_frontend) + add_library(ngraph::ov_paddle_frontend INTERFACE IMPORTED) + set_target_properties(ngraph::ov_paddle_frontend PROPERTIES + INTERFACE_LINK_LIBRARIES openvino::frontend::paddle) endif() if(TARGET openvino::frontend::tensorflow AND NOT TARGET ngraph::ov_tensorflow_frontend) @@ -85,7 +85,7 @@ if(ngraph_onnx_importer_FOUND) endif() endif() -set(ngraph_paddlepaddle_frontend_FOUND ${OpenVINO_Frontend_PaddlePaddle_FOUND}) +set(ngraph_paddle_frontend_FOUND ${OpenVINO_Frontend_Paddle_FOUND}) set(ngraph_tensorflow_frontend_FOUND ${OpenVINO_Frontend_TensorFlow_FOUND}) set(ngraph_ir_frontend_FOUND ${OpenVINO_Frontend_IR_FOUND}) diff --git a/cmake/test_model_zoo.cmake b/cmake/test_model_zoo.cmake index de29223baa7..23353762279 100644 --- a/cmake/test_model_zoo.cmake +++ b/cmake/test_model_zoo.cmake @@ -136,8 +136,8 @@ if(ENABLE_TESTS) add_dependencies(test_model_zoo test_pip_prerequsites) endif() - if (ENABLE_OV_PDPD_FRONTEND AND ENABLE_OV_CORE_UNIT_TESTS) - add_dependencies(test_model_zoo paddlepaddle_test_models) + if (ENABLE_OV_PADDLE_FRONTEND AND ENABLE_OV_CORE_UNIT_TESTS) + add_dependencies(test_model_zoo paddle_test_models) endif() install(DIRECTORY "${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/test_model_zoo" diff --git a/docs/IE_DG/inference_engine_intro.md b/docs/IE_DG/inference_engine_intro.md index e48b7578211..9692fabe72d 100644 --- a/docs/IE_DG/inference_engine_intro.md +++ b/docs/IE_DG/inference_engine_intro.md @@ -46,11 +46,11 @@ This library contains the classes to: Starting from 2022.1 release, OpenVINO Runtime introduced a concept of frontend plugins. 
Such plugins can be automatically dynamically loaded by OpenVINO Runtime dynamically depending on file format: * Unix* OS: - `libov_ir_frontend.so` to read a network from IR - - `libov_paddlepaddle_frontend.so` to read a network from PaddlePaddle model format + - `libov_paddle_frontend.so` to read a network from PaddlePaddle model format - `libov_onnx_frontend.so` to read a network from ONNX model format * Windows* OS: - `ov_ir_frontend.dll` to read a network from IR - - `ov_paddlepaddle_frontend.dll` to read a network from PaddlePaddle model format + - `ov_paddle_frontend.dll` to read a network from PaddlePaddle model format - `ov_onnx_frontend.dll` to read a network from ONNX model format ### Device-specific Plugin Libraries ### diff --git a/docs/doxygen/ngraph_cpp_api.config b/docs/doxygen/ngraph_cpp_api.config new file mode 100644 index 00000000000..56ab7dd7ebc --- /dev/null +++ b/docs/doxygen/ngraph_cpp_api.config @@ -0,0 +1,39 @@ +# Doxyfile 1.8.18 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +@INCLUDE = "@IE_CONFIG_BUILD@" + +EXCLUDE_SYMBOLS = + +PREDEFINED = + +EXCLUDE_PATTERNS = */python/* + +FILE_PATTERNS = *.cpp \ + *.c \ + *.hpp \ + *.h + +LAYOUT_FILE = "@NGRAPH_CPP_LAYOUT_BUILD@" + +INPUT = "@CORE_DIR@/core/include/" \ + "@FRONTENDS_DIR@/onnx/frontend/include/" \ + "@FRONTENDS_DIR@/paddle/frontend/include/" + +HTML_OUTPUT = "@NGRAPH_CPP_OUTPUT@" + +GENERATE_TAGFILE = "@DOCS_BUILD_DIR@/ngraph_cpp_api.tag" + +WARN_LOGFILE = "@DOCS_BUILD_DIR@/ngraph_cpp_api.log" diff --git a/src/core/tests/frontend/CMakeLists.txt b/src/core/tests/frontend/CMakeLists.txt index 723776d0407..971b6533119 100644 --- a/src/core/tests/frontend/CMakeLists.txt +++ b/src/core/tests/frontend/CMakeLists.txt @@ -3,8 +3,8 @@ # add_subdirectory(shared) -if (ENABLE_OV_PDPD_FRONTEND) - add_subdirectory(paddlepaddle) +if (ENABLE_OV_PADDLE_FRONTEND) + add_subdirectory(paddle) endif() if (ENABLE_OV_ONNX_FRONTEND) diff --git a/src/core/tests/frontend/paddlepaddle/CMakeLists.txt b/src/core/tests/frontend/paddle/CMakeLists.txt similarity index 85% rename from src/core/tests/frontend/paddlepaddle/CMakeLists.txt rename to src/core/tests/frontend/paddle/CMakeLists.txt index 1f759130daa..998be1684ef 100644 --- a/src/core/tests/frontend/paddlepaddle/CMakeLists.txt +++ b/src/core/tests/frontend/paddle/CMakeLists.txt @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -set(TARGET_NAME "paddlepaddle_tests") +set(TARGET_NAME "paddle_tests") file(GLOB_RECURSE SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) @@ -18,7 +18,7 @@ install(TARGETS ${TARGET_NAME} EXCLUDE_FROM_ALL) # Test model generating -ie_check_pip_package(paddlepaddle WARNING) +ie_check_pip_package(paddlepaddle WARNING paddle) set(TEST_PADDLE_MODELS_DIRNAME test_model_zoo/paddle_test_models) target_compile_definitions(${TARGET_NAME} PRIVATE -D TEST_PADDLE_MODELS_DIRNAME=\"${TEST_PADDLE_MODELS_DIRNAME}/\") @@ -39,7 +39,7 @@ if (paddlepaddle_FOUND) ${TEST_PADDLE_MODELS} DEPENDS ${PADDLE_ALL_SCRIPTS} ) - add_custom_target(paddlepaddle_test_models DEPENDS ${OUT_FILE}) + add_custom_target(paddle_test_models 
DEPENDS ${OUT_FILE}) install(DIRECTORY ${TEST_PADDLE_MODELS} DESTINATION tests/${TEST_PADDLE_MODELS_DIRNAME} @@ -51,11 +51,11 @@ else() COMMAND ${CMAKE_COMMAND} -E cmake_echo_color --red "Warning: Unable to generate PaddlePaddle test models. Running '${TARGET_NAME}' will likely fail" ) - add_custom_target(paddlepaddle_test_models DEPENDS unable_build_paddle_models.txt) + add_custom_target(paddle_test_models DEPENDS unable_build_paddle_models.txt) endif() -add_dependencies(${TARGET_NAME} paddlepaddle_test_models) -add_dependencies(${TARGET_NAME} ov_paddlepaddle_frontend) +add_dependencies(${TARGET_NAME} paddle_test_models) +add_dependencies(${TARGET_NAME} ov_paddle_frontend) # Fuzzy tests for PaddlePaddle use IE_CPU engine if (ENABLE_MKL_DNN) diff --git a/src/core/tests/frontend/paddlepaddle/basic_api.cpp b/src/core/tests/frontend/paddle/basic_api.cpp similarity index 91% rename from src/core/tests/frontend/paddlepaddle/basic_api.cpp rename to src/core/tests/frontend/paddle/basic_api.cpp index f01ff120c10..605f1df2bfb 100644 --- a/src/core/tests/frontend/paddlepaddle/basic_api.cpp +++ b/src/core/tests/frontend/paddle/basic_api.cpp @@ -9,7 +9,7 @@ using namespace ngraph; using namespace ov::frontend; -using PDPDBasicTest = FrontEndBasicTest; +using PaddleBasicTest = FrontEndBasicTest; static const std::vector models{ std::string("conv2d"), @@ -20,7 +20,7 @@ static const std::vector models{ std::string("2in_2out_dynbatch/2in_2out_dynbatch.pdmodel"), }; -INSTANTIATE_TEST_SUITE_P(PDPDBasicTest, +INSTANTIATE_TEST_SUITE_P(PaddleBasicTest, FrontEndBasicTest, ::testing::Combine(::testing::Values(PADDLE_FE), ::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)), diff --git a/src/core/tests/frontend/paddlepaddle/convert_model.cpp b/src/core/tests/frontend/paddle/convert_model.cpp similarity index 90% rename from src/core/tests/frontend/paddlepaddle/convert_model.cpp rename to src/core/tests/frontend/paddle/convert_model.cpp index 3dbb8e0ecf8..e3290e3e681 100644 --- a/src/core/tests/frontend/paddlepaddle/convert_model.cpp +++ b/src/core/tests/frontend/paddle/convert_model.cpp @@ -9,7 +9,7 @@ using namespace ov; using namespace ov::frontend; -using PDPDConvertModelTest = FrontEndConvertModelTest; +using PaddleConvertModelTest = FrontEndConvertModelTest; static const std::vector models{ std::string("conv2d"), @@ -21,7 +21,7 @@ static const std::vector models{ std::string("pool2d_dyn_hw/pool2d_dyn_hw.pdmodel"), }; -INSTANTIATE_TEST_SUITE_P(PDPDConvertModelTest, +INSTANTIATE_TEST_SUITE_P(PaddleConvertModelTest, FrontEndConvertModelTest, ::testing::Combine(::testing::Values(PADDLE_FE), ::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)), diff --git a/src/core/tests/frontend/paddlepaddle/convert_unsupported.cpp b/src/core/tests/frontend/paddle/convert_unsupported.cpp similarity index 100% rename from src/core/tests/frontend/paddlepaddle/convert_unsupported.cpp rename to src/core/tests/frontend/paddle/convert_unsupported.cpp diff --git a/src/core/tests/frontend/paddlepaddle/cut_specific_model.cpp b/src/core/tests/frontend/paddle/cut_specific_model.cpp similarity index 92% rename from src/core/tests/frontend/paddlepaddle/cut_specific_model.cpp rename to src/core/tests/frontend/paddle/cut_specific_model.cpp index cacb420770a..7c52a0b439b 100644 --- a/src/core/tests/frontend/paddlepaddle/cut_specific_model.cpp +++ b/src/core/tests/frontend/paddle/cut_specific_model.cpp @@ -9,7 +9,7 @@ using namespace ngraph; using namespace ov::frontend; -using PDPDCutTest = FrontEndCutModelTest; +using 
PaddleCutTest = FrontEndCutModelTest; static CutModelParam getTestData_2in_2out() { CutModelParam res; @@ -26,7 +26,7 @@ static CutModelParam getTestData_2in_2out() { return res; } -INSTANTIATE_TEST_SUITE_P(PDPDCutTest, +INSTANTIATE_TEST_SUITE_P(PaddleCutTest, FrontEndCutModelTest, ::testing::Values(getTestData_2in_2out()), FrontEndCutModelTest::getTestCaseName); diff --git a/src/core/tests/frontend/paddlepaddle/incorrect_cut_model.cpp b/src/core/tests/frontend/paddle/incorrect_cut_model.cpp similarity index 100% rename from src/core/tests/frontend/paddlepaddle/incorrect_cut_model.cpp rename to src/core/tests/frontend/paddle/incorrect_cut_model.cpp diff --git a/src/core/tests/frontend/paddlepaddle/json_config_extension.cpp b/src/core/tests/frontend/paddle/json_config_extension.cpp similarity index 85% rename from src/core/tests/frontend/paddlepaddle/json_config_extension.cpp rename to src/core/tests/frontend/paddle/json_config_extension.cpp index 507a3f9db23..415ca76367f 100644 --- a/src/core/tests/frontend/paddlepaddle/json_config_extension.cpp +++ b/src/core/tests/frontend/paddle/json_config_extension.cpp @@ -7,7 +7,7 @@ using namespace ov::frontend; -using PDPDJsonConfigTest = FrontEndJsonConfigTest; +using PaddleJsonConfigTest = FrontEndJsonConfigTest; static JsonConfigFEParam getTestData() { JsonConfigFEParam res; @@ -17,7 +17,7 @@ static JsonConfigFEParam getTestData() { return res; } -INSTANTIATE_TEST_SUITE_P(PDPDJsonConfigTest, +INSTANTIATE_TEST_SUITE_P(PaddleJsonConfigTest, FrontEndJsonConfigTest, ::testing::Values(getTestData()), FrontEndJsonConfigTest::getTestCaseName); diff --git a/src/core/tests/frontend/paddlepaddle/load_from.cpp b/src/core/tests/frontend/paddle/load_from.cpp similarity index 90% rename from src/core/tests/frontend/paddlepaddle/load_from.cpp rename to src/core/tests/frontend/paddle/load_from.cpp index ac3dfd51ba5..5b3e1845151 100644 --- a/src/core/tests/frontend/paddlepaddle/load_from.cpp +++ b/src/core/tests/frontend/paddle/load_from.cpp @@ -9,7 +9,7 @@ using namespace ngraph; using namespace ov::frontend; -using PDPDCutTest = FrontEndLoadFromTest; +using PaddleCutTest = FrontEndLoadFromTest; static LoadFromFEParam getTestData() { LoadFromFEParam res; @@ -22,7 +22,7 @@ static LoadFromFEParam getTestData() { return res; } -INSTANTIATE_TEST_SUITE_P(PDPDCutTest, +INSTANTIATE_TEST_SUITE_P(PaddleCutTest, FrontEndLoadFromTest, ::testing::Values(getTestData()), FrontEndLoadFromTest::getTestCaseName); diff --git a/src/core/tests/frontend/paddlepaddle/main.cpp b/src/core/tests/frontend/paddle/main.cpp similarity index 100% rename from src/core/tests/frontend/paddlepaddle/main.cpp rename to src/core/tests/frontend/paddle/main.cpp diff --git a/src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp b/src/core/tests/frontend/paddle/op_fuzzy.cpp similarity index 99% rename from src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp rename to src/core/tests/frontend/paddle/op_fuzzy.cpp index 6ae5f3a659e..f7082035140 100644 --- a/src/core/tests/frontend/paddlepaddle/op_fuzzy.cpp +++ b/src/core/tests/frontend/paddle/op_fuzzy.cpp @@ -18,7 +18,7 @@ using namespace InferenceEngine; using namespace ngraph; using namespace ov::frontend; -using PDPDFuzzyOpTest = FrontEndFuzzyOpTest; +using PaddleFuzzyOpTest = FrontEndFuzzyOpTest; static const std::vector models{std::string("argmax"), std::string("argmax1"), @@ -64,7 +64,7 @@ static const std::vector models{std::string("argmax"), std::string("conv2d_strides_no_padding"), std::string("conv2d_strides_padding"), 
std::string("conv2d_transpose_dilation_assymetric_pads_strides"), - // conv2d_transpose_SAME_padding(PDPD outputs wrong results), + // conv2d_transpose_SAME_padding(Paddle outputs wrong results), std::string("conv2d_transpose_strides_assymetric_padding"), std::string("conv2d_transpose_strides_no_padding"), std::string("conv2d_transpose_strides_padding"), @@ -261,9 +261,9 @@ static const std::vector models{std::string("argmax"), // std::string("yolo_box_scale_xy"), std::string("yolo_box_uneven_wh")}; -INSTANTIATE_TEST_SUITE_P(PDPDFuzzyOpTest, +INSTANTIATE_TEST_SUITE_P(PaddleFuzzyOpTest, FrontEndFuzzyOpTest, ::testing::Combine(::testing::Values(PADDLE_FE), ::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)), ::testing::ValuesIn(models)), - PDPDFuzzyOpTest::getTestCaseName); + PaddleFuzzyOpTest::getTestCaseName); diff --git a/src/core/tests/frontend/paddlepaddle/paddle_utils.hpp b/src/core/tests/frontend/paddle/paddle_utils.hpp similarity index 100% rename from src/core/tests/frontend/paddlepaddle/paddle_utils.hpp rename to src/core/tests/frontend/paddle/paddle_utils.hpp diff --git a/src/core/tests/frontend/paddlepaddle/partial_shape.cpp b/src/core/tests/frontend/paddle/partial_shape.cpp similarity index 96% rename from src/core/tests/frontend/paddlepaddle/partial_shape.cpp rename to src/core/tests/frontend/paddle/partial_shape.cpp index 525c4684813..80a995c4264 100644 --- a/src/core/tests/frontend/paddlepaddle/partial_shape.cpp +++ b/src/core/tests/frontend/paddle/partial_shape.cpp @@ -9,7 +9,7 @@ using namespace ngraph; using namespace ov::frontend; -using PDPDPartialShapeTest = FrontEndPartialShapeTest; +using PaddlePartialShapeTest = FrontEndPartialShapeTest; static PartShape getTestShape_2in_2out() { PartShape res; @@ -57,7 +57,7 @@ static PartShape getTestShape_conv2d_relu() { } INSTANTIATE_TEST_SUITE_P( - PDPDPartialShapeTest, + PaddlePartialShapeTest, FrontEndPartialShapeTest, ::testing::Combine(::testing::Values(BaseFEParam{PADDLE_FE, std::string(TEST_PADDLE_MODELS_DIRNAME)}), ::testing::ValuesIn(std::vector{getTestShape_2in_2out(), diff --git a/src/core/tests/frontend/paddlepaddle/places.cpp b/src/core/tests/frontend/paddle/places.cpp similarity index 96% rename from src/core/tests/frontend/paddlepaddle/places.cpp rename to src/core/tests/frontend/paddle/places.cpp index 2092920cbee..df74eb66ab1 100644 --- a/src/core/tests/frontend/paddlepaddle/places.cpp +++ b/src/core/tests/frontend/paddle/places.cpp @@ -53,7 +53,7 @@ std::vector tensor_names = { "save_infer_model/scale_5.tmp_1", }; -TEST(PDPD_Places, check_tensor_names) { +TEST(Paddle_Places, check_tensor_names) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -67,7 +67,7 @@ TEST(PDPD_Places, check_tensor_names) { } } -TEST(PDPD_Places, check_input_outputs) { +TEST(Paddle_Places, check_input_outputs) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -101,7 +101,7 @@ TEST(PDPD_Places, check_input_outputs) { } // all existed in the model ops have "Out" port -TEST(PDPD_Places, check_out_port_of_all_ops) { +TEST(Paddle_Places, check_out_port_of_all_ops) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -124,7 +124,7 @@ TEST(PDPD_Places, check_out_port_of_all_ops) { } } -TEST(PDPD_Places, check_in_out_ports_of_model_outputs) { +TEST(Paddle_Places, check_in_out_ports_of_model_outputs) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -159,7 +159,7 @@ 
TEST(PDPD_Places, check_in_out_ports_of_model_outputs) { } } -TEST(PDPD_Places, check_source_target_tensors_of_model_outputs) { +TEST(Paddle_Places, check_source_target_tensors_of_model_outputs) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -194,7 +194,7 @@ TEST(PDPD_Places, check_source_target_tensors_of_model_outputs) { } } -TEST(PDPD_Places, check_producing_consuming_ops_of_model_outputs) { +TEST(Paddle_Places, check_producing_consuming_ops_of_model_outputs) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -230,7 +230,7 @@ TEST(PDPD_Places, check_producing_consuming_ops_of_model_outputs) { } // check data flow [ output port -> tensor -> input port ] -TEST(PDPD_Places, check_data_flow) { +TEST(Paddle_Places, check_data_flow) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -270,7 +270,7 @@ TEST(PDPD_Places, check_data_flow) { // -> input_port_2 // -> input_port_N] // input_port, input_port_2, ... input_port_N are equal data -TEST(PDPD_Places, check_tensor_to_multiple_ports) { +TEST(Paddle_Places, check_tensor_to_multiple_ports) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -297,7 +297,7 @@ TEST(PDPD_Places, check_tensor_to_multiple_ports) { } // consuming ops should be equal for tensor place and producing output port -TEST(PDPD_Places, check_consuming_ops) { +TEST(Paddle_Places, check_consuming_ops) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -339,7 +339,7 @@ TEST(PDPD_Places, check_consuming_ops) { } } -TEST(PDPD_Places, check_consuming_ops_2) { +TEST(Paddle_Places, check_consuming_ops_2) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -379,7 +379,7 @@ TEST(PDPD_Places, check_consuming_ops_2) { } } -TEST(PDPD_Places, check_producing_ops) { +TEST(Paddle_Places, check_producing_ops) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -402,7 +402,7 @@ TEST(PDPD_Places, check_producing_ops) { } } -TEST(PDPD_Places, check_input_output_ports_dy_idx) { +TEST(Paddle_Places, check_input_output_ports_dy_idx) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; @@ -429,7 +429,7 @@ TEST(PDPD_Places, check_input_output_ports_dy_idx) { } } -TEST(PDPD_Places, check_ops_tensors_by_idx) { +TEST(Paddle_Places, check_ops_tensors_by_idx) { FrontEndTestUtils::setupTestEnv(); auto fem = FrontEndManager(); FrontEnd::Ptr frontend; diff --git a/src/core/tests/frontend/paddlepaddle/requirements_dev.txt b/src/core/tests/frontend/paddle/requirements_dev.txt similarity index 100% rename from src/core/tests/frontend/paddlepaddle/requirements_dev.txt rename to src/core/tests/frontend/paddle/requirements_dev.txt diff --git a/src/core/tests/frontend/paddlepaddle/set_element_type.cpp b/src/core/tests/frontend/paddle/set_element_type.cpp similarity index 87% rename from src/core/tests/frontend/paddlepaddle/set_element_type.cpp rename to src/core/tests/frontend/paddle/set_element_type.cpp index f9decce7316..a1f3d2b7b4c 100644 --- a/src/core/tests/frontend/paddlepaddle/set_element_type.cpp +++ b/src/core/tests/frontend/paddle/set_element_type.cpp @@ -9,7 +9,7 @@ using namespace ngraph; using namespace ov::frontend; -using PDPDCutTest = FrontEndElementTypeTest; +using PaddleCutTest = FrontEndElementTypeTest; static SetTypeFEParam getTestData_relu() { SetTypeFEParam 
res; @@ -19,7 +19,7 @@ static SetTypeFEParam getTestData_relu() { return res; } -INSTANTIATE_TEST_SUITE_P(PDPDCutTest, +INSTANTIATE_TEST_SUITE_P(PaddleCutTest, FrontEndElementTypeTest, ::testing::Values(getTestData_relu()), FrontEndElementTypeTest::getTestCaseName); diff --git a/src/core/tests/frontend/paddlepaddle/telemetry.cpp b/src/core/tests/frontend/paddle/telemetry.cpp similarity index 90% rename from src/core/tests/frontend/paddlepaddle/telemetry.cpp rename to src/core/tests/frontend/paddle/telemetry.cpp index 9a7f22f6567..1b2980d5610 100644 --- a/src/core/tests/frontend/paddlepaddle/telemetry.cpp +++ b/src/core/tests/frontend/paddle/telemetry.cpp @@ -8,7 +8,7 @@ using namespace ov::frontend; -using PDPDTelemetryTest = FrontEndTelemetryTest; +using PaddleTelemetryTest = FrontEndTelemetryTest; static TelemetryFEParam getTestData() { TelemetryFEParam res; @@ -22,7 +22,7 @@ static TelemetryFEParam getTestData() { return res; } -INSTANTIATE_TEST_SUITE_P(PDPDTelemetryTest, +INSTANTIATE_TEST_SUITE_P(PaddleTelemetryTest, FrontEndTelemetryTest, ::testing::Values(getTestData()), FrontEndTelemetryTest::getTestCaseName); diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_2in_2out.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_2in_2out.py similarity index 94% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_2in_2out.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_2in_2out.py index b8bb0a7cb84..8a7aa75bdc4 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_2in_2out.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_2in_2out.py @@ -33,7 +33,7 @@ exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2} var = [relu3a, relu3b] -res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out"), list(inp_dict.keys()), var, exe, model_filename="2in_2out.pdmodel", params_filename="2in_2out.pdiparams") diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_2in_2out_dynbatch.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_2in_2out_dynbatch.py similarity index 94% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_2in_2out_dynbatch.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_2in_2out_dynbatch.py index 3453189b3b7..eb68446a46d 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_2in_2out_dynbatch.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_2in_2out_dynbatch.py @@ -33,7 +33,7 @@ exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'inputX1': inp_blob1, 'inputX2': inp_blob2} var = [relu3a, relu3b] -res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) fluid.io.save_inference_model(os.path.join(sys.argv[1], "2in_2out_dynbatch"), list(inp_dict.keys()), var, exe, model_filename="2in_2out_dynbatch.pdmodel", params_filename="2in_2out_dynbatch.pdiparams") diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_argmax.py 
b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_argmax.py new file mode 100644 index 00000000000..92f57c72e93 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_argmax.py @@ -0,0 +1,60 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys +data_type = 'float32' + + +def paddle_argmax(name : str, x, axis): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.argmax(x=node_x, axis=axis) + out = paddle.cast(out, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def paddle_argmax1(name : str, x): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.argmax(x=node_x) + out = paddle.cast(out, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + data = np.random.random([3,5,7,2]).astype("float32") + axis = 0 + paddle_argmax("argmax", data, axis) + paddle_argmax1("argmax1", data) + + +if __name__ == "__main__": + main() diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_assign_value.py similarity index 57% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_assign_value.py index 10e5d5ea904..3554ed67b72 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_assign_value.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_assign_value.py @@ -3,20 +3,20 @@ from save_model import saveModel import sys -def pdpd_assign_value(name, test_x): - import paddle as pdpd - pdpd.enable_static() - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=test_x.shape, dtype=test_x.dtype if test_x.dtype != np.bool else np.int32) - node_x = pdpd.cast(node_x, dtype=test_x.dtype) - const_value = pdpd.assign(test_x, output=None) - result = pdpd.cast(pdpd.concat([node_x, const_value], 0), dtype=np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) +def paddle_assign_value(name, test_x): + import paddle + paddle.enable_static() + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=test_x.shape, dtype=test_x.dtype if 
test_x.dtype != np.bool else np.int32) + node_x = paddle.cast(node_x, dtype=test_x.dtype) + const_value = paddle.assign(test_x, output=None) + result = paddle.cast(paddle.concat([node_x, const_value], 0), dtype=np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) if test_x.dtype == np.bool: test_x = test_x.astype(np.int32) @@ -49,7 +49,7 @@ def compare(): } ] for test in test_cases: - pdpd_assign_value(test['name'], test['input']) + paddle_assign_value(test['name'], test['input']) if __name__ == "__main__": diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_batch_norm.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_batch_norm.py similarity index 61% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_batch_norm.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_batch_norm.py index fbbba99160c..a9c8d405e5b 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_batch_norm.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_batch_norm.py @@ -7,14 +7,14 @@ import sys def batch_norm1(name : str, x, scale, bias, mean, var, data_layout): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - scale_attr = pdpd.ParamAttr(name="scale1", initializer=pdpd.nn.initializer.Assign(scale)) - bias_attr = pdpd.ParamAttr(name="bias1", initializer=pdpd.nn.initializer.Assign(bias)) + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + scale_attr = paddle.ParamAttr(name="scale1", initializer=paddle.nn.initializer.Assign(scale)) + bias_attr = paddle.ParamAttr(name="bias1", initializer=paddle.nn.initializer.Assign(bias)) - out = pdpd.static.nn.batch_norm(node_x, epsilon=1e-5, + out = paddle.static.nn.batch_norm(node_x, epsilon=1e-5, param_attr=scale_attr, bias_attr=bias_attr, moving_mean_name="bn_mean1", @@ -22,12 +22,12 @@ def batch_norm1(name : str, x, scale, bias, mean, var, data_layout): use_global_stats=True, data_layout=data_layout) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) - pdpd.static.global_scope().var("bn_mean1").get_tensor().set(mean, pdpd.CPUPlace()) - pdpd.static.global_scope().var("bn_variance1").get_tensor().set(var, pdpd.CPUPlace()) + exe.run(paddle.static.default_startup_program()) + paddle.static.global_scope().var("bn_mean1").get_tensor().set(mean, paddle.CPUPlace()) + paddle.static.global_scope().var("bn_variance1").get_tensor().set(var, paddle.CPUPlace()) outs = exe.run( feed={'x': x}, @@ -38,14 +38,14 @@ def batch_norm1(name : str, x, scale, bias, mean, var, data_layout): return outs[0] def batch_norm2(name : str, x, scale, bias, mean, var, data_layout): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - scale_attr = pdpd.ParamAttr(name="scale2", initializer=pdpd.nn.initializer.Assign(scale)) - bias_attr = pdpd.ParamAttr(name="bias2", initializer=pdpd.nn.initializer.Assign(bias)) + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + scale_attr = paddle.ParamAttr(name="scale2", initializer=paddle.nn.initializer.Assign(scale)) + bias_attr = paddle.ParamAttr(name="bias2", initializer=paddle.nn.initializer.Assign(bias)) - out = pdpd.static.nn.batch_norm(node_x, epsilon=1e-5, + out = paddle.static.nn.batch_norm(node_x, epsilon=1e-5, param_attr=scale_attr, bias_attr=bias_attr, moving_mean_name="bn_mean2", @@ -53,12 +53,12 @@ def batch_norm2(name : str, x, scale, bias, mean, var, data_layout): use_global_stats=True, data_layout=data_layout) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) - pdpd.static.global_scope().var("bn_mean2").get_tensor().set(mean, pdpd.CPUPlace()) - pdpd.static.global_scope().var("bn_variance2").get_tensor().set(var, pdpd.CPUPlace()) + exe.run(paddle.static.default_startup_program()) + paddle.static.global_scope().var("bn_mean2").get_tensor().set(mean, paddle.CPUPlace()) + paddle.static.global_scope().var("bn_variance2").get_tensor().set(var, paddle.CPUPlace()) outs = exe.run( feed={'x': x}, @@ -69,7 +69,7 @@ def batch_norm2(name : str, x, scale, bias, mean, var, data_layout): return outs[0] def main(): - import paddle as pdpd + import paddle data = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) # data layout is NCHW scale = np.array([1.0, 1.5]).astype(np.float32) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_bmm.py similarity index 62% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_bmm.py index 92a468db917..016e89634ed 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_bmm.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_bmm.py @@ -3,19 +3,19 @@ from save_model import saveModel import sys -def pdpd_bmm(x1, x2): - import paddle as pdpd +def paddle_bmm(x1, x2): + import paddle - pdpd.enable_static() - node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) - node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) - bmm_node = pdpd.bmm(node_x1, node_x2) - result = pdpd.static.nn.batch_norm(bmm_node, use_global_stats=True) + paddle.enable_static() + node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) + node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) + bmm_node = paddle.bmm(node_x1, node_x2) + result = paddle.static.nn.batch_norm(bmm_node, use_global_stats=True) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x1': x1, 'x2': x2}, @@ -36,4 +36,4 @@ if __name__ == "__main__": [30., 31., 32., 33., 34.,]]]).astype(np.float32) input2 = np.ones([1, 5, 7]).astype('float32') - pdpd_result = pdpd_bmm(input1, input2) + paddle_result = paddle_bmm(input1, input2) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_clip.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_clip.py similarity index 57% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_clip.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_clip.py index 55edd6c62dd..64868b5f95a 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_clip.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_clip.py @@ -6,17 +6,17 @@ from save_model import saveModel import sys def clip(name: str, x, min, max): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.fluid.layers.clip(node_x, min=min, max=max) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.fluid.layers.clip(node_x, min=min, max=max) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d.py similarity index 88% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d.py index b3dc2c4aab2..7b7deebc09a 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d.py @@ -17,6 +17,6 @@ exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'x': inp_blob} var = [test_layer] -res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d"), list(inp_dict.keys()), var, exe) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_combinations.py similarity index 87% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_combinations.py index e54643decfe..348ed4dd97f 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_combinations.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_combinations.py @@ -1,31 +1,31 @@ from save_model import 
saveModel import numpy as np -import paddle as pdpd +import paddle import sys -pdpd.enable_static() +paddle.enable_static() def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(start_prog) outs = exe.run( feed={'x': input_x}, fetch_list=fetch_list, program=main_prog) - with pdpd.static.program_guard(main_prog, start_prog): + with paddle.static.program_guard(main_prog, start_prog): saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], outputs=[outs[0]], target_dir=sys.argv[1]) -def pdpd_conv2d(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - data = pdpd.static.data(name='x', shape=input_shape, dtype='float32') - weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel)) - conv2d = pdpd.static.nn.conv2d(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], +def paddle_conv2d(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + data = paddle.static.data(name='x', shape=input_shape, dtype='float32') + weight_attr = paddle.ParamAttr(name="conv2d_weight", initializer=paddle.nn.initializer.Assign(kernel)) + conv2d = paddle.static.nn.conv2d(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn) run_and_save_model(input_x, name, data, conv2d, main_program, startup_program) @@ -135,7 +135,7 @@ if __name__ == "__main__": ] for test in test_cases: - pdpd_conv2d(test['input_x'], test['name'], test["input_shape"], + paddle_conv2d(test['input_x'], test['name'], test["input_shape"], test['kernel'], test['dilation'], test['padding'], test['stride'], diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_relu.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_relu.py similarity index 90% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_relu.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_relu.py index 28e818d5d6f..c42b8c143f1 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_relu.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_relu.py @@ -19,7 +19,7 @@ exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'xxx': inp_blob} var = [relu] -res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_relu"), list(inp_dict.keys()), var, exe, model_filename="conv2d_relu.pdmodel", params_filename="conv2d_relu.pdiparams") diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_s.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_s.py similarity index 90% rename from 
src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_s.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_s.py index fae73f3ee84..14fe9e24ae4 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_s.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_s.py @@ -16,7 +16,7 @@ exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'x': inp_blob} var = [test_layer] -res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_s"), list(inp_dict.keys()), var, exe, model_filename="conv2d.pdmodel", params_filename="conv2d.pdiparams") diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_transpose.py similarity index 87% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_transpose.py index 79608128bc6..de92d9a377e 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_conv2d_transpose.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_conv2d_transpose.py @@ -1,30 +1,30 @@ import numpy as np -import paddle as pdpd -pdpd.enable_static() +import paddle +paddle.enable_static() from save_model import saveModel import sys def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(start_prog) outs = exe.run( feed={'x': input_x}, fetch_list=fetch_list, program=main_prog) - with pdpd.static.program_guard(main_prog, start_prog): + with paddle.static.program_guard(main_prog, start_prog): saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], outputs=[outs[0]], target_dir=sys.argv[1]) -def pdpd_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - data = pdpd.static.data(name='x', shape=input_shape, dtype='float32') - weight_attr = pdpd.ParamAttr(name="conv2d_weight", initializer=pdpd.nn.initializer.Assign(kernel)) - conv2d = pdpd.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], +def paddle_conv2d_transpose(input_x, name, input_shape, kernel, dilation, padding, stride, groups=1, use_cudnn=True): + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + data = paddle.static.data(name='x', shape=input_shape, dtype='float32') + weight_attr = paddle.ParamAttr(name="conv2d_weight", initializer=paddle.nn.initializer.Assign(kernel)) + conv2d = paddle.static.nn.conv2d_transpose(input=data, num_filters=kernel.shape[0], filter_size=kernel.shape[2:4], padding=padding, param_attr=weight_attr, dilation=dilation, stride=stride, groups=groups, use_cudnn=use_cudnn) run_and_save_model(input_x, name, data, conv2d, main_program, startup_program) @@ -134,7 +134,7 @@ if __name__ == 
"__main__": ] for test in test_cases: - pdpd_conv2d_transpose(test['input_x'], test['name'], test["input_shape"], + paddle_conv2d_transpose(test['input_x'], test['name'], test["input_shape"], test['kernel'], test['dilation'], test['padding'], test['stride'], diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_cumsum.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_cumsum.py similarity index 64% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_cumsum.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_cumsum.py index 6d612477892..b7651152326 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_cumsum.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_cumsum.py @@ -3,23 +3,23 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def cumsum(name:str, x, axis, dtype=None): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.cumsum(data, axis, dtype=dtype) - out = pdpd.cast(out, np.float32) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.cumsum(data, axis, dtype=dtype) + out = paddle.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_dropout.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_dropout.py new file mode 100644 index 00000000000..2880eb3c2b0 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_dropout.py @@ -0,0 +1,47 @@ +# +# pool2d paddle model generator +# +import numpy as np +from save_model import saveModel +import sys + + +def paddle_dropout(name : str, x, p, paddle_attrs): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.nn.functional.dropout(x=node_x, p=p, training=paddle_attrs['training'], mode=paddle_attrs['mode']) + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], + outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + p=0.5 + data = np.random.random(size=(3, 10, 3, 7)).astype('float32') + paddle_attrs = { + 'training' : False, + 'mode' : "downscale_in_infer" + } + paddle_attrs2 = { + 'training' : False, + 'mode' : "upscale_in_train" + } + paddle_dropout("dropout", data, p, paddle_attrs) + paddle_dropout("dropout_upscale_in_train", data, p, paddle_attrs2) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_dynamic_pool2d.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_dynamic_pool2d.py similarity index 100% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_dynamic_pool2d.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_dynamic_pool2d.py diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_elementwise_ops.py similarity index 52% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_elementwise_ops.py index f1592434631..e3b8e47b83d 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_elementwise_ops.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_elementwise_ops.py @@ -7,19 +7,19 @@ from save_model import saveModel def elementwise_add(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype) - node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype) - out = pdpd.fluid.layers.nn.elementwise_add(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + out = paddle.fluid.layers.nn.elementwise_add(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) @@ -29,19 +29,19 @@ def elementwise_add(name : str, x, y, axis, in_dtype): def elementwise_sub(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=in_dtype) - node_y = pdpd.static.data(name='y', shape=y.shape, dtype=in_dtype) - out = pdpd.fluid.layers.nn.elementwise_sub(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + out = paddle.fluid.layers.nn.elementwise_sub(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) @@ -51,19 +51,19 @@ def elementwise_sub(name : str, x, y, axis, in_dtype): def elementwise_div(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_div(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = paddle.fluid.layers.nn.elementwise_div(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) @@ -73,19 +73,19 @@ def elementwise_div(name : str, x, y, axis, in_dtype): def elementwise_mul(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_mul(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = paddle.fluid.layers.nn.elementwise_mul(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) @@ -95,19 +95,19 @@ def elementwise_mul(name : str, x, y, axis, in_dtype): def elementwise_min(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_min(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = paddle.fluid.layers.nn.elementwise_min(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) @@ -117,19 +117,19 @@ def elementwise_min(name : str, x, y, axis, in_dtype): def elementwise_max(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_max(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = paddle.fluid.layers.nn.elementwise_max(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) @@ -139,19 +139,19 @@ def elementwise_max(name : str, x, y, axis, in_dtype): def elementwise_pow(name : str, x, y, axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = pdpd.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = pdpd.fluid.layers.nn.elementwise_pow(node_x, node_y, axis=axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + out = paddle.fluid.layers.nn.elementwise_pow(node_x, node_y, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_embedding.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_embedding.py similarity index 96% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_embedding.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_embedding.py index d20b2d891d1..09e0b985406 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_embedding.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_embedding.py @@ -97,10 +97,10 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar ng_result = ngraph_embedding(ids, vocab_embeddings, vocab_size, embedding_dim, padding_idx, sparse) ng_result = list(ng_result.values())[0] - pdpd_result = list(outputs.values())[0] + paddle_result = list(outputs.values())[0] match = np.all(np.isclose( - pdpd_result, ng_result, rtol=1e-4, atol=1e-5)) + paddle_result, ng_result, rtol=1e-4, atol=1e-5)) prefix_color = '\n\033[92m' if match else '\n\033[91m' print(prefix_color + @@ -111,11 +111,11 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar np.set_printoptions(suppress=True) print(prefix_color + - 'pdpd_result: {}'.format(pdpd_result) + '\033[0m\n') + 'paddle_result: {}'.format(paddle_result) + '\033[0m\n') print(prefix_color + 'ng_result: {}'.format(ng_result) + '\033[0m\n') - raise ValueError(name + ': OV result does not match PDPD!') + raise ValueError(name + ': OV result does not match paddle!') return outputs diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_equal.py similarity index 61% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_equal.py index b311bf89e89..953c3419990 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_equal.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_equal.py @@ -7,19 +7,19 @@ import sys def equal(name : str, x, y): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - node_y = pdpd.static.data(name='y', shape=y.shape, dtype='float32') + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') - out = pdpd.equal(node_x, node_y) - out = pdpd.cast(out, np.float32) + out = paddle.equal(node_x, node_y) + out = paddle.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, @@ -32,7 +32,7 @@ def equal(name : str, x, y): def main(): - import paddle as pdpd + import paddle data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_exp.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_exp.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_exp.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_exp.py index 69496a2dab2..97e3b22a083 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_exp.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_exp.py @@ -7,16 +7,16 @@ import sys def exp(name: str, x): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) - out = pdpd.fluid.layers.exp(x=node_x) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.fluid.layers.exp(x=node_x) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_expand_v2.py similarity index 54% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_expand_v2.py index ab5aacaacec..b1afcecb80e 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_expand_v2.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_expand_v2.py @@ -3,23 +3,23 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def expand_v2(name:str, x, shape:list): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.expand(node_x, shape=shape, name='expand_v2') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.expand(node_x, shape=shape, name='expand_v2') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -32,22 +32,22 @@ def expand_v2(name:str, x, shape:list): def expand_v2_tensor(name:str, x, out_shape, use_tensor_in_list): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) if use_tensor_in_list: - out_shape[0] = pdpd.assign(np.array((out_shape[0],)).astype('int32')) - out = pdpd.expand(node_x, shape=out_shape, name='expand_v2') + out_shape[0] = paddle.assign(np.array((out_shape[0],)).astype('int32')) + out = paddle.expand(node_x, shape=out_shape, name='expand_v2') else: out_shape = np.array(out_shape).astype('int32') - node_shape = pdpd.assign(out_shape, output=None) - out = pdpd.expand(node_x, shape=node_shape, name='expand_v2') + node_shape = paddle.assign(out_shape, output=None) + out = paddle.expand(node_x, shape=node_shape, name='expand_v2') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_any_like.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_any_like.py similarity index 66% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_any_like.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_any_like.py index 9d74dfcdb72..a9daebd745b 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_any_like.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_any_like.py @@ -3,23 +3,23 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def fill_any_like(name:str, x, value, dtype=None): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.full_like(data, value, dtype=dtype) - out = pdpd.cast(out, np.float32) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.full_like(data, value, dtype=dtype) + out = paddle.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_constant.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_constant.py new file mode 100644 index 00000000000..e8afc96c48c --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_constant.py @@ -0,0 +1,97 @@ +# +# fill_const paddle model generator +# +import numpy as np +from save_model import saveModel +import paddle +import sys + + +def fill_constant(name : str, shape : list, dtype, value): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + x1 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') + x2 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') + out = paddle.add(paddle.cast(x1, np.float32), paddle.cast(x2, np.float32)) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def fill_constant_tensor(name : str, shape : list, dtype, value): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_value = paddle.static.data(name='value', shape=[1], dtype=dtype) + x1 = paddle.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1') + out = paddle.cast(x1, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={"value": value}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def fill_constant_shape_tensor(name : str, shape, dtype, value): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_shape = paddle.fluid.layers.fill_constant(shape=[2], value=shape, dtype='int32', name='shape') + x1 = paddle.fluid.layers.fill_constant(shape=node_shape, value=value, dtype=dtype, name='fill_constant') + out = paddle.cast(x1, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def fill_constant_shape_tensor_list(name : str, shape: list, dtype, value): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_shape = paddle.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape') + x1 = paddle.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant') + out = paddle.cast(x1, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03) + fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2) + fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4) + fill_constant_tensor("fill_constant_tensor", [2, 3, 4], 'float32', 0.05) + fill_constant_shape_tensor("fill_constant_shape_tensor", 2, 'float32', 0.05) + fill_constant_shape_tensor_list("fill_constant_shape_tensor_list", 2, 'float32', 0.05) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py similarity index 65% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py index 25bde96ad59..e80b5106d3e 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_fill_constant_batch_size_like.py @@ -3,24 +3,24 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def fill_constant_batch_size_like(name : str, x, shape, dtype, value, input_dim_idx=0, output_dim_idx=0): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - like = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \ + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + like = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \ value=value, dtype=dtype, \ output_dim_idx=output_dim_idx, input_dim_idx=input_dim_idx) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
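# The four fill_constant_* generators above cover the ways the op can receive its
# arguments: value as an attribute, value fed as a 1-D tensor, the shape produced
# by another op, and a shape list mixing integers with a tensor element.  Only the
# value-fed variant depends on runtime input; the others fold to constants.  A
# hedged eager-mode equivalent of the plain attribute case (not part of the patch):
import paddle

c = paddle.full(shape=[2, 3, 4], fill_value=0.03, dtype='float32')
print(c.shape)             # [2, 3, 4]
print(c.numpy()[0, 0, 0])  # ~0.03 (float32)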
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_flatten_contiguous_range.py similarity index 65% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_flatten_contiguous_range.py index 5d6274587f5..ba7488a3e62 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_flatten_contiguous_range.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_flatten_contiguous_range.py @@ -6,18 +6,18 @@ from save_model import saveModel import sys def generate_flatten_contiguous_range(name : str, x, start_axis, stop_axis, in_dtype): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - out = pdpd.flatten(node_x, start_axis, stop_axis) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + out = paddle.flatten(node_x, start_axis, stop_axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, fetch_list=[out]) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_gelu.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_gelu.py similarity index 58% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_gelu.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_gelu.py index 3ac50226ed6..211dbd33765 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_gelu.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_gelu.py @@ -3,22 +3,22 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def gelu(name:str, x, approximate=False): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.gelu(data, approximate=approximate) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.gelu(data, approximate=approximate) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_greater_equal.py similarity index 68% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_greater_equal.py index 506e9ccb214..8b73fb0ec8b 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_greater_equal.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_greater_equal.py @@ -3,27 +3,27 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def greater_equal(name : str, x, y, data_type, cast_to_fp32=False): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='input_x', shape=x.shape, dtype=data_type) - node_y = pdpd.static.data(name='input_y', shape=y.shape, dtype=data_type) - out = pdpd.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='input_x', shape=x.shape, dtype=data_type) + node_y = paddle.static.data(name='input_y', shape=y.shape, dtype=data_type) + out = paddle.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal') # FuzzyTest framework doesn't support boolean so cast to fp32/int32 if cast_to_fp32: data_type = "float32" - out = pdpd.cast(out, data_type) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.cast(out, data_type) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'input_x': x, 'input_y': y}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_hard_sigmoid.py similarity index 61% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_hard_sigmoid.py index 2a9d8e55842..e7691f50e38 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_sigmoid.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_hard_sigmoid.py @@ -3,21 +3,21 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def hard_sigmoid(name: str, x, slope: float = 0.2, offset: float = 0.5, data_type='float32'): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.hard_sigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.hard_sigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_hard_swish.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_hard_swish.py index bc1bec52f5b..329abb07346 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_hard_swish.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_hard_swish.py @@ -3,21 +3,21 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def hard_swish(name: str, x, threshold=6.0, scale=6.0, offset=3.0, data_type='float32'): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_interpolate.py similarity index 74% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_interpolate.py index 3a2fc9e042b..2a3cd62b82e 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_interpolate.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_interpolate.py @@ -1,38 +1,38 @@ import numpy as np -import paddle as pdpd +import paddle from paddle.nn.functional import interpolate from save_model import saveModel import sys -pdpd.enable_static() +paddle.enable_static() def run_and_save_model(input_x, name, feed, fetch_list, main_prog, start_prog): - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(start_prog) outs = exe.run( feed={'x': input_x}, fetch_list=fetch_list, program=main_prog) - with pdpd.static.program_guard(main_prog, start_prog): + with paddle.static.program_guard(main_prog, start_prog): saveModel(name, exe, feedkeys=['x'], fetchlist=fetch_list, inputs=[input_x], outputs=[outs[0]], target_dir=sys.argv[1]) return outs -def pdpd_interpolate(x, sizes=None, scale_factor=None, mode='nearest', align_corners=True, +def paddle_interpolate(x, sizes=None, scale_factor=None, mode='nearest', align_corners=True, align_mode=0, data_format='NCHW', name=None): - pdpd.enable_static() - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + paddle.enable_static() + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') interp = interpolate(node_x, size=sizes, scale_factor=scale_factor, mode=mode, align_corners=align_corners, align_mode=align_mode, data_format=data_format, name=name) - out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) outs = run_and_save_model(x, name, node_x, out, main_program, startup_program) return outs[0] @@ -50,7 +50,7 @@ def resize_upsample_bilinear(): {'name': 'bilinear_upsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data, [64, 64], None, mode='bilinear', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, [64, 64], None, mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) @@ -67,7 +67,7 @@ def resize_downsample_bilinear(): {'name': 'bilinear_downsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='bilinear', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data_28, [2, 4], None, mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) def 
resize_upsample_nearest(): @@ -84,7 +84,7 @@ def resize_upsample_nearest(): ] for test in test_case: - pdpd_result = pdpd_interpolate(data, test['size'], None, mode='nearest', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, test['size'], None, mode='nearest', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) @@ -97,7 +97,7 @@ def resize_downsample_nearest(): ] for test in test_case: - pdpd_result = pdpd_interpolate(data_64, test['size'], None, mode='nearest', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data_64, test['size'], None, mode='nearest', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) @@ -109,20 +109,20 @@ def nearest_upsample_tensor_size(): [13, 14, 15, 16] ]]], dtype=np.float32) sizes = np.array([8, 8], dtype=np.int32) - pdpd.enable_static() + paddle.enable_static() test_case = [{'name': 'nearest_upsample_tensor_size', 'align_corners': False, 'align_mode': 0}] for test in test_case: - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') - node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32') interp = interpolate(node_x, size=node_sizes, scale_factor=None, mode='nearest', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) - out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(startup_program) outs = exe.run( feed={'x': data, 'sizes': sizes}, @@ -143,17 +143,17 @@ def bilinear_upsample_tensor_size(): test_case = [{'name': 'bilinear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}] for test in test_case: - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') - node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32') interp = interpolate(node_x, size=node_sizes, scale_factor=None, mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) - out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) 
exe.run(startup_program) outs = exe.run( feed={'x': data, 'sizes': sizes}, @@ -174,7 +174,7 @@ def bilinear_upsample_scales(): {'name': 'bilinear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}] for test in test_case: - pdpd_result = pdpd_interpolate(data, None, 2, mode='bilinear', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, None, 2, mode='bilinear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) # trilinear @@ -196,7 +196,7 @@ def resize_upsample_trilinear(): {'name': 'trilinear_upsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data, [4, 64, 64], None, mode='TRILINEAR', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, [4, 64, 64], None, mode='TRILINEAR', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCDHW', name=test['name']) @@ -218,7 +218,7 @@ def resize_downsample_trilinear(): {'name': 'trilinear_downsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data_28, [2, 2, 4], None, mode='TRILINEAR', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data_28, [2, 2, 4], None, mode='TRILINEAR', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCDHW', name=test['name']) def trilinear_upsample_tensor_size(): @@ -233,17 +233,17 @@ def trilinear_upsample_tensor_size(): test_case = [{'name': 'trilinear_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}] for test in test_case: - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') - node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32') interp = interpolate(node_x, size=node_sizes, scale_factor=None, mode='TRILINEAR', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCDHW', name=test['name']) - out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(startup_program) outs = exe.run( feed={'x': data, 'sizes': sizes}, @@ -263,7 +263,7 @@ def trilinear_upsample_scales(): {'name': 'trilinear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [1, 2, 2]}] for test in test_case: - pdpd_result = pdpd_interpolate(data, None, 3, mode='TRILINEAR', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, None, 3, mode='TRILINEAR', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCDHW', name=test['name']) @@ -280,7 +280,7 @@ def resize_upsample_bicubic(): {'name': 'bicubic_upsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data, [6, 6], None, mode='bicubic', 
align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, [6, 6], None, mode='bicubic', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) @@ -297,7 +297,7 @@ def resize_downsample_bicubic(): {'name': 'bicubic_downsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data_28, [2, 4], None, mode='bicubic', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data_28, [2, 4], None, mode='bicubic', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) def bicubic_upsample_tensor_size(): @@ -312,17 +312,17 @@ def bicubic_upsample_tensor_size(): test_case = [{'name': 'bicubic_upsample_tensor_size', 'align_corners': False, 'align_mode': 1}] for test in test_case: - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') - node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32') interp = interpolate(node_x, size=node_sizes, scale_factor=None, mode='bicubic', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) - out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(startup_program) outs = exe.run( feed={'x': data, 'sizes': sizes}, @@ -342,7 +342,7 @@ def bicubic_upsample_scales(): {'name': 'bicubic_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}] for test in test_case: - pdpd_result = pdpd_interpolate(data, None, 2, mode='bicubic', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, None, 2, mode='bicubic', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCHW', name=test['name']) # linear @@ -356,7 +356,7 @@ def resize_upsample_linear(): {'name': 'linear_upsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data, [6,], None, mode='linear', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, [6,], None, mode='linear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCW', name=test['name']) @@ -371,7 +371,7 @@ def resize_downsample_linear(): {'name': 'linear_downsample_true_0', 'align_corners': True, 'align_mode': 0}] for test in test_case: - pdpd_result = pdpd_interpolate(data_28, [4,], None, mode='linear', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data_28, [4,], None, mode='linear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCW', name=test['name']) def linear_upsample_tensor_size(): @@ -383,17 +383,17 @@ def linear_upsample_tensor_size(): test_case = [{'name': 'linear_upsample_tensor_size', 'align_corners': False, 'align_mode': 
1}] for test in test_case: - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - with pdpd.static.program_guard(main_program, startup_program): - node_x = pdpd.static.data(name='x', shape=data.shape, dtype='float32') - node_sizes = pdpd.static.data(name='sizes', shape=sizes.shape, dtype='int32') + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + with paddle.static.program_guard(main_program, startup_program): + node_x = paddle.static.data(name='x', shape=data.shape, dtype='float32') + node_sizes = paddle.static.data(name='sizes', shape=sizes.shape, dtype='int32') interp = interpolate(node_x, size=node_sizes, scale_factor=None, mode='linear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCW', name=test['name']) - out = pdpd.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.static.nn.batch_norm(interp, use_global_stats=True, epsilon=0) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(startup_program) outs = exe.run( feed={'x': data, 'sizes': sizes}, @@ -410,7 +410,7 @@ def linear_upsample_scales(): {'name': 'linear_upsample_scales2', 'align_corners': False, 'align_mode': 1, "scales": [2, 2]}] for test in test_case: - pdpd_result = pdpd_interpolate(data, None, 2, mode='linear', align_corners=test['align_corners'], + paddle_result = paddle_interpolate(data, None, 2, mode='linear', align_corners=test['align_corners'], align_mode=test['align_mode'], data_format='NCW', name=test['name']) if __name__ == "__main__": diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_layer_norm.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_layer_norm.py similarity index 66% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_layer_norm.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_layer_norm.py index 5cd60061aad..d9e8a28a334 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_layer_norm.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_layer_norm.py @@ -4,23 +4,23 @@ import numpy as np from paddle.fluid import param_attr from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def layer_norm(name:str, x, begin_norm_axis, scale=True, shift=True, param_attr=None, bias_attr=None): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.static.nn.layer_norm(input=data, scale=scale, shift=shift,\ + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.static.nn.layer_norm(input=data, scale=scale, shift=shift,\ begin_norm_axis=begin_norm_axis, param_attr=param_attr, bias_attr=bias_attr) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
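# Reading aid for the interpolate cases above (hedged; based on paddle's documented
# semantics, not on anything introduced by this patch): align_mode only matters for
# the linear/bilinear/trilinear modes when align_corners=False.  align_mode=0 uses
# the half-pixel rule src = (dst + 0.5) * scale - 0.5, align_mode=1 uses the
# asymmetric rule src = dst * scale, and align_corners=True aligns the corner points
# of the input and output grids, making align_mode irrelevant.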
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -33,8 +33,8 @@ def layer_norm(name:str, x, begin_norm_axis, scale=True, shift=True, param_attr= def main(): x = np.random.rand(8, 24, 32).astype(data_type) random_data = np.random.rand(24 * 32).astype(data_type) - attr = pdpd.ParamAttr( - initializer=pdpd.fluid.initializer.NumpyArrayInitializer(random_data)) + attr = paddle.ParamAttr( + initializer=paddle.fluid.initializer.NumpyArrayInitializer(random_data)) layer_norm("layer_norm", x, begin_norm_axis=1, param_attr=attr, bias_attr=attr) layer_norm("layer_norm_noscale", x, scale=False, begin_norm_axis=2) layer_norm("layer_norm_noshift", x, shift=False, begin_norm_axis=1) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_leaky_relu.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_leaky_relu.py index bc2a54741d7..c9da226a38b 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_leaky_relu.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_leaky_relu.py @@ -3,21 +3,21 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def leaky_relu(name: str, x, alpha: float = 0.02, data_type='float32'): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_log.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_log.py index 6e269e03ed5..a5909e1739c 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_log.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_log.py @@ -3,21 +3,21 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def log(name: str, x, data_type='float32'): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.log(node_x, name='log') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.log(node_x, name='log') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py index 6e638738134..74dd66b6aa3 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_logical_not.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_logical_not.py @@ -7,20 +7,20 @@ import sys def equal_logical_not(name : str, x, y): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - node_y = pdpd.static.data(name='y', shape=y.shape, dtype='float32') + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32') - out = pdpd.equal(node_x, node_y) - out = pdpd.logical_not(out) - out = pdpd.cast(out, np.float32) + out = paddle.equal(node_x, node_y) + out = paddle.logical_not(out) + out = paddle.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, @@ -33,7 +33,7 @@ def equal_logical_not(name : str, x, y): def main(): - import paddle as pdpd + import paddle data_x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32) data_y = np.array([[[[2, 0, 3]], [[3, 1, 4]]]]).astype(np.float32) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_lower_version.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_lower_version.py similarity index 100% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_lower_version.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_lower_version.py diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_matmul_v2.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_matmul_v2.py similarity index 74% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_matmul_v2.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_matmul_v2.py index 08a5eab50dd..dee36612a8b 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_matmul_v2.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_matmul_v2.py @@ -3,19 +3,19 @@ from save_model import saveModel import sys def matmul(name, x1, x2, x_transpose=False, y_transpose=False): - import paddle as pdpd + import paddle - pdpd.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) - node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) - result = pdpd.matmul(node_x1, node_x2, x_transpose, y_transpose) - #result = pdpd.static.nn.batch_norm(mul_node, use_global_stats=True) + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) + node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) + result = paddle.matmul(node_x1, node_x2, x_transpose, y_transpose) + #result = paddle.static.nn.batch_norm(mul_node, use_global_stats=True) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x1': x1, 'x2': x2}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_mul.py similarity index 51% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_mul.py index 02ce1d6d9ee..fa9efa7f251 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_mul.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_mul.py @@ -2,20 +2,20 @@ import numpy as np from save_model import saveModel import sys -def pdpd_matmul(name, x1, x2, x_transpose=False, y_transpose=False): - import paddle as pdpd +def paddle_matmul(name, x1, x2, x_transpose=False, y_transpose=False): + import paddle - pdpd.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x1 = pdpd.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) - node_x2 = pdpd.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) - mul_node = pdpd.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose) - result = pdpd.static.nn.batch_norm(mul_node, use_global_stats=True) + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype) + node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype) + mul_node = paddle.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose) + result = paddle.static.nn.batch_norm(mul_node, use_global_stats=True) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x1': x1, 'x2': x2}, @@ -44,6 +44,6 @@ if __name__ == "__main__": input_2x3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - pdpd_matmul("matmul_xt", input_2x5, input_2x3, x_transpose=True, y_transpose=False) - pdpd_matmul("matmul_yt", input_2x3, input_5x3, x_transpose=False, y_transpose=True) - pdpd_matmul("matmul_xt_yt", input_2x5, input_5x2, x_transpose=True, y_transpose=True) + paddle_matmul("matmul_xt", input_2x5, input_2x3, x_transpose=True, y_transpose=False) + paddle_matmul("matmul_yt", input_2x3, input_5x3, x_transpose=False, y_transpose=True) + paddle_matmul("matmul_xt_yt", input_2x5, input_5x2, x_transpose=True, y_transpose=True) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_multi_tensor_split.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_multi_tensor_split.py similarity index 90% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_multi_tensor_split.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_multi_tensor_split.py index 231dedde46e..f8ce53f6390 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_multi_tensor_split.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_multi_tensor_split.py @@ -12,7 +12,7 @@ import os def create_multi_output_model(): paddle.enable_static() - # PDPD model creation and inference + # paddle model creation and inference num_splits = 20 inp_blob_1 = np.random.randn(2, num_splits, 4, 4).astype(np.float32) @@ -27,7 +27,7 @@ def create_multi_output_model(): exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'x': inp_blob_1} - res_pdpd = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) + res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) fluid.io.save_inference_model(os.path.join(sys.argv[1], "multi_tensor_split"), list(inp_dict.keys()), var, exe, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pad3d.py similarity index 78% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pad3d.py index 5a711f6f83d..e13b797e89c 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pad3d.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pad3d.py @@ -6,24 +6,24 @@ from save_model import saveModel import sys def pad3d(name : str, x, in_dtype, pad, data_format, mode, value = 0): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name = 'x', shape = x.shape, dtype = in_dtype) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) if mode == 'constant': - pad_constant = pdpd.nn.Pad3D(padding=pad, mode=mode, value=value, data_format=data_format) + pad_constant = paddle.nn.Pad3D(padding=pad, mode=mode, value=value, data_format=data_format) out = pad_constant(node_x) else: - pad_other_mode = pdpd.nn.Pad3D(padding=pad, mode=mode, data_format=data_format) + pad_other_mode = 
paddle.nn.Pad3D(padding=pad, mode=mode, data_format=data_format) out = pad_other_mode(node_x) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, fetch_list=[out]) diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_place_test_model.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_place_test_model.py new file mode 100644 index 00000000000..072d21c2e2e --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_place_test_model.py @@ -0,0 +1,53 @@ +import numpy as np +from save_model import saveModel +import sys + + +def paddle_rnn_lstm(input_size, hidden_size, layers, direction): + import paddle + paddle.enable_static() + main_program = paddle.static.Program() + startup_program = paddle.static.Program() + + num_of_directions = 1 if direction == 'forward' else 2 + with paddle.static.program_guard(main_program, startup_program): + + rnn = paddle.nn.LSTM(input_size, hidden_size, layers, direction, name="lstm") + + data = paddle.static.data(name='x', shape=[4, 3, input_size], dtype='float32') + prev_h = paddle.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_1") + prev_c = paddle.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_2") + + y, (h, c) = rnn(data, (prev_h, prev_c)) + relu_1 = paddle.nn.functional.relu(c, name="relu_1") + relu_2 = paddle.nn.functional.relu(c, name="relu_2") + relu_3 = paddle.nn.functional.relu(c, name="relu_3") + + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + exe.run(startup_program) + + outs = exe.run( + feed={'x': np.ones([4, 3, input_size]).astype(np.float32)}, + fetch_list=[y, h, c], + program=main_program) + saveModel("place_test_model", exe, feedkeys=['x'], + fetchlist=[y, h, c, relu_1, relu_2, relu_3], + inputs=[np.ones([4, 3, input_size]).astype(np.float32)], + outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1]) + return outs[0] + + +if __name__ == "__main__": + + testCases = [ + { + 'input_size': 2, + 'hidden_size': 2, + 'layers': 1, + 'direction': 'forward', + }, + ] + + for test in testCases: + paddle_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction']) \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pool2d.py similarity index 79% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pool2d.py index 1f6c32e242b..e1709b1a979 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pool2d.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pool2d.py @@ -8,12 +8,12 @@ from save_model import saveModel data_type = 'float32' def pool2d(name : str, x, attrs : dict): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.pool2d(node_x, + with 
paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.pool2d(node_x, pool_size=attrs['pool_size'], pool_type=attrs['pool_type'], pool_stride=attrs['pool_stride'], @@ -23,10 +23,10 @@ def pool2d(name : str, x, attrs : dict): exclusive=attrs['exclusive'], data_format=attrs['data_format']) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -37,21 +37,21 @@ def pool2d(name : str, x, attrs : dict): return outs[0] def adaptive_pool2d(name : str, x, attrs : dict): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.adaptive_pool2d( + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.adaptive_pool2d( input=node_x, pool_size=attrs['pool_size'], pool_type=attrs['pool_type'], require_index=attrs['require_index']) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -74,7 +74,7 @@ def main(): for i, pooling_type in enumerate(pooling_types): # example 1: # ceil_mode = False - pdpd_attrs = { + paddle_attrs = { # input=data_NCHW, # shape: [2, 3, 8, 8] 'pool_size' : [3,3], 'pool_type' : pooling_type, @@ -86,12 +86,12 @@ def main(): 'data_format' : "NCHW" } # shape of out_1: [2, 3, 4, 3] - pool2d(pooling_type+'Pool_test1', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test1', data_NCHW, paddle_attrs) # Cecilia: there is a bug of PaddlePaddle in this case. 
# example 2: # ceil_mode = True (different from example 1) - pdpd_attrs = { + paddle_attrs = { #input=data_NCHW, 'pool_size':[3,3], 'pool_type' : pooling_type, @@ -103,11 +103,11 @@ def main(): 'data_format':"NCHW" } # shape of out_2: [2, 3, 4, 4] which is different from out_1 - pool2d(pooling_type+'Pool_test2', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test2', data_NCHW, paddle_attrs) # example 3: # pool_padding = "SAME" (different from example 1) - pdpd_attrs = { + paddle_attrs = { #input=data_NCHW, 'pool_size':[3,3], 'pool_type' : pooling_type, @@ -119,11 +119,11 @@ def main(): 'data_format':"NCHW" } # shape of out_3: [2, 3, 3, 3] which is different from out_1 - pool2d(pooling_type+'Pool_test3', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test3', data_NCHW, paddle_attrs) # example 4: # pool_padding = "VALID" (different from example 1) - pdpd_attrs = { + paddle_attrs = { #input=data_NCHW, 'pool_size':[3,3], 'pool_type' : pooling_type, @@ -135,12 +135,12 @@ def main(): 'data_format':"NCHW" } # shape of out_4: [2, 3, 2, 2] which is different from out_1 - pool2d(pooling_type+'Pool_test4', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test4', data_NCHW, paddle_attrs) # example 5: # global_pooling = True (different from example 1) # It will be set pool_size = [8,8] and pool_padding = [0,0] actually. - pdpd_attrs = { + paddle_attrs = { #input=data_NCHW, 'pool_size':[3,3], 'pool_type' : pooling_type, @@ -152,11 +152,11 @@ def main(): 'data_format':"NCHW" } # shape of out_5: [2, 3, 1, 1] which is different from out_1 - pool2d(pooling_type+'Pool_test5', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test5', data_NCHW, paddle_attrs) # example 6: # data_format = "NHWC" (different from example 1) - pdpd_attrs = { + paddle_attrs = { #input=data_NHWC, # shape: [2, 8, 8, 3] 'pool_size':[3,3], 'pool_type' : pooling_type, @@ -168,11 +168,11 @@ def main(): 'data_format':"NHWC" } # shape of out_6: [2, 4, 3, 3] which is different from out_1 - pool2d(pooling_type+'Pool_test6', data_NHWC, pdpd_attrs) + pool2d(pooling_type+'Pool_test6', data_NHWC, paddle_attrs) # example 7: # pool_size is [9, 9] - pdpd_attrs = { + paddle_attrs = { #input=data_NCHW, 'pool_size':[9,9], 'pool_type' : pooling_type, @@ -183,11 +183,11 @@ def main(): 'exclusive':True, 'data_format':"NCHW" } - pool2d(pooling_type+'Pool_test7', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test7', data_NCHW, paddle_attrs) # example 8: # pool_padding size is 1 - pdpd_attrs = { + paddle_attrs = { 'pool_size':[3,3], 'pool_type' : pooling_type, 'pool_stride' : [3,3], @@ -197,7 +197,7 @@ def main(): 'exclusive':True, 'data_format':"NCHW" } - pool2d(pooling_type+'Pool_test8', data_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test8', data_NCHW, paddle_attrs) #input data for test9 and test10 N_data1, C_data1, H_data1, W_data1 = 2, 3, 8, 8 @@ -205,7 +205,7 @@ def main(): data1_NCHW = data1.reshape(N_data1, C_data1, H_data1, W_data1) # example 9: # pool_padding size is 4: [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] - pdpd_attrs = { + paddle_attrs = { 'pool_size':[3,3], 'pool_type' : pooling_type, 'pool_stride' : [3,3], @@ -215,11 +215,11 @@ def main(): 'exclusive':True, 'data_format':"NCHW" } - pool2d(pooling_type+'Pool_test9', data1_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test9', data1_NCHW, paddle_attrs) # example 10: # input=data_NCHW and pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] - pdpd_attrs = { + paddle_attrs = { 'pool_size':[3,3], 
'pool_type' : pooling_type, 'pool_stride' : [3,3], @@ -229,11 +229,11 @@ def main(): 'exclusive':True, 'data_format':"NCHW" } - pool2d(pooling_type+'Pool_test10', data1_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test10', data1_NCHW, paddle_attrs) # example 11: # input=data_NCHW and poolsize is the multiply by width & height. pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] - pdpd_attrs = { + paddle_attrs = { 'pool_size': 9, 'pool_type' : pooling_type, 'pool_stride' : [3,3], @@ -243,17 +243,17 @@ def main(): 'exclusive':True, 'data_format':"NCHW" } - pool2d(pooling_type+'Pool_test11', data1_NCHW, pdpd_attrs) + pool2d(pooling_type+'Pool_test11', data1_NCHW, paddle_attrs) # adaptive_pool2d for i, pooling_type in enumerate(pooling_types): - pdpd_attrs = { + paddle_attrs = { 'pool_size': [3,3], 'pool_type': pooling_type, 'require_index': False } - adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, pdpd_attrs) + adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, paddle_attrs) if __name__ == "__main__": diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pow.py similarity index 58% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pow.py index 97e1df3ae72..c71d6f6e89c 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_pow.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_pow.py @@ -3,24 +3,24 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys -def pdpd_pow(name : str, x, y, data_type): - pdpd.enable_static() +def paddle_pow(name : str, x, y, data_type): + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.pow(node_x, y, name='pow') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.pow(node_x, y, name='pow') #FuzzyTest supports int32 & float32 if data_type == "int64": - out = pdpd.cast(out, "float32") - out = pdpd.cast(out, "float32") - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.cast(out, "float32") + out = paddle.cast(out, "float32") + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -31,19 +31,19 @@ def pdpd_pow(name : str, x, y, data_type): return outs[0] -def pdpd_pow_tensor(name : str, x, y, data_type): - pdpd.enable_static() +def paddle_pow_tensor(name : str, x, y, data_type): + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - node_y = pdpd.static.data(name='y', shape=y.shape, dtype=data_type) - out = pdpd.fluid.layers.pow(node_x, node_y, name='pow') - out = pdpd.cast(out, "float32") + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=data_type) + out = paddle.fluid.layers.pow(node_x, node_y, name='pow') + out = paddle.cast(out, "float32") - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'y': y}, @@ -84,11 +84,11 @@ def main(): ] for test in test_cases: - pdpd_pow("pow_" + test['name'], test['x'], test['y'], test['dtype']) + paddle_pow("pow_" + test['name'], test['x'], test['y'], test['dtype']) x = np.array([0, 1, 2, -10]).astype("float32") y = np.array([2.0]).astype("float32") - pdpd_pow_tensor("pow_y_tensor", x, y, 'float32') + paddle_pow_tensor("pow_y_tensor", x, y, 'float32') if __name__ == "__main__": diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_prior_box.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_prior_box.py similarity index 89% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_prior_box.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_prior_box.py index 12e9ddd80fe..41541fa17dc 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_prior_box.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_prior_box.py @@ -7,16 +7,16 @@ import sys def prior_box(name: str, input_data, image_data, attrs: dict): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - Input = pdpd.static.data( + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + Input = paddle.static.data( name='Input', shape=input_data.shape, dtype=input_data.dtype) - Image = pdpd.static.data( + Image = paddle.static.data( name='Image', shape=image_data.shape, dtype=image_data.dtype) - box, var = pdpd.fluid.layers.prior_box(Input, + box, var = paddle.fluid.layers.prior_box(Input, Image, min_sizes=attrs['min_sizes'], max_sizes=attrs['max_sizes'], @@ -29,10 +29,10 @@ def prior_box(name: str, input_data, image_data, attrs: dict): name=None, min_max_aspect_ratios_order=attrs['min_max_aspect_ratios_order']) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'Input': input_data, 'Image': image_data}, @@ -121,4 +121,4 @@ if __name__ == "__main__": image_h)).astype('float32') for item in prior_box_attrs_list: - pred_pdpd = prior_box(item['name'], input_data, image_data, item) + pred_paddle = prior_box(item['name'], input_data, image_data, item) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_range.py similarity index 52% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_range.py index c2d7a0b5715..e6f0bfc37a5 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_range.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_range.py @@ -6,21 +6,21 @@ from save_model import saveModel import sys -def pdpd_range(name : str, x, start, end, step, out_type): - import paddle as pdpd - pdpd.enable_static() +def paddle_range(name : str, x, start, end, step, out_type): + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') # Range op only support fill_constant input, since dynamic op is not supported in ov - out = pdpd.fluid.layers.range(start, end, step, out_type) - out = pdpd.cast(out, np.float32) - out = pdpd.add(node_x, out) - #out = pdpd.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.fluid.layers.range(start, end, step, out_type) + out = paddle.cast(out, np.float32) + out = paddle.add(node_x, out) + #out = paddle.cast(out, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -38,7 +38,7 @@ def main(): data = np.random.random([1, 5]).astype("float32") out_type = ["float32", "int32", "int64"] for i, dtype in enumerate(out_type): - pdpd_range("range"+str(i), data, start, end, step, dtype) + paddle_range("range"+str(i), data, start, end, step, dtype) if __name__ == "__main__": diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_relu.py similarity index 64% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_relu.py index 6952bd27cd8..4fe1045f44a 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_relu.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_relu.py @@ -7,16 +7,16 @@ import sys def relu(name: str, x): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.nn.functional.relu(node_x) + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.nn.functional.relu(node_x) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_relu6.py similarity index 61% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_relu6.py index af96e5e690e..7fc646b74de 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_relu6.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_relu6.py @@ -3,21 +3,21 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def relu6(name: str, x, threshold: float = 6.0, data_type='float32'): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.relu6(node_x, threshold=threshold, name='relu6') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.relu6(node_x, threshold=threshold, name='relu6') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reshape.py similarity index 55% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reshape.py index 3c2a7dd8f67..6b6ad2b9c3a 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_reshape.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_reshape.py @@ -9,17 +9,17 @@ data_type = 'float32' def reshape(name : str, x, out_shape): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.reshape(x=node_x, shape=out_shape) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -32,24 +32,24 @@ def reshape(name : str, x, out_shape): def reshape_tensor(name : str, x, out_shape, use_tensor_in_list): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) if use_tensor_in_list: - out_shape[0] = pdpd.assign(np.array((out_shape[0],)).astype('int32')) - out = pdpd.fluid.layers.reshape(x=node_x, shape=out_shape) + out_shape[0] = paddle.assign(np.array((out_shape[0],)).astype('int32')) + out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape) else: out_shape = np.array(out_shape).astype('int32') - node_shape = pdpd.assign(out_shape) - out = pdpd.fluid.layers.reshape(x=node_x, shape=node_shape) + node_shape = paddle.assign(out_shape) + out = paddle.fluid.layers.reshape(x=node_x, shape=node_shape) - out = pdpd.pow(out, 1) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.pow(out, 1) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_rnn_lstm.py similarity index 80% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_rnn_lstm.py index c5e62b75bc2..6bb894b460d 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_rnn_lstm.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_rnn_lstm.py @@ -3,32 +3,32 @@ from save_model import saveModel import sys -def pdpd_rnn_lstm(input_size, hidden_size, layers, direction, seq_len): - import paddle as pdpd - pdpd.enable_static() - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() +def paddle_rnn_lstm(input_size, hidden_size, layers, direction, seq_len): + import paddle + paddle.enable_static() + main_program = paddle.static.Program() + startup_program = paddle.static.Program() num_of_directions = 1 if direction == 'forward' else 2 - with pdpd.static.program_guard(main_program, startup_program): + with paddle.static.program_guard(main_program, startup_program): - rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction) + rnn = paddle.nn.LSTM(input_size, hidden_size, layers, direction) - data = pdpd.static.data( + data = paddle.static.data( name='x', shape=[4, 3, input_size], dtype='float32') - prev_h = pdpd.ones( + prev_h = paddle.ones( shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32) - prev_c = pdpd.ones( + prev_c = paddle.ones( shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32) if seq_len: - seq_lengths = pdpd.static.data(name='sl', shape=[4], dtype='int32') + seq_lengths = paddle.static.data(name='sl', shape=[4], dtype='int32') y, (h, c) = rnn(data, (prev_h, prev_c), seq_lengths) else: y, (h, c) = rnn(data, (prev_h, prev_c)) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) exe.run(startup_program) if seq_len: @@ -99,5 +99,5 @@ if __name__ == "__main__": ] for test in testCases: - pdpd_rnn_lstm(test['input_size'], test['hidden_size'], + paddle_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction'], test['seq_len']) diff --git a/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_scale.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_scale.py new file mode 100644 index 00000000000..7f8303e7d80 --- /dev/null +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_scale.py @@ -0,0 +1,93 @@ +# +# pool2d paddle model generator +# +import numpy as np +import sys +from save_model import saveModel + + +def paddle_scale(name : str, x, scale, bias, attrs : dict, data_type): + import paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.scale(x=node_x, scale=scale, bias=bias, + bias_after_scale=attrs['bias_after_scale']) + #FuzzyTest only support FP32 now, so cast result to fp32 + out = paddle.cast(out, "float32") + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the 
parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def paddle_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type): + import paddle as paddle + paddle.enable_static() + + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + node_scale = paddle.static.data(name='scale', shape=[1], dtype='float32') + out = paddle.scale(x=node_x, scale=node_scale, bias=bias, + bias_after_scale=attrs['bias_after_scale']) + #FuzzyTest only support FP32 now, so cast result to fp32 + out = paddle.cast(out, "float32") + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={'x': x, 'scale': scale}, + fetch_list=[out]) + + saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + +def main(): + scale = 2.0 + bias = 1.0 + data = np.random.random([2, 3]).astype("float32") + + test_cases = [ + "float32", + "int32", + "int64" + ] + + paddle_attrs = { + 'bias_after_scale': True, + } + paddle_scale_tensor("scale_tensor_bias_after", data, scale, bias, paddle_attrs, 'float32') + + paddle_attrs = { + 'bias_after_scale': False, + } + paddle_scale_tensor("scale_tensor_bias_before", data, scale, bias, paddle_attrs, 'float32') + + for test in test_cases: + data = np.random.random([2, 3]).astype(test) + paddle_attrs = { + 'bias_after_scale': True, + } + paddle_scale("scale_bias_after_" + test, data, scale, bias, paddle_attrs, test) + + paddle_attrs = { + 'bias_after_scale': False, + } + paddle_scale("scale_bias_before_" + test, data, scale, bias, paddle_attrs, test) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_shape.py similarity index 50% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_shape.py index 35241487bba..e7413760790 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_shape.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_shape.py @@ -6,18 +6,18 @@ from save_model import saveModel import sys -def pdpd_shape(name : str, x): - import paddle as pdpd - pdpd.enable_static() +def paddle_shape(name : str, x): + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.shape(node_x) - out = pdpd.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.shape(node_x) + out = paddle.cast(out, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the 
parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -31,7 +31,7 @@ def pdpd_shape(name : str, x): def main(): data = np.random.random(size=(2, 3)).astype('float32') - pdpd_shape("shape", data) + paddle_shape("shape", data) if __name__ == "__main__": diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_sigmoid.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_sigmoid.py index 815a016907d..e94d8ed40ef 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_sigmoid.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_sigmoid.py @@ -3,21 +3,21 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def sigmoid(name: str, x, data_type): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.sigmoid(node_x, name='sigmoid') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) + out = paddle.fluid.layers.sigmoid(node_x, name='sigmoid') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_slice.py similarity index 68% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_slice.py index 75328e48fca..1aec3fc7228 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_slice.py @@ -5,7 +5,7 @@ import sys import os import numpy as np -import paddle as pdpd +import paddle from save_model import exportModel from save_model import saveModel @@ -13,16 +13,16 @@ from save_model import saveModel data_type = 'float32' def slice(name : str, x, axes : list, start : list, end : list): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -34,14 +34,14 @@ def slice(name : str, x, axes : list, start : list, end : list): def slice_dyn(test_shape=[2,8,10,10]): - pdpd.disable_static() + paddle.disable_static() - data = pdpd.rand(shape=test_shape, dtype='float32') + data = paddle.rand(shape=test_shape, dtype='float32') ''' slice w/ decrease_axis ''' - @pdpd.jit.to_static + @paddle.jit.to_static def test_slice_decrease_axis(x): return x[0, 1:3, :, 5] exportModel('slice_decrease_axis', test_slice_decrease_axis, [data], target_dir=sys.argv[1]) # output shape (2, 10) @@ -49,15 +49,15 @@ def slice_dyn(test_shape=[2,8,10,10]): ''' slice w/o decrease_axis ''' - @pdpd.jit.to_static + @paddle.jit.to_static def test_slice(x): - return pdpd.slice(x, axes=[0,1,3], starts=[0,1,5], ends=[1,3,6]) + return paddle.slice(x, axes=[0,1,3], starts=[0,1,5], ends=[1,3,6]) # exportModel('slice_dyn', test_slice, [data], target_dir=sys.argv[1]) # output shape (1, 2, 10, 1) # disabled by default as this kind of test model is already there. It's for comparison only. ''' slice w/ decrease_axis of all dims ''' - @pdpd.jit.to_static + @paddle.jit.to_static def test_slice_decrease_axis_all(x): return x[0, 0, 0, 0] exportModel('slice_decrease_axis_all', test_slice_decrease_axis_all, [data], target_dir=sys.argv[1]) # output shape (1,) @@ -65,23 +65,23 @@ def slice_dyn(test_shape=[2,8,10,10]): ''' slice w/o decrease_axis of all dims ''' - @pdpd.jit.to_static + @paddle.jit.to_static def test_slice_alldim(x): - return pdpd.slice(x, axes=[0,1,2,3], starts=[0,0,0,0], ends=[1,1,1,1]) + return paddle.slice(x, axes=[0,1,2,3], starts=[0,0,0,0], ends=[1,1,1,1]) # exportModel('slice_alldim', test_slice_alldim, [data], target_dir=sys.argv[1]) # output shape (1, 1, 1, 1) # disabled by default as this kind of test model is already there. It's for comparison only. ''' a test case simulating the last reshape2 of ocrnet which accepts slice (with decrease_axes in all dims) as its parents. 
''' def slice_reshape(B=1, C=256, H=16, W=32): - pdpd.disable_static() + paddle.disable_static() - data = pdpd.rand(shape=[B, C, H*W], dtype='float32') + data = paddle.rand(shape=[B, C, H*W], dtype='float32') - @pdpd.jit.to_static + @paddle.jit.to_static def test_model(x): - x2 = pdpd.assign([-1, -1, 16, 32]).astype('int32') - node_reshape = pdpd.reshape(x, [0, 256, x2[2], x2[3]]) + x2 = paddle.assign([-1, -1, 16, 32]).astype('int32') + node_reshape = paddle.reshape(x, [0, 256, x2[2], x2[3]]) return node_reshape exportModel('slice_reshape', test_model, [data], target_dir=sys.argv[1]) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_softmax.py similarity index 71% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_softmax.py index f797a048606..0e6e25b3ffe 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_softmax.py @@ -7,16 +7,16 @@ from save_model import saveModel def softmax(name: str, x, axis): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.nn.functional.softmax(x=node_x, axis=axis) + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.nn.functional.softmax(x=node_x, axis=axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softplus.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_softplus.py similarity index 70% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softplus.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_softplus.py index f5d31f27fe3..48d0e5431c4 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softplus.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_softplus.py @@ -7,16 +7,16 @@ from save_model import saveModel def softplus(name: str, x, beta, threshold): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.nn.functional.softplus(x=node_x, beta=beta, threshold=threshold) + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.nn.functional.softplus(x=node_x, beta=beta, threshold=threshold) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_split.py similarity index 60% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_split.py index 8ddb4790126..769c155a9dd 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_split.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_split.py @@ -7,17 +7,17 @@ import sys def split(name : str, x, attrs : dict): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) - out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis']) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis']) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -31,18 +31,18 @@ def split(name : str, x, attrs : dict): def split_dim_tensor(name : str, x, attrs : dict, dim): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) - dim_node = pdpd.assign(dim) - out = pdpd.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + dim_node = paddle.assign(dim) + out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -56,19 +56,19 @@ def split_dim_tensor(name : str, x, attrs : dict, dim): def split_test_list_tensor(name : str, x, attrs : dict): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) section = attrs['num_or_sections'] - section[0] = pdpd.assign(np.array((section[0],)).astype('int32')) - out = pdpd.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis']) + section[0] = paddle.assign(np.array((section[0],)).astype('int32')) + out = paddle.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis']) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, @@ -91,12 +91,12 @@ def main(): for t in data_types: for s in num_or_sections: for i in axes: - pdpd_attrs = { + paddle_attrs = { 'num_or_sections': s, 'axis': i } data_NCHW = np.random.rand(3,9,5).astype(t) - split("split_test{}".format(idx), data_NCHW, pdpd_attrs) + split("split_test{}".format(idx), data_NCHW, paddle_attrs) idx+=1 split("split_test_list", data_NCHW, { diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_squeeze.py similarity index 59% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_squeeze.py index 04eae5cf0b1..4c2f2dd1520 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_squeeze.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_squeeze.py @@ -3,22 +3,22 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def squeeze(name : str, x, axes : list): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.squeeze(node_x, axes=axes, name='squeeze') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.squeeze(node_x, axes=axes, name='squeeze') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_stack.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_stack.py similarity index 82% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_stack.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_stack.py index 04a6917d4fb..021e2c3b218 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_stack.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_stack.py @@ -7,26 +7,26 @@ import sys def stack(axis, input1, input2, input3): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - data1 = pdpd.static.data( + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data1 = paddle.static.data( 'data1', shape=input1.shape, dtype=input1.dtype) - data2 = pdpd.static.data( + data2 = paddle.static.data( 'data2', shape=input2.shape, dtype=input2.dtype) - data3 = pdpd.static.data( + data3 = paddle.static.data( 'data3', shape=input3.shape, dtype=input3.dtype) if (axis == None): - out = pdpd.paddle.stack([data1, data2, data3]) + out = paddle.paddle.stack([data1, data2, data3]) else: - out = pdpd.paddle.stack([data1, data2, data3], axis) + out = paddle.paddle.stack([data1, data2, data3], axis) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={"data1": input1, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_tanh.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_tanh.py similarity index 59% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_tanh.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_tanh.py index 40847a7417d..c819c7a8503 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_tanh.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_tanh.py @@ -3,22 +3,22 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def tanh(name:str, x): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - data = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.tanh(data) + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + data = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.tanh(data) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_unsqueeze.py similarity index 57% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_unsqueeze.py index e2fee0e97f5..b2b11c5b05a 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_unsqueeze.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_unsqueeze.py @@ -3,22 +3,22 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys data_type = 'float32' def unsqueeze(name : str, x, axes : list): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type) - out = pdpd.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze') + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) + out = paddle.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_unsupported_relu.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_unsupported_relu.py similarity index 84% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_unsupported_relu.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_unsupported_relu.py index ef70895ed38..6c738a67808 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_unsupported_relu.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_unsupported_relu.py @@ -7,7 +7,7 @@ import sys import os import numpy as np -import paddle as pdpd +import paddle # print numpy array like C structure @@ -60,22 +60,22 @@ def saveModel(name, exe, feedkeys: list, fetchlist: list, inputs: list, outputs: np.save(os.path.join(model_dir, "output{}".format(i)), output) # composited model + scattered model - pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe) - pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name + ".pdmodel", + paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe) + paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name + ".pdmodel", params_filename=name + ".pdiparams") def relu(name: str, x): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.nn.functional.relu(node_x) + node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') + out = paddle.nn.functional.relu(node_x) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = 
paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x}, diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_yolo_box.py similarity index 70% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/generate_yolo_box.py index f737068faf5..64daf67c78c 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/generate_yolo_box.py @@ -6,13 +6,13 @@ from save_model import saveModel import sys def yolo_box(name : str, x, img_size, attrs : dict): - import paddle as pdpd - pdpd.enable_static() + import paddle + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype) - node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype) - boxes, scores = pdpd.vision.ops.yolo_box(node_x, + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) + node_img_size = paddle.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype) + boxes, scores = paddle.vision.ops.yolo_box(node_x, node_img_size, anchors=attrs['anchors'], class_num=attrs['class_num'], @@ -22,10 +22,10 @@ def yolo_box(name : str, x, img_size, attrs : dict): name=None, scale_x_y=attrs['scale_x_y']) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'x': x, 'img_size': img_size}, @@ -41,7 +41,7 @@ def yolo_box(name : str, x, img_size, attrs : dict): def TEST1(): # yolo_box - pdpd_attrs = { + paddle_attrs = { 'name': "yolo_box_default", 'anchors': [10, 13, 16, 30, 33, 23], 'class_num': 2, @@ -51,7 +51,7 @@ def TEST1(): 'scale_x_y': 1.0 } - pdpd_attrs_clip_box = { + paddle_attrs_clip_box = { 'name': "yolo_box_clip_box", 'anchors': [10, 13, 16, 30, 33, 23], 'class_num': 2, @@ -61,7 +61,7 @@ def TEST1(): 'scale_x_y': 1.0 } - pdpd_attrs_scale_xy = { + paddle_attrs_scale_xy = { 'name': "yolo_box_scale_xy", 'anchors': [10, 13, 16, 30, 33, 23], 'class_num': 2, @@ -71,23 +71,23 @@ def TEST1(): 'scale_x_y': 1.2 } - pdpd_attrs_list = [pdpd_attrs, pdpd_attrs_clip_box, pdpd_attrs_scale_xy] + paddle_attrs_list = [paddle_attrs, paddle_attrs_clip_box, paddle_attrs_scale_xy] N = 32 - num_anchors = int(len(pdpd_attrs['anchors'])//2) - x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13) + num_anchors = int(len(paddle_attrs['anchors'])//2) + x_shape = (N, num_anchors * (5 + paddle_attrs['class_num']), 13, 13) imgsize_shape = (N, 2) data = np.random.random(x_shape).astype('float32') data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') - for item in pdpd_attrs_list: - pred_pdpd = yolo_box(item['name'], data, data_ImSize, item) + for item in paddle_attrs_list: + pred_paddle = yolo_box(item['name'], data, data_ImSize, item) def TEST2(): # yolo_box uneven spatial width and height - pdpd_attrs = { + paddle_attrs = { 'name': "yolo_box_uneven_wh", 'anchors': [10, 13, 16, 30, 33, 23], 'class_num': 2, @@ -100,14 +100,14 @@ def TEST2(): N = 16 SPATIAL_WIDTH = 13 SPATIAL_HEIGHT = 9 - num_anchors = int(len(pdpd_attrs['anchors'])//2) - x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH) + num_anchors = int(len(paddle_attrs['anchors'])//2) + x_shape = (N, num_anchors * (5 + paddle_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH) imgsize_shape = (N, 2) data = np.random.random(x_shape).astype('float32') data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') - pred_pdpd = yolo_box(pdpd_attrs['name'], data, data_ImSize, pdpd_attrs) + pred_paddle = yolo_box(paddle_attrs['name'], data, data_ImSize, paddle_attrs) if __name__ == "__main__": TEST1() diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/save_model.py b/src/core/tests/frontend/paddle/test_models/gen_scripts/save_model.py similarity index 87% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/save_model.py rename to src/core/tests/frontend/paddle/test_models/gen_scripts/save_model.py index 3512fb2fd14..14429089e7a 100644 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/save_model.py +++ b/src/core/tests/frontend/paddle/test_models/gen_scripts/save_model.py @@ -1,7 +1,7 @@ import os import sys import numpy as np -import paddle as pdpd +import paddle #print numpy array like C structure @@ -52,8 +52,8 @@ def saveModel(name, exe, feedkeys:list, fetchlist:list, inputs:list, outputs:lis np.save(os.path.join(model_dir, "output{}".format(i)), output) # composited model + scattered model - pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe) - pdpd.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams") + paddle.fluid.io.save_inference_model(model_dir, 
feedkeys, fetchlist, exe) + paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams") ''' @@ -70,17 +70,17 @@ def exportModel(name, dyn_func, input_data:list, target_dir:str): for idx, data in enumerate(input_data): input_name = 'input{}'.format(idx) input_specs.append( - pdpd.static.InputSpec(shape=data.shape, dtype=data.dtype, name=input_name) + paddle.static.InputSpec(shape=data.shape, dtype=data.dtype, name=input_name) ) # dump input np.save(os.path.join(model_dir, "input{}".format(idx)), data) - pdpd.jit.save(dyn_func, save_path, input_specs) + paddle.jit.save(dyn_func, save_path, input_specs) print('saved exported model to {}'.format(save_path)) # infer - model = pdpd.jit.load(save_path) + model = paddle.jit.load(save_path) result = model(*[input[:] for input in input_data]) diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_wrapper.py b/src/core/tests/frontend/paddle/test_models/gen_wrapper.py similarity index 100% rename from src/core/tests/frontend/paddlepaddle/test_models/gen_wrapper.py rename to src/core/tests/frontend/paddle/test_models/gen_wrapper.py diff --git a/src/core/tests/frontend/paddlepaddle/throw_in_conversion.cpp b/src/core/tests/frontend/paddle/throw_in_conversion.cpp similarity index 100% rename from src/core/tests/frontend/paddlepaddle/throw_in_conversion.cpp rename to src/core/tests/frontend/paddle/throw_in_conversion.cpp diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_argmax.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_argmax.py deleted file mode 100644 index 54b24364b2d..00000000000 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_argmax.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# pool2d paddle model generator -# -import numpy as np -from save_model import saveModel -import sys -data_type = 'float32' - - -def pdpd_argmax(name : str, x, axis): - import paddle as pdpd - pdpd.enable_static() - - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.argmax(x=node_x, axis=axis) - out = pdpd.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - -def pdpd_argmax1(name : str, x): - import paddle as pdpd - pdpd.enable_static() - - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.argmax(x=node_x) - out = pdpd.cast(out, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - -def main(): - data = np.random.random([3,5,7,2]).astype("float32") - axis = 0 - pdpd_argmax("argmax", data, axis) - pdpd_argmax1("argmax1", data) - - -if __name__ == "__main__": - main() diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py deleted file mode 100644 index 6f40afdb1f2..00000000000 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_dropout.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# pool2d paddle model generator -# -import numpy as np -from save_model import saveModel -import sys - - -def pdpd_dropout(name : str, x, p, pdpd_attrs): - import paddle as pdpd - pdpd.enable_static() - - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32') - out = pdpd.nn.functional.dropout(x=node_x, p=p, training=pdpd_attrs['training'], mode=pdpd_attrs['mode']) - - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], - outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def main(): - p=0.5 - data = np.random.random(size=(3, 10, 3, 7)).astype('float32') - pdpd_attrs = { - 'training' : False, - 'mode' : "downscale_in_infer" - } - pdpd_attrs2 = { - 'training' : False, - 'mode' : "upscale_in_train" - } - pdpd_dropout("dropout", data, p, pdpd_attrs) - pdpd_dropout("dropout_upscale_in_train", data, p, pdpd_attrs2) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py deleted file mode 100644 index feb8ce40300..00000000000 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_fill_constant.py +++ /dev/null @@ -1,97 +0,0 @@ -# -# fill_const paddle model generator -# -import numpy as np -from save_model import saveModel -import paddle as pdpd -import sys - - -def fill_constant(name : str, shape : list, dtype, value): - pdpd.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') - x2 = pdpd.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') - out = pdpd.add(pdpd.cast(x1, np.float32), pdpd.cast(x2, np.float32)) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - fetch_list=[out]) - - saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def fill_constant_tensor(name : str, shape : list, dtype, value): - pdpd.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_value = pdpd.static.data(name='value', shape=[1], dtype=dtype) - x1 = pdpd.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1') - out = pdpd.cast(x1, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - feed={"value": value}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def fill_constant_shape_tensor(name : str, shape, dtype, value): - pdpd.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_shape = pdpd.fluid.layers.fill_constant(shape=[2], value=shape, dtype='int32', name='shape') - x1 = pdpd.fluid.layers.fill_constant(shape=node_shape, value=value, dtype=dtype, name='fill_constant') - out = pdpd.cast(x1, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - fetch_list=[out]) - - saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def fill_constant_shape_tensor_list(name : str, shape: list, dtype, value): - pdpd.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_shape = pdpd.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape') - x1 = pdpd.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant') - out = pdpd.cast(x1, np.float32) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. 
- exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - fetch_list=[out]) - - saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def main(): - fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03) - fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2) - fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4) - fill_constant_tensor("fill_constant_tensor", [2, 3, 4], 'float32', 0.05) - fill_constant_shape_tensor("fill_constant_shape_tensor", 2, 'float32', 0.05) - fill_constant_shape_tensor_list("fill_constant_shape_tensor_list", 2, 'float32', 0.05) - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py deleted file mode 100644 index ef8dd32f0b3..00000000000 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_place_test_model.py +++ /dev/null @@ -1,53 +0,0 @@ -import numpy as np -from save_model import saveModel -import sys - - -def pdpd_rnn_lstm(input_size, hidden_size, layers, direction): - import paddle as pdpd - pdpd.enable_static() - main_program = pdpd.static.Program() - startup_program = pdpd.static.Program() - - num_of_directions = 1 if direction == 'forward' else 2 - with pdpd.static.program_guard(main_program, startup_program): - - rnn = pdpd.nn.LSTM(input_size, hidden_size, layers, direction, name="lstm") - - data = pdpd.static.data(name='x', shape=[4, 3, input_size], dtype='float32') - prev_h = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_1") - prev_c = pdpd.ones(shape=[layers * num_of_directions, 4, hidden_size], dtype=np.float32, name="const_2") - - y, (h, c) = rnn(data, (prev_h, prev_c)) - relu_1 = pdpd.nn.functional.relu(c, name="relu_1") - relu_2 = pdpd.nn.functional.relu(c, name="relu_2") - relu_3 = pdpd.nn.functional.relu(c, name="relu_3") - - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - exe.run(startup_program) - - outs = exe.run( - feed={'x': np.ones([4, 3, input_size]).astype(np.float32)}, - fetch_list=[y, h, c], - program=main_program) - saveModel("place_test_model", exe, feedkeys=['x'], - fetchlist=[y, h, c, relu_1, relu_2, relu_3], - inputs=[np.ones([4, 3, input_size]).astype(np.float32)], - outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1]) - return outs[0] - - -if __name__ == "__main__": - - testCases = [ - { - 'input_size': 2, - 'hidden_size': 2, - 'layers': 1, - 'direction': 'forward', - }, - ] - - for test in testCases: - pdpd_rnn_lstm(test['input_size'], test['hidden_size'], test['layers'], test['direction']) \ No newline at end of file diff --git a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py b/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py deleted file mode 100644 index 7ceee084fa6..00000000000 --- a/src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_scale.py +++ /dev/null @@ -1,93 +0,0 @@ -# -# pool2d paddle model generator -# -import numpy as np -import sys -from save_model import saveModel - - -def pdpd_scale(name : str, x, scale, bias, attrs : dict, data_type): - import paddle as pdpd - pdpd.enable_static() - - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) 
- out = pdpd.scale(x=node_x, scale=scale, bias=bias, - bias_after_scale=attrs['bias_after_scale']) - #FuzzyTest only support FP32 now, so cast result to fp32 - out = pdpd.cast(out, "float32") - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def pdpd_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type): - import paddle as pdpd - pdpd.enable_static() - - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data(name='x', shape=x.shape, dtype=data_type) - node_scale = pdpd.static.data(name='scale', shape=[1], dtype='float32') - out = pdpd.scale(x=node_x, scale=node_scale, bias=bias, - bias_after_scale=attrs['bias_after_scale']) - #FuzzyTest only support FP32 now, so cast result to fp32 - out = pdpd.cast(out, "float32") - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(pdpd.static.default_startup_program()) - - outs = exe.run( - feed={'x': x, 'scale': scale}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - -def main(): - scale = 2.0 - bias = 1.0 - data = np.random.random([2, 3]).astype("float32") - - test_cases = [ - "float32", - "int32", - "int64" - ] - - pdpd_attrs = { - 'bias_after_scale': True, - } - pdpd_scale_tensor("scale_tensor_bias_after", data, scale, bias, pdpd_attrs, 'float32') - - pdpd_attrs = { - 'bias_after_scale': False, - } - pdpd_scale_tensor("scale_tensor_bias_before", data, scale, bias, pdpd_attrs, 'float32') - - for test in test_cases: - data = np.random.random([2, 3]).astype(test) - pdpd_attrs = { - 'bias_after_scale': True, - } - pdpd_scale("scale_bias_after_" + test, data, scale, bias, pdpd_attrs, test) - - pdpd_attrs = { - 'bias_after_scale': False, - } - pdpd_scale("scale_bias_before_" + test, data, scale, bias, pdpd_attrs, test) - - - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/src/frontends/CMakeLists.txt b/src/frontends/CMakeLists.txt index b60c8156aff..1e5d6790ffa 100644 --- a/src/frontends/CMakeLists.txt +++ b/src/frontends/CMakeLists.txt @@ -12,8 +12,8 @@ if(ENABLE_OV_ONNX_FRONTEND) add_subdirectory(onnx) endif() -if(ENABLE_OV_PDPD_FRONTEND) - add_subdirectory(paddlepaddle) +if(ENABLE_OV_PADDLE_FRONTEND) + add_subdirectory(paddle) endif() if(ENABLE_OV_IR_FRONTEND) diff --git a/src/frontends/common/src/manager.cpp b/src/frontends/common/src/manager.cpp index 4a27068a8c7..8d5b2c1b1df 100644 --- a/src/frontends/common/src/manager.cpp +++ b/src/frontends/common/src/manager.cpp @@ -7,8 +7,9 @@ #include #include +#include "ngraph/except.hpp" #include "openvino/frontend/exception.hpp" -#include "openvino/frontend/place.hpp" +#include "openvino/util/env_util.hpp" #include "plugin_loader.hpp" #include "utils.hpp" @@ -32,7 +33,7 @@ public: {"ir", "ir"}, {"onnx", "onnx"}, {"tf", "tensorflow"}, - {"paddle", "paddlepaddle"}, + {"paddle", "paddle"}, }; auto it = predefined_frontends.find(framework); std::lock_guard guard(m_loading_mutex); @@ -119,7 +120,7 @@ private: {".xml", {"ir", "ir"}}, {".onnx", 
{"onnx", "onnx"}}, {".pb", {"tf", "tensorflow"}}, - {".pdmodel", {"paddle", "paddlepaddle"}}, + {".pdmodel", {"paddle", "paddle"}}, }; // List of prioritized frontends. @@ -127,7 +128,7 @@ private: {"ir", "ir"}, {"onnx", "onnx"}, {"tf", "tensorflow"}, - {"paddle", "paddlepaddle"}, + {"paddle", "paddle"}, }; if (variants.empty()) { return nullptr; diff --git a/src/frontends/common/src/plugin_loader.cpp b/src/frontends/common/src/plugin_loader.cpp index e7233ec2542..abe4da11f6e 100644 --- a/src/frontends/common/src/plugin_loader.cpp +++ b/src/frontends/common/src/plugin_loader.cpp @@ -45,7 +45,7 @@ void load_static_plugins(std::vector& res) { {"ir", "ir"}, {"onnx", "onnx"}, {"tf", "tensorflow"}, - {"paddle", "paddlepaddle"}, + {"paddle", "paddle"}, }; auto it = predefined_frontends.find(factory.m_name); if (it != predefined_frontends.end()) { diff --git a/src/frontends/paddlepaddle/CMakeLists.txt b/src/frontends/paddle/CMakeLists.txt similarity index 66% rename from src/frontends/paddlepaddle/CMakeLists.txt rename to src/frontends/paddle/CMakeLists.txt index d6c5e4999f5..c42ead4cf8f 100644 --- a/src/frontends/paddlepaddle/CMakeLists.txt +++ b/src/frontends/paddle/CMakeLists.txt @@ -2,8 +2,8 @@ # SPDX-License-Identifier: Apache-2.0 # -# TODO: Add LINKABLE_FRONTEND option when tensorflow frontend directory is moved to openvino folder -ov_add_frontend(NAME paddlepaddle +ov_add_frontend(NAME paddle + LINKABLE_FRONTEND PROTOBUF_LITE FILEDESCRIPTION "FrontEnd to load and convert PaddlePaddle file format" LINK_LIBRARIES inference_engine_transformations) diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp b/src/frontends/paddle/include/openvino/frontend/paddle/frontend.hpp similarity index 87% rename from src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp rename to src/frontends/paddle/include/openvino/frontend/paddle/frontend.hpp index fd705b2913e..26b60fd96dc 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/frontend.hpp +++ b/src/frontends/paddle/include/openvino/frontend/paddle/frontend.hpp @@ -6,18 +6,21 @@ #include #include -#include +#include +#include #include "exceptions.hpp" -#include "model.hpp" +#include "openvino/frontend/paddle/visibility.hpp" namespace ov { namespace frontend { -class OpPlacePDPD; +namespace paddle { -class PDPD_API FrontEndPDPD : public FrontEnd { +class OpPlace; + +class PADDLE_API FrontEnd : public ov::frontend::FrontEnd { public: - FrontEndPDPD() = default; + FrontEnd() = default; /// \brief Completely convert the remaining, not converted part of a function. 
/// \param partiallyConverted partially converted OV Model @@ -52,7 +55,7 @@ public: void add_extension(const std::shared_ptr& extension) override; protected: - /// \brief Check if FrontEndPDPD can recognize model from given parts + /// \brief Check if FrontEnd can recognize model from given parts /// \param params Can be path to folder which contains __model__ file or path to /// .pdmodel file /// \return InputModel::Ptr @@ -67,12 +70,13 @@ protected: private: static std::shared_ptr convert_each_node( - const std::shared_ptr& model, + const std::shared_ptr& frontend_model, std::function(const std::map>&, - const std::shared_ptr&)> func); + const std::shared_ptr&)> func); std::shared_ptr m_telemetry; std::vector> m_transformation_extensions; }; +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddle/include/openvino/frontend/paddle/visibility.hpp b/src/frontends/paddle/include/openvino/frontend/paddle/visibility.hpp new file mode 100644 index 00000000000..9107fe2a60f --- /dev/null +++ b/src/frontends/paddle/include/openvino/frontend/paddle/visibility.hpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/frontend/exception.hpp" + +#ifdef OPENVINO_STATIC_LIBRARY +# define PADDLE_API +# define PADDLE_C_API +#else +# ifdef ov_paddle_frontend_EXPORTS +# define PADDLE_API OPENVINO_CORE_EXPORTS +# define PADDLE_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS +# else +# define PADDLE_API OPENVINO_CORE_IMPORTS +# define PADDLE_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS +# endif // ov_paddle_frontend_EXPORTS +#endif // OPENVINO_STATIC_LIBRARY diff --git a/src/frontends/paddlepaddle/src/decoder.cpp b/src/frontends/paddle/src/decoder.cpp similarity index 78% rename from src/frontends/paddlepaddle/src/decoder.cpp rename to src/frontends/paddle/src/decoder.cpp index 816d7864fcd..432957d8d88 100644 --- a/src/frontends/paddlepaddle/src/decoder.cpp +++ b/src/frontends/paddle/src/decoder.cpp @@ -17,9 +17,11 @@ namespace ov { namespace frontend { -using namespace paddle::framework; +namespace paddle { -std::map TYPE_MAP{ +using namespace ::paddle::framework; + +std::map<::paddle::framework::proto::VarType_Type, ov::element::Type> TYPE_MAP{ {proto::VarType_Type::VarType_Type_BOOL, ov::element::boolean}, {proto::VarType_Type::VarType_Type_INT16, ov::element::i16}, {proto::VarType_Type::VarType_Type_INT32, ov::element::i32}, @@ -31,7 +33,7 @@ std::map TYPE_MAP{ {proto::VarType_Type::VarType_Type_INT8, ov::element::i8}, {proto::VarType_Type::VarType_Type_BF16, ov::element::bf16}}; -ov::Any DecoderPDPDProto::get_attribute(const std::string& name, const std::type_info& type_info) const { +ov::Any DecoderProto::get_attribute(const std::string& name, const std::type_info& type_info) const { auto attrs = decode_attribute_helper(name); if (attrs.empty()) { return {}; @@ -51,8 +53,8 @@ ov::Any DecoderPDPDProto::get_attribute(const std::string& name, const std::type return attrs[0].f(); } else if (type_info == typeid(std::vector)) { return std::vector(attrs[0].floats().begin(), attrs[0].floats().end()); - } else if (type_info == typeid(ngraph::element::Type)) { - return TYPE_MAP[static_cast(attrs[0].i())]; + } else if (type_info == typeid(ov::element::Type)) { + return TYPE_MAP[static_cast<::paddle::framework::proto::VarType_Type>(attrs[0].i())]; } else if (type_info == typeid(bool)) { return attrs[0].b(); } @@ -61,7 +63,7 @@ ov::Any DecoderPDPDProto::get_attribute(const std::string& 
name, const std::type return {}; } -std::vector DecoderPDPDProto::get_output_names() const { +std::vector DecoderProto::get_output_names() const { std::vector output_names; for (const auto& output : op_place->get_desc().outputs()) { output_names.push_back(output.parameter()); @@ -69,7 +71,7 @@ std::vector DecoderPDPDProto::get_output_names() const { return output_names; } -size_t DecoderPDPDProto::get_output_size() const { +size_t DecoderProto::get_output_size() const { size_t res = 0; for (const auto& output : op_place->get_desc().outputs()) { res += output.arguments().size(); @@ -77,20 +79,20 @@ size_t DecoderPDPDProto::get_output_size() const { return res; } -std::map> DecoderPDPDProto::get_output_type_map() const { +std::map> DecoderProto::get_output_type_map() const { std::map> output_types; for (const auto& out_port_pair : op_place->get_output_ports()) { for (const auto& p_place : out_port_pair.second) { - output_types[out_port_pair.first].push_back(p_place->get_target_tensor_pdpd()->get_element_type()); + output_types[out_port_pair.first].push_back(p_place->get_target_tensor_paddle()->get_element_type()); } } return output_types; } -ov::element::Type DecoderPDPDProto::get_out_port_type(const std::string& port_name) const { +ov::element::Type DecoderProto::get_out_port_type(const std::string& port_name) const { std::vector output_types; for (const auto& out_port : op_place->get_output_ports().at(port_name)) { - output_types.push_back(out_port->get_target_tensor_pdpd()->get_element_type()); + output_types.push_back(out_port->get_target_tensor_paddle()->get_element_type()); } FRONT_END_GENERAL_CHECK(output_types.size() > 0, "Port has no tensors connected."); FRONT_END_GENERAL_CHECK(std::equal(output_types.begin() + 1, output_types.end(), output_types.begin()), @@ -98,11 +100,11 @@ ov::element::Type DecoderPDPDProto::get_out_port_type(const std::string& port_na return output_types[0]; } -std::string DecoderPDPDProto::get_op_type() const { +std::string DecoderProto::get_op_type() const { return op_place->get_desc().type(); } -std::vector DecoderPDPDProto::decode_attribute_helper(const std::string& name) const { +std::vector DecoderProto::decode_attribute_helper(const std::string& name) const { std::vector attrs; for (const auto& attr : op_place->get_desc().attrs()) { if (attr.name() == name) @@ -121,7 +123,7 @@ std::vector DecoderPDPDProto::decode_attribute_helper(const namespace { inline std::map map_for_each_input_impl( - const google::protobuf::RepeatedPtrField& c, + const google::protobuf::RepeatedPtrField<::paddle::framework::proto::OpDesc_Var>& c, const std::function(const std::string&, size_t)>& func) { size_t idx = 0; std::map res; @@ -137,15 +139,16 @@ inline std::map map_for_each_input_impl( } } // namespace -std::map DecoderPDPDProto::map_for_each_input( +std::map DecoderProto::map_for_each_input( const std::function(const std::string&, size_t)>& func) const { return map_for_each_input_impl(op_place->get_desc().inputs(), func); } -std::map DecoderPDPDProto::map_for_each_output( +std::map DecoderProto::map_for_each_output( const std::function(const std::string&, size_t)>& func) const { return map_for_each_input_impl(op_place->get_desc().outputs(), func); } +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/decoder.hpp b/src/frontends/paddle/src/decoder.hpp similarity index 66% rename from src/frontends/paddlepaddle/src/decoder.hpp rename to src/frontends/paddle/src/decoder.hpp index b56ed58116e..82e4d7a37bf 100644 --- 
a/src/frontends/paddlepaddle/src/decoder.hpp +++ b/src/frontends/paddle/src/decoder.hpp @@ -16,20 +16,21 @@ #include "framework.pb.h" #include "node_context.hpp" -#include "paddlepaddle_frontend/frontend.hpp" -#include "paddlepaddle_frontend/place.hpp" +#include "openvino/frontend/paddle/frontend.hpp" +#include "place.hpp" namespace ov { namespace frontend { -extern std::map TYPE_MAP; +namespace paddle { +extern std::map<::paddle::framework::proto::VarType_Type, ov::element::Type> TYPE_MAP; -class DecoderPDPDProto : public pdpd::DecoderBase { +class DecoderProto : public paddle::DecoderBase { public: - explicit DecoderPDPDProto(const std::shared_ptr& op) : op_place(op) {} + explicit DecoderProto(const std::shared_ptr& op) : op_place(op) {} ov::Any get_attribute(const std::string& name, const std::type_info& type_info) const override; - std::vector get_output_names() const override; + std::vector get_output_names() const override; size_t get_output_size() const override; @@ -46,9 +47,10 @@ public: const std::function(const std::string&, size_t)>& func) const; private: - std::vector decode_attribute_helper(const std::string& name) const; - std::shared_ptr op_place; + std::vector<::paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const; + std::shared_ptr op_place; }; +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/default_opset.hpp b/src/frontends/paddle/src/default_opset.hpp similarity index 86% rename from src/frontends/paddlepaddle/src/default_opset.hpp rename to src/frontends/paddle/src/default_opset.hpp index 852018f88a1..413d7036f06 100644 --- a/src/frontends/paddlepaddle/src/default_opset.hpp +++ b/src/frontends/paddle/src/default_opset.hpp @@ -6,11 +6,11 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { namespace default_opset = ov::opset8; } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/exception.cpp b/src/frontends/paddle/src/exceptions.cpp similarity index 70% rename from src/frontends/paddlepaddle/src/exception.cpp rename to src/frontends/paddle/src/exceptions.cpp index ddd097ba5ef..f571183b61f 100644 --- a/src/frontends/paddlepaddle/src/exception.cpp +++ b/src/frontends/paddle/src/exceptions.cpp @@ -3,16 +3,17 @@ // #include "exceptions.hpp" + #include "node_context.hpp" namespace ov { namespace frontend { -namespace pdpd { -std::string OpValidationFailurePDPD::get_error_msg_prefix_pdpd(const pdpd::NodeContext& node) { +namespace paddle { +std::string OpValidationFailure::get_error_msg_prefix_paddle(const paddle::NodeContext& node) { std::stringstream ss; ss << "While validating node '" << node.get_op_type() << '\''; return ss.str(); } -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddle/src/exceptions.hpp b/src/frontends/paddle/src/exceptions.hpp new file mode 100644 index 00000000000..7d8254cbca5 --- /dev/null +++ b/src/frontends/paddle/src/exceptions.hpp @@ -0,0 +1,37 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace ov { +namespace frontend { +namespace paddle { + +class NodeContext; + +class OpValidationFailure : public ov::frontend::OpValidationFailure { +public: + OpValidationFailure(const CheckLocInfo& check_loc_info, + const paddle::NodeContext& node, + const std::string& explanation) + : 
ov::frontend::OpValidationFailure(check_loc_info, get_error_msg_prefix_paddle(node), explanation) {} + +private: + static std::string get_error_msg_prefix_paddle(const paddle::NodeContext& node); +}; +} // namespace paddle +} // namespace frontend + +/// \brief Macro to check whether a boolean condition holds. +/// \param node_context Object of NodeContext class +/// \param cond Condition to check +/// \param ... Additional error message info to be added to the error message via the `<<` +/// stream-insertion operator. Note that the expressions here will be evaluated lazily, +/// i.e., only if the `cond` evalutes to `false`. +/// \throws ::ov::OpValidationFailure if `cond` is false. +#define PADDLE_OP_CHECK(node_context, ...) \ + OPENVINO_ASSERT_HELPER(::ov::frontend::paddle::OpValidationFailure, (node_context), __VA_ARGS__) +} // namespace ov diff --git a/src/frontends/paddlepaddle/src/frontend.cpp b/src/frontends/paddle/src/frontend.cpp similarity index 72% rename from src/frontends/paddlepaddle/src/frontend.cpp rename to src/frontends/paddle/src/frontend.cpp index c6e41123bdd..6e8cdc0c9c7 100644 --- a/src/frontends/paddlepaddle/src/frontend.cpp +++ b/src/frontends/paddle/src/frontend.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "paddlepaddle_frontend/frontend.hpp" +#include "openvino/frontend/paddle/frontend.hpp" #include #include @@ -10,15 +10,14 @@ #include #include "decoder.hpp" -#include "exceptions.hpp" #include "framework.pb.h" +#include "input_model.hpp" #include "node_context.hpp" #include "op_table.hpp" #include "openvino/opsets/opset7.hpp" -#include "paddlepaddle_frontend/model.hpp" -#include "paddlepaddle_frontend/place.hpp" -#include "pdpd_fw_node.hpp" -#include "pdpd_utils.hpp" +#include "paddle_fw_node.hpp" +#include "paddle_utils.hpp" +#include "place.hpp" using namespace ov::opset7; using namespace ov; @@ -26,10 +25,11 @@ using namespace ov::frontend; namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace { -NamedOutputs make_ng_node(const std::map>& nodes, - const std::shared_ptr& op_place, + +NamedOutputs make_ng_node(const std::map>& nodes, + const std::shared_ptr& op_place, const std::map& CREATORS_MAP) { const auto& op_desc = op_place->get_desc(); @@ -52,7 +52,7 @@ NamedOutputs make_ng_node(const std::map>& nodes, NamedOutputs outputs; // In case the conversion function throws exception try { - outputs = creator_it->second(NodeContext(DecoderPDPDProto(op_place), named_inputs)); + outputs = creator_it->second(NodeContext(DecoderProto(op_place), named_inputs)); } catch (std::exception& ex) { FRONT_END_OP_CONVERSION_CHECK(false, "Fail to convert " + op_desc.type() + " Exception " + ex.what()); } @@ -60,8 +60,8 @@ NamedOutputs make_ng_node(const std::map>& nodes, return outputs; } -NamedOutputs make_framework_node(const std::map>& nodes, - const std::shared_ptr& op_place) { +NamedOutputs make_framework_node(const std::map>& nodes, + const std::shared_ptr& op_place) { const auto& op_desc = op_place->get_desc(); OutputVector inputs_vector; @@ -82,13 +82,12 @@ NamedOutputs make_framework_node(const std::map>& } } - auto node = - std::make_shared(DecoderPDPDProto(op_place), inputs_vector, inputs_names); + auto node = std::make_shared(DecoderProto(op_place), inputs_vector, inputs_names); return node->return_named_outputs(); } -bool normalize_framework_node(const std::shared_ptr& node, +bool normalize_framework_node(const std::shared_ptr& node, const std::map& CREATORS_MAP) { auto type = node->get_op_type(); auto 
creator_it = CREATORS_MAP.find(type); @@ -132,18 +131,19 @@ std::istream* variant_to_stream_ptr(const ov::Any& variant, std::ifstream& ext_s return &ext_stream; } } // namespace -} // namespace pdpd -std::shared_ptr FrontEndPDPD::convert_each_node( - const std::shared_ptr& model, +std::shared_ptr FrontEnd::convert_each_node( + const std::shared_ptr& frontend_model, std::function(const std::map>&, - const std::shared_ptr&)> func) { + const std::shared_ptr&)> func) { + auto model = std::dynamic_pointer_cast(frontend_model); + FRONT_END_GENERAL_CHECK(model, "Invalid input model"); auto nodes_dict(model->get_tensor_values()); ParameterVector parameter_nodes; ResultVector result_nodes; for (const auto& _inp_place : model->get_inputs()) { - const auto& inp_place = std::dynamic_pointer_cast(_inp_place); + const auto& inp_place = std::dynamic_pointer_cast(_inp_place); const auto& var = inp_place->get_desc(); const auto& shape = inp_place->get_partial_shape(); const auto& type = inp_place->get_element_type(); @@ -161,10 +161,10 @@ std::shared_ptr FrontEndPDPD::convert_each_node( // inputs and outputs are stored in the model already continue; } else { - pdpd::NamedOutputs named_outputs = func(nodes_dict, op_place); + paddle::NamedOutputs named_outputs = func(nodes_dict, op_place); if (!named_outputs.empty()) { - if (op_desc.outputs().begin()->arguments().size() > 0) { + if (!op_desc.outputs().begin()->arguments().empty()) { const auto& tensor_name = op_desc.outputs().begin()->arguments()[0]; auto node = named_outputs.begin()->second[0].get_node_shared_ptr(); node->set_friendly_name(tensor_name); @@ -193,7 +193,7 @@ std::shared_ptr FrontEndPDPD::convert_each_node( } for (const auto& _outp_place : model->get_outputs()) { - const auto& outp_place = std::dynamic_pointer_cast(_outp_place); + const auto& outp_place = std::dynamic_pointer_cast(_outp_place); auto var = outp_place->get_desc(); auto input_var_name = var.name(); auto result = std::make_shared(nodes_dict.at(input_var_name)); @@ -204,8 +204,8 @@ std::shared_ptr FrontEndPDPD::convert_each_node( return std::make_shared(result_nodes, parameter_nodes); } -bool FrontEndPDPD::supported_impl(const std::vector& variants) const { - // FrontEndPDPD can only load model specified by one path, one file or two files. +bool FrontEnd::supported_impl(const std::vector& variants) const { + // FrontEnd can only load model specified by one path, one file or two files. 
if (variants.empty() || variants.size() > 2) return false; @@ -213,8 +213,8 @@ bool FrontEndPDPD::supported_impl(const std::vector& variants) const { if (variants[0].is()) { std::string suffix = ".pdmodel"; std::string model_path = variants[0].as(); - if (!pdpd::endsWith(model_path, suffix)) { - model_path += pdpd::get_path_sep() + "__model__"; + if (!paddle::endsWith(model_path, suffix)) { + model_path += paddle::get_path_sep() + "__model__"; } std::ifstream model_str(model_path, std::ios::in | std::ifstream::binary); // It is possible to validate here that protobuf can read model from the stream, @@ -225,8 +225,8 @@ bool FrontEndPDPD::supported_impl(const std::vector& variants) const { else if (variants[0].is()) { std::wstring suffix = L".pdmodel"; std::wstring model_path = variants[0].as(); - if (!pdpd::endsWith(model_path, suffix)) { - model_path += pdpd::get_path_sep() + L"__model__"; + if (!paddle::endsWith(model_path, suffix)) { + model_path += paddle::get_path_sep() + L"__model__"; } std::ifstream model_str(model_path, std::ios::in | std::ifstream::binary); // It is possible to validate here that protobuf can read model from the stream, @@ -237,48 +237,48 @@ bool FrontEndPDPD::supported_impl(const std::vector& variants) const { else if (variants[0].is()) { // Validating first stream, it must contain a model auto p_model_stream = variants[0].as(); - paddle::framework::proto::ProgramDesc fw; + ::paddle::framework::proto::ProgramDesc fw; return fw.ParseFromIstream(p_model_stream); } return false; } -InputModel::Ptr FrontEndPDPD::load_impl(const std::vector& variants) const { +InputModel::Ptr FrontEnd::load_impl(const std::vector& variants) const { if (variants.size() == 1) { // The case when folder with __model__ and weight files is provided or .pdmodel file if (variants[0].is()) { std::string m_path = variants[0].as(); - return std::make_shared(m_path, m_telemetry); + return std::make_shared(m_path, m_telemetry); } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) else if (variants[0].is()) { std::wstring m_path = variants[0].as(); - return std::make_shared(m_path, m_telemetry); + return std::make_shared(m_path, m_telemetry); } #endif // The case with only model stream provided and no weights. 
This means model has // no learnable weights else if (variants[0].is()) { auto p_model_stream = variants[0].as(); - return std::make_shared(std::vector{p_model_stream}, m_telemetry); + return std::make_shared(std::vector{p_model_stream}, m_telemetry); } } else if (variants.size() == 2) { // The case when .pdmodel and .pdparams files are provided std::ifstream model_stream; std::ifstream weights_stream; - std::istream* p_model_stream = pdpd::variant_to_stream_ptr(variants[0], model_stream); - std::istream* p_weights_stream = pdpd::variant_to_stream_ptr(variants[1], weights_stream); + std::istream* p_model_stream = paddle::variant_to_stream_ptr(variants[0], model_stream); + std::istream* p_weights_stream = paddle::variant_to_stream_ptr(variants[1], weights_stream); if (p_model_stream && p_weights_stream) { - return std::make_shared(std::vector{p_model_stream, p_weights_stream}, - m_telemetry); + return std::make_shared(std::vector{p_model_stream, p_weights_stream}, + m_telemetry); } } - PDPD_THROW("Model can be loaded either from 1 or 2 files/streams"); + FRONT_END_THROW("Model can be loaded either from 1 or 2 files/streams"); } -std::shared_ptr FrontEndPDPD::convert(const InputModel::Ptr& model) const { - auto pdpd_model = std::dynamic_pointer_cast(model); - FRONT_END_GENERAL_CHECK(pdpd_model != nullptr, "Invalid input model"); +std::shared_ptr FrontEnd::convert(const InputModel::Ptr& model) const { + auto paddle_model = std::dynamic_pointer_cast(model); + FRONT_END_GENERAL_CHECK(paddle_model != nullptr, "Invalid input model"); if (!m_transformation_extensions.empty()) { auto function = decode(model); @@ -292,20 +292,20 @@ std::shared_ptr FrontEndPDPD::convert(const InputModel::Ptr& model) c return function; } - std::map CREATORS_MAP = pdpd::get_supported_ops(); + std::map CREATORS_MAP = paddle::get_supported_ops(); auto f = convert_each_node( - pdpd_model, - [&](const std::map>& nodes_dict, const std::shared_ptr& op_place) { - return pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); + paddle_model, + [&](const std::map>& nodes_dict, const std::shared_ptr& op_place) { + return paddle::make_ng_node(nodes_dict, op_place, CREATORS_MAP); }); return f; } -void FrontEndPDPD::convert(const std::shared_ptr& partiallyConverted) const { +void FrontEnd::convert(const std::shared_ptr& partiallyConverted) const { for (const auto& node : partiallyConverted->get_ordered_ops()) { - if (ov::is_type(node)) { - pdpd::normalize_framework_node(std::dynamic_pointer_cast(node), - pdpd::get_supported_ops()); + if (ov::is_type(node)) { + paddle::normalize_framework_node(std::dynamic_pointer_cast(node), + paddle::get_supported_ops()); } } for (const auto& result : partiallyConverted->get_results()) { @@ -313,9 +313,9 @@ void FrontEndPDPD::convert(const std::shared_ptr& partiallyConverted) } } -std::shared_ptr FrontEndPDPD::convert_partially(const InputModel::Ptr& model) const { - auto pdpd_model = std::dynamic_pointer_cast(model); - FRONT_END_GENERAL_CHECK(pdpd_model != nullptr, "Invalid input model"); +std::shared_ptr FrontEnd::convert_partially(const InputModel::Ptr& model) const { + auto paddle_model = std::dynamic_pointer_cast(model); + FRONT_END_GENERAL_CHECK(paddle_model != nullptr, "Invalid input model"); if (!m_transformation_extensions.empty()) { auto function = decode(model); @@ -329,35 +329,35 @@ std::shared_ptr FrontEndPDPD::convert_partially(const InputModel::Ptr return function; } - std::map CREATORS_MAP = pdpd::get_supported_ops(); + std::map CREATORS_MAP = paddle::get_supported_ops(); auto f = 
convert_each_node( - pdpd_model, - [&](const std::map>& nodes_dict, const std::shared_ptr& op_place) { - pdpd::NamedOutputs named_outputs; + paddle_model, + [&](const std::map>& nodes_dict, const std::shared_ptr& op_place) { + paddle::NamedOutputs named_outputs; try { - named_outputs = pdpd::make_ng_node(nodes_dict, op_place, CREATORS_MAP); + named_outputs = paddle::make_ng_node(nodes_dict, op_place, CREATORS_MAP); } catch (const OpConversionFailure&) { - named_outputs = pdpd::make_framework_node(nodes_dict, op_place); + named_outputs = paddle::make_framework_node(nodes_dict, op_place); } return named_outputs; }); return f; } -std::shared_ptr FrontEndPDPD::decode(const InputModel::Ptr& model) const { - auto pdpd_model = std::dynamic_pointer_cast(model); - FRONT_END_GENERAL_CHECK(pdpd_model != nullptr, "Invalid input model"); +std::shared_ptr FrontEnd::decode(const InputModel::Ptr& model) const { + auto paddle_model = std::dynamic_pointer_cast(model); + FRONT_END_GENERAL_CHECK(paddle_model != nullptr, "Invalid input model"); - std::map CREATORS_MAP = pdpd::get_supported_ops(); - auto f = convert_each_node(pdpd_model, pdpd::make_framework_node); + std::map CREATORS_MAP = paddle::get_supported_ops(); + auto f = convert_each_node(paddle_model, paddle::make_framework_node); return f; } -std::string FrontEndPDPD::get_name() const { +std::string FrontEnd::get_name() const { return "paddle"; } -void FrontEndPDPD::add_extension(const std::shared_ptr& extension) { +void FrontEnd::add_extension(const std::shared_ptr& extension) { if (auto telemetry = std::dynamic_pointer_cast(extension)) { m_telemetry = telemetry; } else if (auto transformation = std::dynamic_pointer_cast(extension)) { @@ -365,18 +365,19 @@ void FrontEndPDPD::add_extension(const std::shared_ptr& extension } } +} // namespace paddle } // namespace frontend } // namespace ov -PDPD_C_API FrontEndVersion GetAPIVersion() { +PADDLE_C_API FrontEndVersion GetAPIVersion() { return OV_FRONTEND_API_VERSION; } -PDPD_C_API void* GetFrontEndData() { +PADDLE_C_API void* GetFrontEndData() { FrontEndPluginInfo* res = new FrontEndPluginInfo(); res->m_name = "paddle"; res->m_creator = []() { - return std::make_shared(); + return std::make_shared(); }; return res; } diff --git a/src/frontends/paddlepaddle/src/model.cpp b/src/frontends/paddle/src/input_model.cpp similarity index 62% rename from src/frontends/paddlepaddle/src/model.cpp rename to src/frontends/paddle/src/input_model.cpp index b499aedd719..84cb362b787 100644 --- a/src/frontends/paddlepaddle/src/model.cpp +++ b/src/frontends/paddle/src/input_model.cpp @@ -2,18 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "paddlepaddle_frontend/model.hpp" +#include "input_model.hpp" #include #include #include "decoder.hpp" -#include "exceptions.hpp" #include "framework.pb.h" +#include "input_model.hpp" #include "node_context.hpp" #include "openvino/opsets/opset7.hpp" -#include "paddlepaddle_frontend/place.hpp" -#include "pdpd_utils.hpp" +#include "paddle_utils.hpp" +#include "place.hpp" #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) # include @@ -22,17 +22,19 @@ namespace ov { namespace frontend { -using namespace paddle::framework::proto; +namespace paddle { -class InputModelPDPD::InputModelPDPDImpl { +using namespace ::paddle::framework::proto; + +class InputModel::InputModelImpl { public: template - InputModelPDPDImpl(const std::basic_string& path, - const InputModel& input_model, - const std::shared_ptr& telemetry); - InputModelPDPDImpl(const std::vector& streams, - 
const InputModel& input_model, - const std::shared_ptr& telemetry); + InputModelImpl(const std::basic_string& path, + const InputModel& input_model, + const std::shared_ptr& telemetry); + InputModelImpl(const std::vector& streams, + const InputModel& input_model, + const std::shared_ptr& telemetry); std::vector getInputs() const; std::vector getOutputs() const; Place::Ptr getPlaceByTensorName(const std::string& tensorName) const; @@ -45,11 +47,11 @@ public: void setElementType(Place::Ptr place, const ov::element::Type&); void setTensorValue(Place::Ptr place, const void* value); - std::vector> get_op_places() const; - std::map> get_var_places() const { + std::vector> get_op_places() const; + std::map> get_var_places() const { return m_var_places; } - std::map> get_tensor_values() const { + std::map> get_tensor_values() const { return m_tensor_values; }; @@ -57,15 +59,15 @@ private: void loadPlaces(); template void loadConsts(const std::basic_string& folder_with_weights, std::istream* weight_stream); - std::vector> determine_cut_nodes() const; + std::vector> determine_cut_nodes() const; - std::vector> m_op_places; - std::map> m_var_places; + std::vector> m_op_places; + std::map> m_var_places; std::shared_ptr m_fw_ptr; const InputModel& m_input_model; std::vector m_inputs; std::vector m_outputs; - std::map> m_tensor_values; + std::map> m_tensor_values; std::shared_ptr m_telemetry; @@ -73,7 +75,7 @@ private: bool m_graph_changed = false; }; -void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { +void InputModel::InputModelImpl::loadPlaces() { const int cnt_of_blocks = m_fw_ptr->blocks_size(); const auto& blocks = m_fw_ptr->blocks(); std::map op_statistics; @@ -82,11 +84,11 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { const auto& block = blocks[block_idx]; for (const auto& var : block.vars()) { - m_var_places[var.name()] = std::make_shared(m_input_model, var); + m_var_places[var.name()] = std::make_shared(m_input_model, var); } for (const auto& op : block.ops()) { - auto op_place = std::make_shared(m_input_model, op); + auto op_place = std::make_shared(m_input_model, op); if (m_telemetry) { op_statistics[op.type()]++; @@ -96,7 +98,7 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { for (const auto& output : op.outputs()) { for (const auto& var_name : output.arguments()) { - auto out_port = std::make_shared(m_input_model); + auto out_port = std::make_shared(m_input_model); // connect out_port and tensor const auto& tensor = m_var_places.at(var_name); @@ -111,7 +113,7 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { for (const auto& input : op.inputs()) { for (const auto& var_name : input.arguments()) { - auto in_port = std::make_shared(m_input_model); + auto in_port = std::make_shared(m_input_model); // connect in_port and tensor const auto& tensor = m_var_places.at(var_name); @@ -126,8 +128,8 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { // Determine outputs and inputs if (op.type() == "feed") { - const auto& place = op_place->get_output_port_pdpd("Out", 0); - const auto& var_place = std::dynamic_pointer_cast(place->get_target_tensor_pdpd()); + const auto& place = op_place->get_output_port_paddle("Out", 0); + const auto& var_place = std::dynamic_pointer_cast(place->get_target_tensor_paddle()); const auto& tensor_desc = var_place->get_desc().type().lod_tensor().tensor(); const auto& dims = tensor_desc.dims(); @@ -135,8 +137,8 @@ void InputModelPDPD::InputModelPDPDImpl::loadPlaces() { 
var_place->set_partial_shape(PartialShape(std::vector(dims.begin(), dims.end()))); m_inputs.push_back(var_place); } else if (op.type() == "fetch") { - auto place = op_place->get_input_port_pdpd("X", 0); - m_outputs.push_back(place->get_source_tensor_pdpd()); + auto place = op_place->get_input_port_paddle("X", 0); + m_outputs.push_back(place->get_source_tensor_paddle()); } } } @@ -163,7 +165,7 @@ bool read_tensor(std::istream& is, char* data, size_t len) { template std::basic_string get_const_path(const std::basic_string& folder_with_weights, const std::string& name) { - return folder_with_weights + pdpd::get_path_sep() + name; + return folder_with_weights + paddle::get_path_sep() + name; } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) @@ -171,7 +173,7 @@ template <> std::basic_string get_const_path(const std::basic_string& folder, const std::string& name) { std::wstring_convert> converter; std::wstring _name = converter.from_bytes(name); - return folder + pdpd::get_path_sep() + _name; + return folder + paddle::get_path_sep() + _name; } #endif @@ -179,7 +181,7 @@ template std::basic_string get_model_path(const std::basic_string& path, std::ifstream* weights_stream) { std::string model_file{path}; std::string ext = ".pdmodel"; - if (pdpd::endsWith(model_file, ext)) { + if (paddle::endsWith(model_file, ext)) { std::string params_ext = ".pdiparams"; std::string weights_file{path}; weights_file.replace(weights_file.size() - ext.size(), ext.size(), params_ext); @@ -187,7 +189,7 @@ std::basic_string get_model_path(const std::basic_string& path, std::ifstr // Don't throw error if file isn't opened // It may mean that model don't have constants } else { - model_file += pdpd::get_path_sep() + "__model__"; + model_file += paddle::get_path_sep() + "__model__"; } return model_file; } @@ -197,7 +199,7 @@ template <> std::basic_string get_model_path(const std::basic_string& path, std::ifstream* weights_stream) { std::wstring model_file{path}; std::wstring ext = L".pdmodel"; - if (pdpd::endsWith(model_file, ext)) { + if (paddle::endsWith(model_file, ext)) { std::wstring params_ext = L".pdiparams"; std::wstring weights_file{path}; weights_file.replace(weights_file.size() - ext.size(), ext.size(), params_ext); @@ -205,34 +207,34 @@ std::basic_string get_model_path(const std::basic_string& path // Don't throw error if file isn't opened // It may mean that model don't have constants } else { - model_file += pdpd::get_path_sep() + L"__model__"; + model_file += paddle::get_path_sep() + L"__model__"; } return model_file; } #endif } // namespace -std::vector> InputModelPDPD::InputModelPDPDImpl::get_op_places() const { +std::vector> InputModel::InputModelImpl::get_op_places() const { if (m_graph_changed) { return determine_cut_nodes(); } return m_op_places; } -std::vector> InputModelPDPD::InputModelPDPDImpl::determine_cut_nodes() const { - std::queue q; - std::unordered_set visited; - std::vector> new_op_places; +std::vector> InputModel::InputModelImpl::determine_cut_nodes() const { + std::queue q; + std::unordered_set visited; + std::vector> new_op_places; new_op_places.reserve(m_op_places.size()); // Marking nodes from outputs to inputs/constants for (const auto& output : getOutputs()) { if (!output->is_input()) { - auto pdpd_output_op = std::dynamic_pointer_cast(output->get_producing_operation()); - PDPD_ASSERT(pdpd_output_op != nullptr, "Output doesn't have producing operation"); - if (!visited.count(pdpd_output_op.get())) { - visited.insert(pdpd_output_op.get()); - 
q.push(pdpd_output_op.get()); - new_op_places.push_back(pdpd_output_op); + auto paddle_output_op = std::dynamic_pointer_cast(output->get_producing_operation()); + FRONT_END_GENERAL_CHECK(paddle_output_op != nullptr, "Output doesn't have producing operation"); + if (!visited.count(paddle_output_op.get())) { + visited.insert(paddle_output_op.get()); + q.push(paddle_output_op.get()); + new_op_places.push_back(paddle_output_op); } } } @@ -243,12 +245,12 @@ std::vector> InputModelPDPD::InputModelPDPDImpl::de for (const auto& port : map_pair.second) { auto tensor = port->get_source_tensor(); if (tensor && !tensor->is_input() && !m_tensor_values.count(tensor->get_names()[0])) { - std::shared_ptr pdpd_op = - std::dynamic_pointer_cast(tensor->get_producing_operation()); - if (pdpd_op && !visited.count(pdpd_op.get())) { - visited.insert(pdpd_op.get()); - q.push(pdpd_op.get()); - new_op_places.push_back(pdpd_op); + std::shared_ptr paddle_op = + std::dynamic_pointer_cast(tensor->get_producing_operation()); + if (paddle_op && !visited.count(paddle_op.get())) { + visited.insert(paddle_op.get()); + q.push(paddle_op.get()); + new_op_places.push_back(paddle_op); } } } @@ -259,17 +261,17 @@ std::vector> InputModelPDPD::InputModelPDPDImpl::de } template -void InputModelPDPD::InputModelPDPDImpl::loadConsts(const std::basic_string& folder_with_weights, - std::istream* weight_stream) { +void InputModel::InputModelImpl::loadConsts(const std::basic_string& folder_with_weights, + std::istream* weight_stream) { for (const auto& item : m_var_places) { const auto& var_desc = item.second->get_desc(); const auto& name = item.first; - if (pdpd::endsWith(name, std::string{"feed"}) || pdpd::endsWith(name, std::string{"fetch"})) + if (paddle::endsWith(name, std::string{"feed"}) || paddle::endsWith(name, std::string{"fetch"})) continue; if (!var_desc.persistable()) continue; - FRONT_END_GENERAL_CHECK(var_desc.type().type() == paddle::framework::proto::VarType::LOD_TENSOR); + FRONT_END_GENERAL_CHECK(var_desc.type().type() == ::paddle::framework::proto::VarType::LOD_TENSOR); const auto& tensor = var_desc.type().lod_tensor().tensor(); Shape shape(tensor.dims().cbegin(), tensor.dims().cend()); const auto& type = TYPE_MAP[tensor.data_type()]; @@ -298,9 +300,9 @@ void InputModelPDPD::InputModelPDPDImpl::loadConsts(const std::basic_string& } template -InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::basic_string& path, - const InputModel& input_model, - const std::shared_ptr& telemetry) +InputModel::InputModelImpl::InputModelImpl(const std::basic_string& path, + const InputModel& input_model, + const std::shared_ptr& telemetry) : m_fw_ptr{std::make_shared()}, m_input_model(input_model), m_telemetry(telemetry) { @@ -312,8 +314,8 @@ InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::basic_stringParseFromIstream(&pb_stream), "Model can't be parsed"); // According to Paddle, the saved model has the framework version // For example Paddle 2.1.0 is encoded as 2001000. 0 means the latest framework. 
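determine_cut_nodes() above is what makes override_all_inputs/override_all_outputs work after the user moves the graph boundaries: starting from the requested outputs, it walks backwards over producing operations and keeps only the operators that are still reachable. A compact sketch of that reverse traversal, using a hypothetical simplified node type (the real code walks OpPlace objects through their input ports and also stops at frozen tensor values):

#include <memory>
#include <queue>
#include <unordered_set>
#include <vector>

struct OpNode {
    std::vector<std::shared_ptr<OpNode>> producers;  // ops feeding this op's inputs
};

// Keep only the ops reachable backwards from the requested outputs.
std::vector<std::shared_ptr<OpNode>> determine_cut_nodes(
        const std::vector<std::shared_ptr<OpNode>>& output_ops) {
    std::queue<OpNode*> q;
    std::unordered_set<OpNode*> visited;
    std::vector<std::shared_ptr<OpNode>> kept;
    for (const auto& op : output_ops) {
        if (op && visited.insert(op.get()).second) {
            q.push(op.get());
            kept.push_back(op);
        }
    }
    while (!q.empty()) {
        OpNode* current = q.front();
        q.pop();
        for (const auto& producer : current->producers) {
            if (producer && visited.insert(producer.get()).second) {
                q.push(producer.get());
                kept.push_back(producer);
            }
        }
    }
    return kept;
}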
- // https://github.com/PaddlePaddle/Paddle/blob/develop/cmake/version.cmake - // https://github.com/PaddlePaddle/Paddle/blob/2100816c5190693cc7dee181e96af72e9f0fbd1d/paddle/fluid/framework/program_desc.cc#L52 + // https://github.com/paddle/Paddle/blob/develop/cmake/version.cmake + // https://github.com/paddle/Paddle/blob/2100816c5190693cc7dee181e96af72e9f0fbd1d/paddle/fluid/framework/program_desc.cc#L52 int64_t version = m_fw_ptr->version().version(); FRONT_END_GENERAL_CHECK( version >= 2000000 || version == 0, @@ -326,9 +328,9 @@ InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::basic_string& streams, - const InputModel& input_model, - const std::shared_ptr& telemetry) +InputModel::InputModelImpl::InputModelImpl(const std::vector& streams, + const InputModel& input_model, + const std::shared_ptr& telemetry) : m_fw_ptr{std::make_shared()}, m_input_model(input_model), m_telemetry(telemetry) { @@ -346,35 +348,35 @@ InputModelPDPD::InputModelPDPDImpl::InputModelPDPDImpl(const std::vector InputModelPDPD::InputModelPDPDImpl::getInputs() const { +std::vector InputModel::InputModelImpl::getInputs() const { return m_inputs; } -std::vector InputModelPDPD::InputModelPDPDImpl::getOutputs() const { +std::vector InputModel::InputModelImpl::getOutputs() const { return m_outputs; } -Place::Ptr InputModelPDPD::InputModelPDPDImpl::getPlaceByTensorName(const std::string& tensorName) const { +Place::Ptr InputModel::InputModelImpl::getPlaceByTensorName(const std::string& tensorName) const { if (m_var_places.count(tensorName)) return m_var_places.at(tensorName); return nullptr; } namespace { -std::shared_ptr castToTensorPlace(const Place::Ptr& place) { - if (auto var_place = std::dynamic_pointer_cast(place)) { +std::shared_ptr castToTensorPlace(const Place::Ptr& place) { + if (auto var_place = std::dynamic_pointer_cast(place)) { return var_place; - } else if (auto in_port_place = std::dynamic_pointer_cast(place)) { - return in_port_place->get_source_tensor_pdpd(); - } else if (auto out_port_place = std::dynamic_pointer_cast(place)) { - return out_port_place->get_target_tensor_pdpd(); + } else if (auto in_port_place = std::dynamic_pointer_cast(place)) { + return in_port_place->get_source_tensor_paddle(); + } else if (auto out_port_place = std::dynamic_pointer_cast(place)) { + return out_port_place->get_target_tensor_paddle(); } - FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlacePDPD."); + FRONT_END_GENERAL_CHECK(false, "Cannot cast this Place to TensorPlacepaddle."); } } // namespace -void InputModelPDPD::InputModelPDPDImpl::overrideAllInputs(const std::vector& inputs) { +void InputModel::InputModelImpl::overrideAllInputs(const std::vector& inputs) { m_graph_changed = true; m_inputs.clear(); for (const auto& inp : inputs) { @@ -382,7 +384,7 @@ void InputModelPDPD::InputModelPDPDImpl::overrideAllInputs(const std::vector& outputs) { +void InputModel::InputModelImpl::overrideAllOutputs(const std::vector& outputs) { m_graph_changed = true; m_outputs.clear(); for (const auto& outp : outputs) { @@ -390,30 +392,30 @@ void InputModelPDPD::InputModelPDPDImpl::overrideAllOutputs(const std::vector& inputs, - const std::vector& outputs) { +void InputModel::InputModelImpl::extractSubgraph(const std::vector& inputs, + const std::vector& outputs) { m_graph_changed = true; overrideAllInputs(inputs); overrideAllOutputs(outputs); } -void InputModelPDPD::InputModelPDPDImpl::setDefaultShape(Place::Ptr place, const ov::Shape& shape) { +void InputModel::InputModelImpl::setDefaultShape(Place::Ptr 
place, const ov::Shape& shape) { FRONT_END_NOT_IMPLEMENTED("setDefaultShape"); } -void InputModelPDPD::InputModelPDPDImpl::setPartialShape(Place::Ptr place, const ov::PartialShape& p_shape) { +void InputModel::InputModelImpl::setPartialShape(Place::Ptr place, const ov::PartialShape& p_shape) { castToTensorPlace(place)->set_partial_shape(p_shape); } -ov::PartialShape InputModelPDPD::InputModelPDPDImpl::getPartialShape(Place::Ptr place) const { +ov::PartialShape InputModel::InputModelImpl::getPartialShape(Place::Ptr place) const { return castToTensorPlace(place)->get_partial_shape(); } -void InputModelPDPD::InputModelPDPDImpl::setElementType(Place::Ptr place, const ov::element::Type& type) { +void InputModel::InputModelImpl::setElementType(Place::Ptr place, const ov::element::Type& type) { castToTensorPlace(place)->set_element_type(type); } -void InputModelPDPD::InputModelPDPDImpl::setTensorValue(Place::Ptr place, const void* value) { +void InputModel::InputModelImpl::setTensorValue(Place::Ptr place, const void* value) { m_graph_changed = true; auto tensor_place = castToTensorPlace(place); auto p_shape = tensor_place->get_partial_shape(); @@ -424,69 +426,69 @@ void InputModelPDPD::InputModelPDPDImpl::setTensorValue(Place::Ptr place, const m_tensor_values[name] = constant; } -InputModelPDPD::InputModelPDPD(const std::string& path, const std::shared_ptr& telemetry) - : _impl{std::make_shared(path, *this, telemetry)} {} +InputModel::InputModel(const std::string& path, const std::shared_ptr& telemetry) + : _impl{std::make_shared(path, *this, telemetry)} {} #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) -InputModelPDPD::InputModelPDPD(const std::wstring& path, const std::shared_ptr& telemetry) - : _impl{std::make_shared(path, *this, telemetry)} {} +InputModel::InputModel(const std::wstring& path, const std::shared_ptr& telemetry) + : _impl{std::make_shared(path, *this, telemetry)} {} #endif -InputModelPDPD::InputModelPDPD(const std::vector& streams, - const std::shared_ptr& telemetry) - : _impl{std::make_shared(streams, *this, telemetry)} {} +InputModel::InputModel(const std::vector& streams, const std::shared_ptr& telemetry) + : _impl{std::make_shared(streams, *this, telemetry)} {} -std::vector> InputModelPDPD::get_op_places() const { +std::vector> InputModel::get_op_places() const { return _impl->get_op_places(); } -std::map> InputModelPDPD::get_var_places() const { +std::map> InputModel::get_var_places() const { return _impl->get_var_places(); } -std::map> InputModelPDPD::get_tensor_values() const { +std::map> InputModel::get_tensor_values() const { return _impl->get_tensor_values(); } -std::vector InputModelPDPD::get_inputs() const { +std::vector InputModel::get_inputs() const { return _impl->getInputs(); } -std::vector InputModelPDPD::get_outputs() const { +std::vector InputModel::get_outputs() const { return _impl->getOutputs(); } -Place::Ptr InputModelPDPD::get_place_by_tensor_name(const std::string& tensorName) const { +Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const { return _impl->getPlaceByTensorName(tensorName); } -void InputModelPDPD::override_all_outputs(const std::vector& outputs) { +void InputModel::override_all_outputs(const std::vector& outputs) { _impl->overrideAllOutputs(outputs); } -void InputModelPDPD::override_all_inputs(const std::vector& inputs) { +void InputModel::override_all_inputs(const std::vector& inputs) { _impl->overrideAllInputs(inputs); } -void InputModelPDPD::extract_subgraph(const std::vector& inputs, 
const std::vector& outputs) { +void InputModel::extract_subgraph(const std::vector& inputs, const std::vector& outputs) { _impl->extractSubgraph(inputs, outputs); } -void InputModelPDPD::set_partial_shape(const Place::Ptr& place, const ov::PartialShape& p_shape) { +void InputModel::set_partial_shape(const Place::Ptr& place, const ov::PartialShape& p_shape) { _impl->setPartialShape(place, p_shape); } -ov::PartialShape InputModelPDPD::get_partial_shape(const Place::Ptr& place) const { +ov::PartialShape InputModel::get_partial_shape(const Place::Ptr& place) const { return _impl->getPartialShape(place); } -void InputModelPDPD::set_element_type(const Place::Ptr& place, const ov::element::Type& type) { +void InputModel::set_element_type(const Place::Ptr& place, const ov::element::Type& type) { _impl->setElementType(place, type); } -void InputModelPDPD::set_tensor_value(const Place::Ptr& place, const void* value) { +void InputModel::set_tensor_value(const Place::Ptr& place, const void* value) { _impl->setTensorValue(place, value); } +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp b/src/frontends/paddle/src/input_model.hpp similarity index 59% rename from src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp rename to src/frontends/paddle/src/input_model.hpp index 491af81b3ba..6a9beb2505d 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/model.hpp +++ b/src/frontends/paddle/src/input_model.hpp @@ -5,30 +5,23 @@ #pragma once #include -#include - -#include "paddlepaddle_frontend/utility.hpp" +#include namespace ov { namespace frontend { -class OpPlacePDPD; -class TensorPlacePDPD; -class PDPD_API InputModelPDPD : public InputModel { - friend class FrontEndPDPD; - class InputModelPDPDImpl; - std::shared_ptr _impl; +namespace paddle { - std::vector> get_op_places() const; - std::map> get_var_places() const; - std::map> get_tensor_values() const; +class OpPlace; +class TensorPlace; +class InputModel : public ov::frontend::InputModel { public: - explicit InputModelPDPD(const std::string& path, const std::shared_ptr& telemetry = {}); + explicit InputModel(const std::string& path, const std::shared_ptr& telemetry = {}); #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - explicit InputModelPDPD(const std::wstring& path, const std::shared_ptr& telemetry = {}); + explicit InputModel(const std::wstring& path, const std::shared_ptr& telemetry = {}); #endif - explicit InputModelPDPD(const std::vector& streams, - const std::shared_ptr& telemetry = {}); + explicit InputModel(const std::vector& streams, + const std::shared_ptr& telemetry = {}); std::vector get_inputs() const override; std::vector get_outputs() const override; Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override; @@ -39,7 +32,17 @@ public: ov::PartialShape get_partial_shape(const Place::Ptr& place) const override; void set_element_type(const Place::Ptr& place, const ov::element::Type&) override; void set_tensor_value(const Place::Ptr& place, const void* value) override; + +private: + friend class ov::frontend::paddle::FrontEnd; + class InputModelImpl; + std::shared_ptr _impl; + + std::vector> get_op_places() const; + std::map> get_var_places() const; + std::map> get_tensor_values() const; }; +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/node_context.hpp b/src/frontends/paddle/src/node_context.hpp similarity 
index 88% rename from src/frontends/paddlepaddle/src/node_context.hpp rename to src/frontends/paddle/src/node_context.hpp index 6dfe727f6ee..4525ea948c2 100644 --- a/src/frontends/paddlepaddle/src/node_context.hpp +++ b/src/frontends/paddle/src/node_context.hpp @@ -4,13 +4,13 @@ #pragma once #include "exceptions.hpp" -#include "ngraph/compatibility.hpp" #include "openvino/core/any.hpp" -#include "paddlepaddle_frontend/utility.hpp" +#include "openvino/core/node.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { + using InPortName = std::string; using OutPortName = std::string; using TensorName = std::string; @@ -119,24 +119,24 @@ public: } NamedOutputs default_single_output_mapping(const std::shared_ptr& node, - const std::vector& required_pdpd_out_names) const; + const std::vector& required_paddle_out_names) const; }; inline NamedOutputs NodeContext::default_single_output_mapping( const std::shared_ptr& node, - const std::vector& required_pdpd_out_names) const { + const std::vector& required_paddle_out_names) const { NamedOutputs named_outputs; const auto& outputs = node->outputs(); - const auto& pdpd_op_output_names = this->get_output_names(); + const auto& paddle_op_output_names = this->get_output_names(); FRONT_END_GENERAL_CHECK(outputs.size() == 1, "OV node must have exactly one output"); - for (const auto& pdpd_name : pdpd_op_output_names) { - if (std::find(required_pdpd_out_names.begin(), required_pdpd_out_names.end(), pdpd_name) != - required_pdpd_out_names.end()) - named_outputs[pdpd_name] = {outputs[0]}; + for (const auto& paddle_name : paddle_op_output_names) { + if (std::find(required_paddle_out_names.begin(), required_paddle_out_names.end(), paddle_name) != + required_paddle_out_names.end()) + named_outputs[paddle_name] = {outputs[0]}; } return named_outputs; } -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/argmax.cpp b/src/frontends/paddle/src/op/argmax.cpp similarity index 97% rename from src/frontends/paddlepaddle/src/op/argmax.cpp rename to src/frontends/paddle/src/op/argmax.cpp index 265c12ad879..637f80013ce 100644 --- a/src/frontends/paddlepaddle/src/op/argmax.cpp +++ b/src/frontends/paddle/src/op/argmax.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs argmax(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -36,6 +36,6 @@ NamedOutputs argmax(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/assign_value.cpp b/src/frontends/paddle/src/op/assign_value.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/assign_value.cpp rename to src/frontends/paddle/src/op/assign_value.cpp index 93b2fdfb08e..41fa114037b 100644 --- a/src/frontends/paddlepaddle/src/op/assign_value.cpp +++ b/src/frontends/paddle/src/op/assign_value.cpp @@ -7,7 +7,7 @@ #include "openvino/opsets/opset6.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs assign_value(const NodeContext& node) { std::vector shape = node.get_attribute>("shape"); @@ -36,7 +36,7 @@ NamedOutputs assign_value(const NodeContext& node) { break; } default: { - PDPD_OP_VALIDATION_CHECK(node, false, "assign_value only supports int32, int64, float32, bool"); + PADDLE_OP_CHECK(node, false, "assign_value only supports int32, int64, float32, 
bool"); break; } } @@ -45,6 +45,6 @@ NamedOutputs assign_value(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/batch_norm.cpp b/src/frontends/paddle/src/op/batch_norm.cpp similarity index 93% rename from src/frontends/paddlepaddle/src/op/batch_norm.cpp rename to src/frontends/paddle/src/op/batch_norm.cpp index 86f4cea41fc..e608b1da86e 100644 --- a/src/frontends/paddlepaddle/src/op/batch_norm.cpp +++ b/src/frontends/paddle/src/op/batch_norm.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs batch_norm(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -18,7 +18,7 @@ NamedOutputs batch_norm(const NodeContext& node) { auto variance = node.get_ng_input("Variance"); auto data_layout = node.get_attribute("data_layout"); - PDPD_ASSERT((data_layout == "NCHW" || data_layout == "NHWC"), "Not supported input data layout!"); + PADDLE_OP_CHECK(node, (data_layout == "NCHW" || data_layout == "NHWC"), "Not supported input data layout!"); if (data_layout == "NCHW") { return node.default_single_output_mapping( {std::make_shared(data, @@ -45,6 +45,6 @@ NamedOutputs batch_norm(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/cast.cpp b/src/frontends/paddle/src/op/cast.cpp similarity index 92% rename from src/frontends/paddlepaddle/src/op/cast.cpp rename to src/frontends/paddle/src/op/cast.cpp index 4530705483a..9a5dac28e8a 100644 --- a/src/frontends/paddlepaddle/src/op/cast.cpp +++ b/src/frontends/paddle/src/op/cast.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs cast(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -18,6 +18,6 @@ NamedOutputs cast(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/clip.cpp b/src/frontends/paddle/src/op/clip.cpp similarity index 80% rename from src/frontends/paddlepaddle/src/op/clip.cpp rename to src/frontends/paddle/src/op/clip.cpp index c34806a1d53..e22a8368a5c 100644 --- a/src/frontends/paddlepaddle/src/op/clip.cpp +++ b/src/frontends/paddle/src/op/clip.cpp @@ -8,18 +8,18 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs clip(const NodeContext& node) { auto data = node.get_ng_input("X"); auto min = node.get_attribute("min"); auto max = node.get_attribute("max"); - PDPD_OP_VALIDATION_CHECK(node, max >= min, "clip: max value must greater than min value!"); + PADDLE_OP_CHECK(node, max >= min, "clip: max value must greater than min value!"); return node.default_single_output_mapping({std::make_shared(data, min, max)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/concat.cpp b/src/frontends/paddle/src/op/concat.cpp similarity index 92% rename from src/frontends/paddlepaddle/src/op/concat.cpp rename to src/frontends/paddle/src/op/concat.cpp index 512df6cc806..df744e8ae99 100644 --- a/src/frontends/paddlepaddle/src/op/concat.cpp +++ b/src/frontends/paddle/src/op/concat.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace 
paddle {
 namespace op {
 NamedOutputs concat(const NodeContext& node) {
     auto data = node.get_ng_inputs("X");
@@ -17,6 +17,6 @@ NamedOutputs concat(const NodeContext& node) {
 }
 } // namespace op
-} // namespace pdpd
+} // namespace paddle
 } // namespace frontend
 } // namespace ov
diff --git a/src/frontends/paddlepaddle/src/op/conv2d.cpp b/src/frontends/paddle/src/op/conv2d.cpp
similarity index 90%
rename from src/frontends/paddlepaddle/src/op/conv2d.cpp
rename to src/frontends/paddle/src/op/conv2d.cpp
index cd71b250fcd..5c2ff481024 100644
--- a/src/frontends/paddlepaddle/src/op/conv2d.cpp
+++ b/src/frontends/paddle/src/op/conv2d.cpp
@@ -7,13 +7,13 @@
 namespace ov {
 namespace frontend {
-namespace pdpd {
+namespace paddle {
 namespace op {
 NamedOutputs conv2d(const NodeContext& node) {
     return conv2d_base(node);
 }
 } // namespace op
-} // namespace pdpd
+} // namespace paddle
 } // namespace frontend
 } // namespace ov
diff --git a/src/frontends/paddlepaddle/src/op/conv2d_transpose.cpp b/src/frontends/paddle/src/op/conv2d_transpose.cpp
similarity index 91%
rename from src/frontends/paddlepaddle/src/op/conv2d_transpose.cpp
rename to src/frontends/paddle/src/op/conv2d_transpose.cpp
index e0f8db7655f..3ddf75a7d72 100644
--- a/src/frontends/paddlepaddle/src/op/conv2d_transpose.cpp
+++ b/src/frontends/paddle/src/op/conv2d_transpose.cpp
@@ -9,13 +9,13 @@
 namespace ov {
 namespace frontend {
-namespace pdpd {
+namespace paddle {
 namespace op {
 NamedOutputs conv2d_transpose(const NodeContext& node) {
     return conv2d_base(node);
 }
 } // namespace op
-} // namespace pdpd
+} // namespace paddle
 } // namespace frontend
 } // namespace ov
diff --git a/src/frontends/paddlepaddle/src/op/conv2d_utils.cpp b/src/frontends/paddle/src/op/conv2d_utils.cpp
similarity index 96%
rename from src/frontends/paddlepaddle/src/op/conv2d_utils.cpp
rename to src/frontends/paddle/src/op/conv2d_utils.cpp
index 7155b896514..e22bd823a28 100644
--- a/src/frontends/paddlepaddle/src/op/conv2d_utils.cpp
+++ b/src/frontends/paddle/src/op/conv2d_utils.cpp
@@ -10,7 +10,7 @@
 namespace ov {
 namespace frontend {
-namespace pdpd {
+namespace paddle {
 namespace op {
 ov::op::PadType get_auto_pad(const NodeContext& node) {
     // Default value means use explicitly provided padding values.
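Most of the op converter files renamed in this patch share one skeleton: they live in ov::frontend::paddle::op, read inputs and attributes from the NodeContext, validate with PADDLE_OP_CHECK(node, ...), and return through default_single_output_mapping. A hedged sketch of that skeleton follows; the converter itself (my_scaled_copy) and its attribute are made up for illustration, and the default_opset.hpp helper header is assumed to exist as in the real converters:

// Illustrative only: the shape of a converter after the pdpd -> paddle rename.
#include "default_opset.hpp"  // assumed helper alias for the default opset
#include "node_context.hpp"   // renamed in this patch; brings in PADDLE_OP_CHECK

namespace ov {
namespace frontend {
namespace paddle {
namespace op {
NamedOutputs my_scaled_copy(const NodeContext& node) {
    auto data = node.get_ng_input("X");                     // Paddle input name
    auto alpha = node.get_attribute<float>("alpha", 1.0f);  // attribute with a default
    PADDLE_OP_CHECK(node, alpha > 0.0f, "my_scaled_copy: alpha must be positive");
    auto scale = default_opset::Constant::create(data.get_element_type(), {1}, {alpha});
    // Exactly one OV output, bound to the Paddle output name "Out".
    return node.default_single_output_mapping({std::make_shared<default_opset::Multiply>(data, scale)},
                                              {"Out"});
}
}  // namespace op
}  // namespace paddle
}  // namespace frontend
}  // namespace ov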
@@ -59,7 +59,7 @@ std::pair get_pads(const NodeContext& node, cons std::pair get_pads(const NodeContext& node) { const auto data_rank = node.get_ng_input("Input").get_partial_shape().rank(); - PDPD_ASSERT(data_rank.get_length() > 2, "the rank of conv input must > 2"); + PADDLE_OP_CHECK(node, data_rank.get_length() > 2, "the rank of conv input must > 2"); const auto data_spatial_dims = data_rank.get_length() - 2; return get_pads(node, data_spatial_dims); @@ -84,6 +84,6 @@ std::shared_ptr get_reshaped_filter(const Output& filters, const int } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/conv2d_utils.hpp b/src/frontends/paddle/src/op/conv2d_utils.hpp similarity index 95% rename from src/frontends/paddlepaddle/src/op/conv2d_utils.hpp rename to src/frontends/paddle/src/op/conv2d_utils.hpp index 6ed034231f5..5cf6c553c76 100644 --- a/src/frontends/paddlepaddle/src/op/conv2d_utils.hpp +++ b/src/frontends/paddle/src/op/conv2d_utils.hpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { ov::op::PadType get_auto_pad(const NodeContext& node); std::pair get_pads(const NodeContext& node); @@ -28,7 +28,7 @@ NamedOutputs conv2d_base(const NodeContext& node) { const auto groups = node.get_attribute("groups"); const auto data_format = node.get_attribute("data_format"); // TODO Support Other data layout #55423 - PDPD_ASSERT(data_format == "NCHW", "conv2d only supports NCHW now"); + PADDLE_OP_CHECK(node, data_format == "NCHW", "conv2d only supports NCHW now"); if (groups > 1) { const auto reshaped_filters = get_reshaped_filter(filters, groups); @@ -54,6 +54,6 @@ NamedOutputs conv2d_base(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/cumsum.cpp b/src/frontends/paddle/src/op/cumsum.cpp similarity index 96% rename from src/frontends/paddlepaddle/src/op/cumsum.cpp rename to src/frontends/paddle/src/op/cumsum.cpp index a56724f0bf5..4d55bdb567c 100644 --- a/src/frontends/paddlepaddle/src/op/cumsum.cpp +++ b/src/frontends/paddle/src/op/cumsum.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs cumsum(const NodeContext& node) { const auto x = node.get_ng_input("X"); @@ -32,6 +32,6 @@ NamedOutputs cumsum(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/deformable_conv.cpp b/src/frontends/paddle/src/op/deformable_conv.cpp similarity index 98% rename from src/frontends/paddlepaddle/src/op/deformable_conv.cpp rename to src/frontends/paddle/src/op/deformable_conv.cpp index 93b03e9d9ec..ea569d002ac 100644 --- a/src/frontends/paddlepaddle/src/op/deformable_conv.cpp +++ b/src/frontends/paddle/src/op/deformable_conv.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs deformable_conv(const NodeContext& node) { auto input = node.get_ng_input("Input"); @@ -62,6 +62,6 @@ NamedOutputs deformable_conv(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/dropout.cpp b/src/frontends/paddle/src/op/dropout.cpp similarity index 79% rename from 
src/frontends/paddlepaddle/src/op/dropout.cpp rename to src/frontends/paddle/src/op/dropout.cpp index fd4d5546184..40206acadf2 100644 --- a/src/frontends/paddlepaddle/src/op/dropout.cpp +++ b/src/frontends/paddle/src/op/dropout.cpp @@ -8,15 +8,14 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs dropout(const NodeContext& node) { auto data = node.get_ng_input("X"); auto dropout_implementation = node.get_attribute("dropout_implementation"); - PDPD_OP_VALIDATION_CHECK( - node, - (dropout_implementation == "downgrade_in_infer" || dropout_implementation == "upscale_in_train"), - "Unsupported dropout mode!"); + PADDLE_OP_CHECK(node, + (dropout_implementation == "downgrade_in_infer" || dropout_implementation == "upscale_in_train"), + "Unsupported dropout mode!"); if (dropout_implementation == "downgrade_in_infer") { auto dropout_prob = ov::opset6::Constant::create(ov::element::f32, {1}, {1 - node.get_attribute("dropout_prob")}); @@ -28,6 +27,6 @@ NamedOutputs dropout(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/elementwise_ops.cpp b/src/frontends/paddle/src/op/elementwise_ops.cpp similarity index 85% rename from src/frontends/paddlepaddle/src/op/elementwise_ops.cpp rename to src/frontends/paddle/src/op/elementwise_ops.cpp index d62707771fd..948089a33c3 100644 --- a/src/frontends/paddlepaddle/src/op/elementwise_ops.cpp +++ b/src/frontends/paddle/src/op/elementwise_ops.cpp @@ -9,7 +9,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { template NamedOutputs elementwise_ops(const NodeContext& node) { @@ -18,8 +18,8 @@ NamedOutputs elementwise_ops(const NodeContext& node) { auto axis = node.get_attribute("axis"); - PDPD_OP_VALIDATION_CHECK(node, x.get_partial_shape().rank().is_static(), "elementwise_ops: X rank must be static!"); - PDPD_OP_VALIDATION_CHECK(node, y.get_partial_shape().rank().is_static(), "elementwise_ops: Y rank must be static!"); + PADDLE_OP_CHECK(node, x.get_partial_shape().rank().is_static(), "elementwise_ops: X rank must be static!"); + PADDLE_OP_CHECK(node, y.get_partial_shape().rank().is_static(), "elementwise_ops: Y rank must be static!"); int64_t x_rank = x.get_partial_shape().rank().get_length(); int64_t y_rank = y.get_partial_shape().rank().get_length(); @@ -32,8 +32,7 @@ NamedOutputs elementwise_ops(const NodeContext& node) { for (int64_t i = y_rank + axis; i < x_rank; i++) indices.push_back(i); - auto indices_node = - default_opset::Constant::create(ngraph::element::i64, ngraph::Shape{indices.size()}, indices); + auto indices_node = default_opset::Constant::create(ov::element::i64, ov::Shape{indices.size()}, indices); auto y_node = std::make_shared(y, indices_node); return node.default_single_output_mapping({std::make_shared(x, y_node)}, {"Out"}); } @@ -77,6 +76,6 @@ NamedOutputs elementwise_greater_equal(const NodeContext& node_context) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/embedding.cpp b/src/frontends/paddle/src/op/embedding.cpp similarity index 98% rename from src/frontends/paddlepaddle/src/op/embedding.cpp rename to src/frontends/paddle/src/op/embedding.cpp index e8cfe77f4e8..180ed514e78 100644 --- a/src/frontends/paddlepaddle/src/op/embedding.cpp +++ b/src/frontends/paddle/src/op/embedding.cpp @@ -8,7 +8,7 @@ namespace ov { 
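The elementwise_ops hunk above is the one spot in this batch where the converted behaviour is subtle: Paddle's axis attribute aligns a lower-rank Y against X starting at axis, so Y has to be unsqueezed on the trailing axes before OV's right-aligned (NumPy-style) broadcasting gives the same result. A standalone sketch of that index computation, assuming axis has already been normalised to a non-negative value:

#include <cstdint>
#include <vector>

// Axes on which Y (rank y_rank) must be unsqueezed so that right-aligned
// broadcasting against X (rank x_rank) matches Paddle's axis semantics.
// This mirrors the index loop in elementwise_ops().
std::vector<int64_t> trailing_axes_to_unsqueeze(int64_t x_rank, int64_t y_rank, int64_t axis) {
    std::vector<int64_t> indices;
    for (int64_t i = y_rank + axis; i < x_rank; ++i)
        indices.push_back(i);
    return indices;
}

// Example: X is NCHW (rank 4), Y is a per-channel vector (rank 1), axis = 1.
// The result is {2, 3}: Y becomes [C, 1, 1], which right-aligns against
// [N, C, H, W] on the C axis, exactly as the Paddle op intends.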
namespace frontend { -namespace pdpd { +namespace paddle { namespace op { using namespace opset8; using namespace element; @@ -51,6 +51,6 @@ NamedOutputs embedding(const NodeContext& node) { return node.default_single_output_mapping({node_embedding}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/exp.cpp b/src/frontends/paddle/src/op/exp.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/exp.cpp rename to src/frontends/paddle/src/op/exp.cpp index 4af6f32a6b1..0ab58108185 100644 --- a/src/frontends/paddlepaddle/src/op/exp.cpp +++ b/src/frontends/paddle/src/op/exp.cpp @@ -8,13 +8,13 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs exp(const NodeContext& node) { auto data = node.get_ng_input("X"); return node.default_single_output_mapping({std::make_shared(data)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/expand_v2.cpp b/src/frontends/paddle/src/op/expand_v2.cpp similarity index 96% rename from src/frontends/paddlepaddle/src/op/expand_v2.cpp rename to src/frontends/paddle/src/op/expand_v2.cpp index 3cc59b41778..1a00ea2f5b1 100644 --- a/src/frontends/paddlepaddle/src/op/expand_v2.cpp +++ b/src/frontends/paddle/src/op/expand_v2.cpp @@ -5,11 +5,10 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs expand_v2(const NodeContext& node) { auto x = node.get_ng_input("X"); @@ -46,6 +45,6 @@ NamedOutputs expand_v2(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/fill_any_like.cpp b/src/frontends/paddle/src/op/fill_any_like.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/fill_any_like.cpp rename to src/frontends/paddle/src/op/fill_any_like.cpp index af52a2581b4..bdd1bec34dd 100644 --- a/src/frontends/paddlepaddle/src/op/fill_any_like.cpp +++ b/src/frontends/paddle/src/op/fill_any_like.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs fill_any_like(const NodeContext& node) { const auto x = node.get_ng_input("X"); @@ -27,7 +27,7 @@ NamedOutputs fill_any_like(const NodeContext& node) { std::any_of(supported_type.begin(), supported_type.end(), [dtype](const element::Type& type) { return dtype == type; }); - PDPD_ASSERT(valid_type, "fill_any_like only supports i32, i64, f16, f32, f64"); + PADDLE_OP_CHECK(node, valid_type, "fill_any_like only supports i32, i64, f16, f32, f64"); const auto value_node = default_opset::Constant::create(dtype, {1}, {value}); const auto shape_node = std::make_shared(x); @@ -36,6 +36,6 @@ NamedOutputs fill_any_like(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/fill_constant.cpp b/src/frontends/paddle/src/op/fill_constant.cpp similarity index 84% rename from src/frontends/paddlepaddle/src/op/fill_constant.cpp rename to src/frontends/paddle/src/op/fill_constant.cpp index 0983bbabe41..450d8d902c7 100644 --- a/src/frontends/paddlepaddle/src/op/fill_constant.cpp +++ 
b/src/frontends/paddle/src/op/fill_constant.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs fill_constant(const NodeContext& node) { auto shape = node.get_attribute>("shape"); @@ -27,11 +27,12 @@ NamedOutputs fill_constant(const NodeContext& node) { int64_t value = static_cast(node.get_attribute("value")); value_node = opset6::Constant::create(dtype, {1}, {value}); } else { - PDPD_ASSERT(false, "fill_constant only supports i32, f32, i64"); + PADDLE_OP_CHECK(node, false, "fill_constant only supports i32, f32, i64"); } - PDPD_ASSERT(shape.size() > 0 || node.has_ng_input("ShapeTensor") || node.has_ng_input("ShapeTensorList"), - "fill_constant shape not set"); + PADDLE_OP_CHECK(node, + shape.size() > 0 || node.has_ng_input("ShapeTensor") || node.has_ng_input("ShapeTensorList"), + "fill_constant shape not set"); if (node.has_ng_input("ShapeTensor")) { shape_node = node.get_ng_input("ShapeTensor"); @@ -47,6 +48,6 @@ NamedOutputs fill_constant(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/fill_constant_batch_size_like.cpp b/src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp similarity index 98% rename from src/frontends/paddlepaddle/src/op/fill_constant_batch_size_like.cpp rename to src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp index e0f4c5728b0..8cc5592ccbc 100644 --- a/src/frontends/paddlepaddle/src/op/fill_constant_batch_size_like.cpp +++ b/src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp @@ -7,11 +7,10 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { static std::shared_ptr get_val(int32_t idx, const Output& data) { auto startsNode = ov::opset6::Constant::create(element::i32, {1}, {idx}); @@ -120,6 +119,6 @@ NamedOutputs fill_constant_batch_size_like(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/flatten_contiguous_range.cpp b/src/frontends/paddle/src/op/flatten_contiguous_range.cpp similarity index 98% rename from src/frontends/paddlepaddle/src/op/flatten_contiguous_range.cpp rename to src/frontends/paddle/src/op/flatten_contiguous_range.cpp index 7a395248b1d..a079944f424 100644 --- a/src/frontends/paddlepaddle/src/op/flatten_contiguous_range.cpp +++ b/src/frontends/paddle/src/op/flatten_contiguous_range.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs flatten_contiguous_range(const NodeContext& node) { auto x_node = node.get_ng_input("X"); @@ -42,6 +42,6 @@ NamedOutputs flatten_contiguous_range(const NodeContext& node) { {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/gelu.cpp b/src/frontends/paddle/src/op/gelu.cpp similarity index 93% rename from src/frontends/paddlepaddle/src/op/gelu.cpp rename to src/frontends/paddle/src/op/gelu.cpp index a5ed25ec636..de8e3ff0fa0 100644 --- a/src/frontends/paddlepaddle/src/op/gelu.cpp +++ b/src/frontends/paddle/src/op/gelu.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs gelu(const 
NodeContext& node) { const auto data = node.get_ng_input("X"); @@ -18,6 +18,6 @@ NamedOutputs gelu(const NodeContext& node) { return node.default_single_output_mapping({std::make_shared(data, mode)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/hard_sigmoid.cpp b/src/frontends/paddle/src/op/hard_sigmoid.cpp similarity index 90% rename from src/frontends/paddlepaddle/src/op/hard_sigmoid.cpp rename to src/frontends/paddle/src/op/hard_sigmoid.cpp index fa993e5e444..29dabfb23ef 100644 --- a/src/frontends/paddlepaddle/src/op/hard_sigmoid.cpp +++ b/src/frontends/paddle/src/op/hard_sigmoid.cpp @@ -5,11 +5,10 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs hard_sigmoid(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -22,6 +21,6 @@ NamedOutputs hard_sigmoid(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/hard_swish.cpp b/src/frontends/paddle/src/op/hard_swish.cpp similarity index 64% rename from src/frontends/paddlepaddle/src/op/hard_swish.cpp rename to src/frontends/paddle/src/op/hard_swish.cpp index 196705e0496..6261c66d950 100644 --- a/src/frontends/paddlepaddle/src/op/hard_swish.cpp +++ b/src/frontends/paddle/src/op/hard_swish.cpp @@ -5,30 +5,31 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs hard_swish(const NodeContext& node) { auto data = node.get_ng_input("X"); if (node.has_attribute("threshold")) { auto threshold = node.get_attribute("threshold"); - PDPD_ASSERT(std::abs(threshold - 6.0) < 0.001, "hard_swish: Only threshold = 6.0 is currently supported"); + PADDLE_OP_CHECK(node, + std::abs(threshold - 6.0) < 0.001, + "hard_swish: Only threshold = 6.0 is currently supported"); } if (node.has_attribute("scale")) { auto scale = node.get_attribute("scale"); - PDPD_ASSERT(std::abs(scale - 6.0) < 0.001, "hard_swish: Only scale = 6.0 is currently supported"); + PADDLE_OP_CHECK(node, std::abs(scale - 6.0) < 0.001, "hard_swish: Only scale = 6.0 is currently supported"); } if (node.has_attribute("offset")) { auto offset = node.get_attribute("offset"); - PDPD_ASSERT(std::abs(offset - 3.0) < 0.001, "hard_swish: Only offset = 3.0 is currently supported"); + PADDLE_OP_CHECK(node, std::abs(offset - 3.0) < 0.001, "hard_swish: Only offset = 3.0 is currently supported"); } return node.default_single_output_mapping({std::make_shared(data)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/interp.cpp b/src/frontends/paddle/src/op/interp.cpp similarity index 99% rename from src/frontends/paddlepaddle/src/op/interp.cpp rename to src/frontends/paddle/src/op/interp.cpp index 7831ce06389..a8dd145bea4 100644 --- a/src/frontends/paddlepaddle/src/op/interp.cpp +++ b/src/frontends/paddle/src/op/interp.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { using namespace default_opset; @@ -173,6 +173,6 @@ NamedOutputs bicubic_interp_v2(const NodeContext& node) { } } // namespace op -} // namespace 
pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/layer_norm.cpp b/src/frontends/paddle/src/op/layer_norm.cpp similarity index 90% rename from src/frontends/paddlepaddle/src/op/layer_norm.cpp rename to src/frontends/paddle/src/op/layer_norm.cpp index 12599ad99bc..a4cdc6c25bd 100644 --- a/src/frontends/paddlepaddle/src/op/layer_norm.cpp +++ b/src/frontends/paddle/src/op/layer_norm.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs layer_norm(const NodeContext& node) { using namespace default_opset; @@ -16,8 +16,8 @@ NamedOutputs layer_norm(const NodeContext& node) { const auto epsilon = node.get_attribute("epsilon", 1e-05); const auto begin_norm_axis = node.get_attribute("begin_norm_axis", 1); // The limitation from: - // https://github.com/PaddlePaddle/Paddle/blob/cec36ea6ff16fda90c1a004c6e043cd9b2096a2a/paddle/fluid/operators/layer_norm_op.cc#L176 - PDPD_ASSERT(begin_norm_axis > 0, "begin_norm_axis should be greater than 0"); + // https://github.com/paddle/Paddle/blob/cec36ea6ff16fda90c1a004c6e043cd9b2096a2a/paddle/fluid/operators/layer_norm_op.cc#L176 + PADDLE_OP_CHECK(node, begin_norm_axis > 0, "begin_norm_axis should be greater than 0"); // shape of input const auto shape_of_node = std::make_shared(data); @@ -54,6 +54,6 @@ NamedOutputs layer_norm(const NodeContext& node) { return node.default_single_output_mapping({result}, {"Y"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/leakyrelu.cpp b/src/frontends/paddle/src/op/leakyrelu.cpp similarity index 93% rename from src/frontends/paddlepaddle/src/op/leakyrelu.cpp rename to src/frontends/paddle/src/op/leakyrelu.cpp index 4a54ccb5173..641360c40da 100644 --- a/src/frontends/paddlepaddle/src/op/leakyrelu.cpp +++ b/src/frontends/paddle/src/op/leakyrelu.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs leaky_relu(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -16,6 +16,6 @@ NamedOutputs leaky_relu(const NodeContext& node) { return node.default_single_output_mapping({std::make_shared(data, alpha)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/log.cpp b/src/frontends/paddle/src/op/log.cpp similarity index 84% rename from src/frontends/paddlepaddle/src/op/log.cpp rename to src/frontends/paddle/src/op/log.cpp index a38e802cb5a..305a60bb7cf 100644 --- a/src/frontends/paddlepaddle/src/op/log.cpp +++ b/src/frontends/paddle/src/op/log.cpp @@ -5,17 +5,16 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs log(const NodeContext& node) { auto x = node.get_ng_input("X"); return node.default_single_output_mapping({std::make_shared(x)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/logical_not.cpp b/src/frontends/paddle/src/op/logical_not.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/logical_not.cpp rename to src/frontends/paddle/src/op/logical_not.cpp index f182be450ec..473f5a1ea55 100644 --- 
a/src/frontends/paddlepaddle/src/op/logical_not.cpp +++ b/src/frontends/paddle/src/op/logical_not.cpp @@ -8,13 +8,13 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs logical_not(const NodeContext& node) { auto data = node.get_ng_input("X"); return node.default_single_output_mapping({std::make_shared(data)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/lstm.cpp b/src/frontends/paddle/src/op/lstm.cpp similarity index 98% rename from src/frontends/paddlepaddle/src/op/lstm.cpp rename to src/frontends/paddle/src/op/lstm.cpp index da8c93a2cff..02184a28b79 100644 --- a/src/frontends/paddlepaddle/src/op/lstm.cpp +++ b/src/frontends/paddle/src/op/lstm.cpp @@ -5,12 +5,11 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" -#include "pdpd_utils.hpp" +#include "paddle_utils.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { namespace { // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INPUT NODES PARSING ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -154,7 +153,7 @@ struct LSTMAttributes { } // namespace NamedOutputs lstm(const NodeContext& node) { auto mode = node.get_attribute("mode"); - PDPD_ASSERT(mode == "LSTM", "RNN only support LSTM now"); + PADDLE_OP_CHECK(node, mode == "LSTM", "RNN only support LSTM now"); auto prev_inputs = node.get_ng_inputs("Input"); Output prev_output = prev_inputs[0]; LSTMAttributes attrs(node); @@ -188,6 +187,6 @@ NamedOutputs lstm(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/matmul.cpp b/src/frontends/paddle/src/op/matmul.cpp similarity index 95% rename from src/frontends/paddlepaddle/src/op/matmul.cpp rename to src/frontends/paddle/src/op/matmul.cpp index 37a4a69eeb5..f5638bd5d51 100644 --- a/src/frontends/paddlepaddle/src/op/matmul.cpp +++ b/src/frontends/paddle/src/op/matmul.cpp @@ -7,7 +7,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs matmul(const NodeContext& node) { auto x = node.get_ng_input("X"); @@ -25,6 +25,6 @@ NamedOutputs matmul(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/matmul_v2.cpp b/src/frontends/paddle/src/op/matmul_v2.cpp similarity index 94% rename from src/frontends/paddlepaddle/src/op/matmul_v2.cpp rename to src/frontends/paddle/src/op/matmul_v2.cpp index f4e414a2d0c..1f88ec1bda4 100644 --- a/src/frontends/paddlepaddle/src/op/matmul_v2.cpp +++ b/src/frontends/paddle/src/op/matmul_v2.cpp @@ -7,7 +7,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs matmul_v2(const NodeContext& node) { const auto x = node.get_ng_input("X"); @@ -19,6 +19,6 @@ NamedOutputs matmul_v2(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/matrix_nms.cpp b/src/frontends/paddle/src/op/matrix_nms.cpp similarity index 85% rename from src/frontends/paddlepaddle/src/op/matrix_nms.cpp rename to src/frontends/paddle/src/op/matrix_nms.cpp index 12a7df7cd1d..246722f306e 100644 --- a/src/frontends/paddlepaddle/src/op/matrix_nms.cpp +++ 
b/src/frontends/paddle/src/op/matrix_nms.cpp @@ -4,11 +4,10 @@ #include #include "openvino/opsets/opset8.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs matrix_nms(const NodeContext& node) { using namespace opset8; @@ -31,8 +30,9 @@ NamedOutputs matrix_nms(const NodeContext& node) { } auto out_names = node.get_output_names(); - PDPD_ASSERT(out_names.size() == 3 || out_names.size() == 2, - "Unexpected number of outputs of MatrixNMS: " + std::to_string(out_names.size())); + PADDLE_OP_CHECK(node, + out_names.size() == 3 || out_names.size() == 2, + "Unexpected number of outputs of MatrixNMS: " + std::to_string(out_names.size())); element::Type type_num = i32; bool return_rois_num = true; @@ -44,8 +44,9 @@ NamedOutputs matrix_nms(const NodeContext& node) { } auto type_index = node.get_out_port_type("Index"); - PDPD_ASSERT((type_index == i32 || type_index == i64) && (type_num == i32 || type_num == i64), - "Unexpected data type of outputs of MatrixNMS"); + PADDLE_OP_CHECK(node, + (type_index == i32 || type_index == i64) && (type_num == i32 || type_num == i64), + "Unexpected data type of outputs of MatrixNMS"); auto normalized = node.get_attribute("normalized"); @@ -82,6 +83,6 @@ NamedOutputs matrix_nms(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/multiclass_nms.cpp b/src/frontends/paddle/src/op/multiclass_nms.cpp similarity index 84% rename from src/frontends/paddlepaddle/src/op/multiclass_nms.cpp rename to src/frontends/paddle/src/op/multiclass_nms.cpp index 88871e680ea..b5a45ae721e 100644 --- a/src/frontends/paddlepaddle/src/op/multiclass_nms.cpp +++ b/src/frontends/paddle/src/op/multiclass_nms.cpp @@ -4,11 +4,10 @@ #include #include "openvino/opsets/opset8.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs multiclass_nms(const NodeContext& node) { using namespace opset8; @@ -25,12 +24,13 @@ NamedOutputs multiclass_nms(const NodeContext& node) { auto nms_eta = node.get_attribute("nms_eta"); auto out_names = node.get_output_names(); - PDPD_ASSERT(out_names.size() == 3, "Unexpected number of outputs of MulticlassNMS"); + PADDLE_OP_CHECK(node, out_names.size() == 3, "Unexpected number of outputs of MulticlassNMS"); auto type_index = node.get_out_port_type("Index"); auto type_num = node.get_out_port_type("NmsRoisNum"); - PDPD_ASSERT((type_index == i32 || type_index == i64) && (type_num == i32 || type_num == i64), - "Unexpected data type of outputs of MulticlassNMS: " + std::to_string(out_names.size())); + PADDLE_OP_CHECK(node, + (type_index == i32 || type_index == i64) && (type_num == i32 || type_num == i64), + "Unexpected data type of outputs of MulticlassNMS: " + std::to_string(out_names.size())); auto normalized = node.get_attribute("normalized"); @@ -64,6 +64,6 @@ NamedOutputs multiclass_nms(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/pad3d.cpp b/src/frontends/paddle/src/op/pad3d.cpp similarity index 84% rename from src/frontends/paddlepaddle/src/op/pad3d.cpp rename to src/frontends/paddle/src/op/pad3d.cpp index 8a2a8f1b555..22a07ecac61 100644 --- a/src/frontends/paddlepaddle/src/op/pad3d.cpp +++ 
b/src/frontends/paddle/src/op/pad3d.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs pad3d(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -18,18 +18,18 @@ NamedOutputs pad3d(const NodeContext& node) { auto paddings = std::vector(6, 0); - // padding of type int feature only supported by PaddlePaddle 'develop' + // padding of type int feature only supported by paddle 'develop' // version(>=2.1.0) if (node.has_attribute>("paddings")) { auto paddings_vector = node.get_attribute>("paddings"); - PDPD_OP_VALIDATION_CHECK(node, paddings_vector.size() == 6, "paddings Params size should be 6 in pad3d!"); + PADDLE_OP_CHECK(node, paddings_vector.size() == 6, "paddings Params size should be 6 in pad3d!"); paddings = paddings_vector; } else if (node.has_attribute("paddings")) { auto padding_int = node.get_attribute("paddings"); for (int i = 0; i < 6; i++) paddings[i] = padding_int; } else { - PDPD_OP_VALIDATION_CHECK(node, false, "Unsupported paddings attribute!"); + PADDLE_OP_CHECK(node, false, "Unsupported paddings attribute!"); } auto pads_begin = std::vector(5, 0); @@ -49,7 +49,7 @@ NamedOutputs pad3d(const NodeContext& node) { } else if (mode == "replicate") { pad_mode = ov::op::PadMode::EDGE; } else { - PDPD_OP_VALIDATION_CHECK(node, false, "Unsupported 3d paddings mode: [" + mode + "]"); + PADDLE_OP_CHECK(node, false, "Unsupported 3d paddings mode: [" + mode + "]"); } if (data_format == "NCDHW") { @@ -67,7 +67,7 @@ NamedOutputs pad3d(const NodeContext& node) { pads_begin[1] = paddings[4]; // front pads_end[1] = paddings[5]; // back } else { - PDPD_OP_VALIDATION_CHECK(node, false, "Unsupported 3d paddings data_format: [" + data_format + "]"); + PADDLE_OP_CHECK(node, false, "Unsupported 3d paddings data_format: [" + data_format + "]"); } padding_begin = ov::opset6::Constant::create(element::i32, ov::Shape{pads_begin.size()}, pads_begin); @@ -83,6 +83,6 @@ NamedOutputs pad3d(const NodeContext& node) { {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/pool2d.cpp b/src/frontends/paddle/src/op/pool2d.cpp similarity index 94% rename from src/frontends/paddlepaddle/src/op/pool2d.cpp rename to src/frontends/paddle/src/op/pool2d.cpp index d7606aa85bb..23b3efa363e 100644 --- a/src/frontends/paddlepaddle/src/op/pool2d.cpp +++ b/src/frontends/paddle/src/op/pool2d.cpp @@ -11,7 +11,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { // helper func - get pad_begin and pad_end static void get_paddings(const NodeContext& node, ov::Shape& pad_begin, ov::Shape& pad_end, ov::op::PadType& auto_pad) { @@ -73,13 +73,13 @@ NamedOutputs pool2d(const NodeContext& node) { pooling_type = "max"; } - PDPD_ASSERT((pooling_type == "max") || (pooling_type == "avg"), "pool2d: not supported pooling type !"); - PDPD_ASSERT(kernel_shape.size() == 1 || kernel_shape.size() == 2, "pool2d: ksize must be 1 or 2!"); + PADDLE_OP_CHECK(node, (pooling_type == "max") || (pooling_type == "avg"), "pool2d: not supported pooling type !"); + PADDLE_OP_CHECK(node, kernel_shape.size() == 1 || kernel_shape.size() == 2, "pool2d: ksize must be 1 or 2!"); PartialShape input_shape = data.get_partial_shape(); int32_t input_rank = input_shape.rank().get_length(); - PDPD_ASSERT(input_rank >= 2, "input tensor rank must be greater than 2"); + PADDLE_OP_CHECK(node, input_rank >= 2, "input tensor rank must be greater 
than 2"); auto auto_pad = ov::op::PadType::EXPLICIT; ov::Shape pad_begin, pad_end; @@ -139,7 +139,7 @@ NamedOutputs pool2d(const NodeContext& node) { kernel_w = kernel_shape[1]; } - PDPD_ASSERT(kernel_h > 0 && kernel_w > 0, "pool2d kernel shape must be greater than 0"); + PADDLE_OP_CHECK(node, kernel_h > 0 && kernel_w > 0, "pool2d kernel shape must be greater than 0"); // Note: this shape check is only valid when the spatial dim of input_shape // is static. @@ -181,6 +181,6 @@ NamedOutputs pool2d(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/pow.cpp b/src/frontends/paddle/src/op/pow.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/pow.cpp rename to src/frontends/paddle/src/op/pow.cpp index 22cbc91461e..5784005d2c3 100644 --- a/src/frontends/paddlepaddle/src/op/pow.cpp +++ b/src/frontends/paddle/src/op/pow.cpp @@ -4,11 +4,10 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs pow(const NodeContext& node) { auto x = node.get_ng_input("X"); @@ -26,6 +25,6 @@ NamedOutputs pow(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/prior_box.cpp b/src/frontends/paddle/src/op/prior_box.cpp similarity index 98% rename from src/frontends/paddlepaddle/src/op/prior_box.cpp rename to src/frontends/paddle/src/op/prior_box.cpp index 1801f8fd59f..4d3302ae926 100644 --- a/src/frontends/paddlepaddle/src/op/prior_box.cpp +++ b/src/frontends/paddle/src/op/prior_box.cpp @@ -10,7 +10,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { using namespace default_opset; using namespace element; @@ -65,6 +65,6 @@ NamedOutputs prior_box(const NodeContext& node) { return outputs; } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/range.cpp b/src/frontends/paddle/src/op/range.cpp similarity index 95% rename from src/frontends/paddlepaddle/src/op/range.cpp rename to src/frontends/paddle/src/op/range.cpp index 9ebeff92d06..741e08cc8b7 100644 --- a/src/frontends/paddlepaddle/src/op/range.cpp +++ b/src/frontends/paddle/src/op/range.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs range(const NodeContext& node) { auto start = node.get_ng_input("Start"); @@ -27,6 +27,6 @@ NamedOutputs range(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/relu.cpp b/src/frontends/paddle/src/op/relu.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/relu.cpp rename to src/frontends/paddle/src/op/relu.cpp index 9277594cc9a..108db9d4435 100644 --- a/src/frontends/paddlepaddle/src/op/relu.cpp +++ b/src/frontends/paddle/src/op/relu.cpp @@ -8,13 +8,13 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs relu(const NodeContext& node) { return node.default_single_output_mapping({std::make_shared(node.get_ng_input("X"))}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } 
// namespace ov diff --git a/src/frontends/paddlepaddle/src/op/relu6.cpp b/src/frontends/paddle/src/op/relu6.cpp similarity index 86% rename from src/frontends/paddlepaddle/src/op/relu6.cpp rename to src/frontends/paddle/src/op/relu6.cpp index 2c37f0ac81f..d24660c951d 100644 --- a/src/frontends/paddlepaddle/src/op/relu6.cpp +++ b/src/frontends/paddle/src/op/relu6.cpp @@ -5,11 +5,10 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs relu6(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -18,6 +17,6 @@ NamedOutputs relu6(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/reshape2.cpp b/src/frontends/paddle/src/op/reshape2.cpp similarity index 94% rename from src/frontends/paddlepaddle/src/op/reshape2.cpp rename to src/frontends/paddle/src/op/reshape2.cpp index 20e2bccea10..fcdcf8086b2 100644 --- a/src/frontends/paddlepaddle/src/op/reshape2.cpp +++ b/src/frontends/paddle/src/op/reshape2.cpp @@ -5,11 +5,10 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs reshape2(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -38,6 +37,6 @@ NamedOutputs reshape2(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/rnn.cpp b/src/frontends/paddle/src/op/rnn.cpp similarity index 62% rename from src/frontends/paddlepaddle/src/op/rnn.cpp rename to src/frontends/paddle/src/op/rnn.cpp index 0e779c168de..ebb552ca623 100644 --- a/src/frontends/paddlepaddle/src/op/rnn.cpp +++ b/src/frontends/paddle/src/op/rnn.cpp @@ -5,23 +5,23 @@ #include #include "openvino/opsets/opset6.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs lstm(const NodeContext& node); NamedOutputs rnn(const NodeContext& node) { auto mode = node.get_attribute("mode"); - PDPD_ASSERT(mode == "LSTM", - "[Paddle Frontend]RNN Only Supports LSTM Ops Conversion now, don't " - "support " + - mode); + PADDLE_OP_CHECK(node, + mode == "LSTM", + "[Paddle Frontend]RNN Only Supports LSTM Ops Conversion now, don't " + "support " + + mode); return lstm(node); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/scale.cpp b/src/frontends/paddle/src/op/scale.cpp similarity index 93% rename from src/frontends/paddlepaddle/src/op/scale.cpp rename to src/frontends/paddle/src/op/scale.cpp index da8dd865178..6098e1b60fd 100644 --- a/src/frontends/paddlepaddle/src/op/scale.cpp +++ b/src/frontends/paddle/src/op/scale.cpp @@ -8,12 +8,12 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs scale(const NodeContext& node) { auto data = node.get_ng_input("X"); auto dtype = data.get_element_type(); - // Note: PDPD Scale output data_type is the same with input + // Note: paddle Scale output data_type is the same with input Output scale; Output bias; @@ -45,6 +45,6 @@ NamedOutputs scale(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // 
namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/shape.cpp b/src/frontends/paddle/src/op/shape.cpp similarity index 92% rename from src/frontends/paddlepaddle/src/op/shape.cpp rename to src/frontends/paddle/src/op/shape.cpp index b2594edcba3..98e5d518c5e 100644 --- a/src/frontends/paddlepaddle/src/op/shape.cpp +++ b/src/frontends/paddle/src/op/shape.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs shape(const NodeContext& node) { auto data = node.get_ng_input("Input"); @@ -17,6 +17,6 @@ NamedOutputs shape(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/sigmoid.cpp b/src/frontends/paddle/src/op/sigmoid.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/sigmoid.cpp rename to src/frontends/paddle/src/op/sigmoid.cpp index b4145c68695..b11154143ef 100644 --- a/src/frontends/paddlepaddle/src/op/sigmoid.cpp +++ b/src/frontends/paddle/src/op/sigmoid.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs sigmoid(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -16,6 +16,6 @@ NamedOutputs sigmoid(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/slice.cpp b/src/frontends/paddle/src/op/slice.cpp similarity index 95% rename from src/frontends/paddlepaddle/src/op/slice.cpp rename to src/frontends/paddle/src/op/slice.cpp index 873695a8fc8..e739a96be79 100644 --- a/src/frontends/paddlepaddle/src/op/slice.cpp +++ b/src/frontends/paddle/src/op/slice.cpp @@ -10,7 +10,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { using namespace default_opset; NamedOutputs slice(const NodeContext& node) { @@ -80,9 +80,9 @@ NamedOutputs slice(const NodeContext& node) { // according to paddle slice_op, when all axes are decreased, output shape is [1], instead of scalar. 
// Ref: paddle/fluid/operators/slice_op.h PartialShape input_shape = data.get_partial_shape(); - PDPD_OP_VALIDATION_CHECK(node, - input_shape.rank().is_static(), - "input rank of slice must be static when decrease_axis is set."); + PADDLE_OP_CHECK(node, + input_shape.rank().is_static(), + "input rank of slice must be static when decrease_axis is set."); auto squeeze_index_node = Constant::create(element::i32, {decrease_axis.size()}, decrease_axis); auto decreased_node = std::make_shared(stride_slice_node, squeeze_index_node); @@ -101,6 +101,6 @@ NamedOutputs slice(const NodeContext& node) { return node.default_single_output_mapping({stride_slice_node}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/softmax.cpp b/src/frontends/paddle/src/op/softmax.cpp similarity index 80% rename from src/frontends/paddlepaddle/src/op/softmax.cpp rename to src/frontends/paddle/src/op/softmax.cpp index b70dd6971d9..bb19d209900 100644 --- a/src/frontends/paddlepaddle/src/op/softmax.cpp +++ b/src/frontends/paddle/src/op/softmax.cpp @@ -8,19 +8,19 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs softmax(const NodeContext& node) { auto data = node.get_ng_input("X"); auto axis = node.get_attribute("axis"); if (axis < 0) { - PDPD_OP_VALIDATION_CHECK(node, data.get_partial_shape().rank().is_static(), "Softmax rank must be static"); + PADDLE_OP_CHECK(node, data.get_partial_shape().rank().is_static(), "Softmax rank must be static"); auto data_rank = data.get_partial_shape().rank().get_length(); axis = data_rank + axis; } return node.default_single_output_mapping({std::make_shared(data, axis)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/softplus.cpp b/src/frontends/paddle/src/op/softplus.cpp similarity index 85% rename from src/frontends/paddlepaddle/src/op/softplus.cpp rename to src/frontends/paddle/src/op/softplus.cpp index 35e2ec3f132..0d7c5b85cc2 100644 --- a/src/frontends/paddlepaddle/src/op/softplus.cpp +++ b/src/frontends/paddle/src/op/softplus.cpp @@ -7,7 +7,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs softplus(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -18,11 +18,11 @@ NamedOutputs softplus(const NodeContext& node) { const float EPSINON = 1e-6; if (!(abs(beta - supported_beta) <= EPSINON) || !(abs(threshold - supported_threshold) <= EPSINON)) { - PDPD_OP_VALIDATION_CHECK(node, false, "only support beta==1.0 && threshold==20.0"); + PADDLE_OP_CHECK(node, false, "only support beta==1.0 && threshold==20.0"); } return node.default_single_output_mapping({std::make_shared(data)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/split.cpp b/src/frontends/paddle/src/op/split.cpp similarity index 83% rename from src/frontends/paddlepaddle/src/op/split.cpp rename to src/frontends/paddle/src/op/split.cpp index 843f2bb6df5..9364399e091 100644 --- a/src/frontends/paddlepaddle/src/op/split.cpp +++ b/src/frontends/paddle/src/op/split.cpp @@ -5,11 +5,10 @@ #include #include "openvino/opsets/opset7.hpp" -#include "paddlepaddle_frontend/utility.hpp" namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { 
NamedOutputs split(const NodeContext& node) { using namespace opset7; @@ -35,8 +34,9 @@ NamedOutputs split(const NodeContext& node) { auto inputs = node.get_ng_inputs("SectionsTensorList"); sections_node = std::make_shared(inputs, 0); } else { - PDPD_ASSERT(node.has_attribute>("sections"), - "split: num==0 && no sections is invalid."); + PADDLE_OP_CHECK(node, + node.has_attribute>("sections"), + "split: num==0 && no sections is invalid."); auto sections = node.get_attribute>("sections"); sections_node = Constant::create(element::i32, {sections.size()}, sections); } @@ -46,16 +46,16 @@ NamedOutputs split(const NodeContext& node) { } auto out_names = node.get_output_names(); - PDPD_ASSERT(out_names.size() == 1, "Unexpected number of outputs"); + PADDLE_OP_CHECK(node, out_names.size() == 1, "Unexpected number of outputs"); auto it = std::find(out_names.begin(), out_names.end(), "Out"); - PDPD_ASSERT(it != out_names.end(), "Expected output not found"); + PADDLE_OP_CHECK(node, it != out_names.end(), "Expected output not found"); for (const auto& split_output : split_outputs) { named_outputs[*it].push_back(split_output); } return named_outputs; } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/squeeze.cpp b/src/frontends/paddle/src/op/squeeze.cpp similarity index 95% rename from src/frontends/paddlepaddle/src/op/squeeze.cpp rename to src/frontends/paddle/src/op/squeeze.cpp index 2c3472df019..af83f9575e3 100644 --- a/src/frontends/paddlepaddle/src/op/squeeze.cpp +++ b/src/frontends/paddle/src/op/squeeze.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs squeeze(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -27,6 +27,6 @@ NamedOutputs squeeze(const NodeContext& node) { return node.default_single_output_mapping(out, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/stack.cpp b/src/frontends/paddle/src/op/stack.cpp similarity index 68% rename from src/frontends/paddlepaddle/src/op/stack.cpp rename to src/frontends/paddle/src/op/stack.cpp index 1b11e3715f6..92535d46728 100644 --- a/src/frontends/paddlepaddle/src/op/stack.cpp +++ b/src/frontends/paddle/src/op/stack.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { using namespace default_opset; NamedOutputs stack(const NodeContext& node) { @@ -20,15 +20,14 @@ NamedOutputs stack(const NodeContext& node) { auto axis_const = std::make_shared(element::i64, Shape{}, axis); if (data_shape.rank().is_static()) - PDPD_OP_VALIDATION_CHECK( - node, - (axis >= -(data_shape.rank().get_length() + 1)) && axis < (data_shape.rank().get_length() + 1), - "axis range is [-(R+1), R+1)!"); + PADDLE_OP_CHECK(node, + (axis >= -(data_shape.rank().get_length() + 1)) && axis < (data_shape.rank().get_length() + 1), + "axis range is [-(R+1), R+1)!"); for (const auto& data : datas) { - PDPD_OP_VALIDATION_CHECK(node, - data_type == data.get_element_type(), - "stack input tensor must have the same data types!"); + PADDLE_OP_CHECK(node, + data_type == data.get_element_type(), + "stack input tensor must have the same data types!"); node_datas_reshape.push_back(std::make_shared(data, axis_const)); } @@ -36,6 +35,6 @@ NamedOutputs stack(const NodeContext& node) { return 
node.default_single_output_mapping({std::make_shared(node_datas_reshape, axis)}, {"Y"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/tanh.cpp b/src/frontends/paddle/src/op/tanh.cpp similarity index 91% rename from src/frontends/paddlepaddle/src/op/tanh.cpp rename to src/frontends/paddle/src/op/tanh.cpp index 96ead42fa1b..2b90b5340a3 100644 --- a/src/frontends/paddlepaddle/src/op/tanh.cpp +++ b/src/frontends/paddle/src/op/tanh.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs tanh(const NodeContext& node) { const auto x = node.get_ng_input("X"); @@ -17,6 +17,6 @@ NamedOutputs tanh(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/transpose2.cpp b/src/frontends/paddle/src/op/transpose2.cpp similarity index 93% rename from src/frontends/paddlepaddle/src/op/transpose2.cpp rename to src/frontends/paddle/src/op/transpose2.cpp index 2c2464fd01b..051e0c95bcd 100644 --- a/src/frontends/paddlepaddle/src/op/transpose2.cpp +++ b/src/frontends/paddle/src/op/transpose2.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs transpose2(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -18,6 +18,6 @@ NamedOutputs transpose2(const NodeContext& node) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/unsqueeze.cpp b/src/frontends/paddle/src/op/unsqueeze.cpp similarity index 95% rename from src/frontends/paddlepaddle/src/op/unsqueeze.cpp rename to src/frontends/paddle/src/op/unsqueeze.cpp index 0d4a583b5a8..0791aa0e349 100644 --- a/src/frontends/paddlepaddle/src/op/unsqueeze.cpp +++ b/src/frontends/paddle/src/op/unsqueeze.cpp @@ -8,7 +8,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { NamedOutputs unsqueeze(const NodeContext& node) { auto data = node.get_ng_input("X"); @@ -25,6 +25,6 @@ NamedOutputs unsqueeze(const NodeContext& node) { return node.default_single_output_mapping({std::make_shared(data, axesNode)}, {"Out"}); } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op/yolo_box.cpp b/src/frontends/paddle/src/op/yolo_box.cpp similarity index 99% rename from src/frontends/paddlepaddle/src/op/yolo_box.cpp rename to src/frontends/paddle/src/op/yolo_box.cpp index 4393304ef99..2b63bf47586 100644 --- a/src/frontends/paddlepaddle/src/op/yolo_box.cpp +++ b/src/frontends/paddle/src/op/yolo_box.cpp @@ -9,7 +9,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { using namespace opset6; using namespace element; @@ -36,7 +36,7 @@ NamedOutputs yolo_box(const NodeContext& node_context) { int32_t class_num = node_context.get_attribute("class_num"); auto const_class_num = Constant::create(i64, {1}, {class_num}); - // PDPD anchors attribute is of type int32. Convert to float for computing + // Paddle anchors attribute is of type int32. Convert to float for computing // convinient. 
auto _anchors = node_context.get_attribute>("anchors"); std::vector anchors(_anchors.begin(), _anchors.end()); @@ -324,6 +324,6 @@ NamedOutputs yolo_box(const NodeContext& node_context) { } } // namespace op -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp similarity index 99% rename from src/frontends/paddlepaddle/src/op_table.cpp rename to src/frontends/paddle/src/op_table.cpp index e5c508c3237..aecdcbf4fc0 100644 --- a/src/frontends/paddlepaddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -5,7 +5,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { namespace op { #define OP_CONVERTER(op) NamedOutputs op(const NodeContext& node) OP_CONVERTER(argmax); @@ -150,6 +150,6 @@ std::map get_supported_ops() { {"yolo_box", op::yolo_box}}; }; -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/op_table.hpp b/src/frontends/paddle/src/op_table.hpp similarity index 90% rename from src/frontends/paddlepaddle/src/op_table.hpp rename to src/frontends/paddle/src/op_table.hpp index b24f2f94180..7654ed6521e 100644 --- a/src/frontends/paddlepaddle/src/op_table.hpp +++ b/src/frontends/paddle/src/op_table.hpp @@ -12,11 +12,11 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { using CreatorFunction = std::function; std::map get_supported_ops(); -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/pdpd_fw_node.cpp b/src/frontends/paddle/src/paddle_fw_node.cpp similarity index 72% rename from src/frontends/paddlepaddle/src/pdpd_fw_node.cpp rename to src/frontends/paddle/src/paddle_fw_node.cpp index b4e43718790..4d3fc17f3d1 100644 --- a/src/frontends/paddlepaddle/src/pdpd_fw_node.cpp +++ b/src/frontends/paddle/src/paddle_fw_node.cpp @@ -2,13 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include namespace ov { namespace frontend { - -void PDPDFrameworkNode::validate_and_infer_types() { - FrameworkNode::validate_and_infer_types(); +namespace paddle { +void FrameworkNode::validate_and_infer_types() { + ov::op::util::FrameworkNode::validate_and_infer_types(); size_t idx = 0; for (const auto& port_pair : m_decoder.get_output_type_map()) { for (const auto& p_type : port_pair.second) { @@ -17,7 +17,7 @@ void PDPDFrameworkNode::validate_and_infer_types() { } } -std::map PDPDFrameworkNode::get_named_inputs() const { +std::map FrameworkNode::get_named_inputs() const { return m_decoder.map_for_each_input([&](const std::string& name, size_t) { auto it = std::find(m_inputs_names.begin(), m_inputs_names.end(), name); if (it != m_inputs_names.end()) { @@ -28,11 +28,12 @@ std::map PDPDFrameworkNode::get_named_inputs() const }); } -std::map PDPDFrameworkNode::return_named_outputs() { +std::map FrameworkNode::return_named_outputs() { return m_decoder.map_for_each_output([&](const std::string&, size_t idx) { return output(idx); }); } +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/pdpd_fw_node.hpp b/src/frontends/paddle/src/paddle_fw_node.hpp similarity index 63% rename from src/frontends/paddlepaddle/src/pdpd_fw_node.hpp rename to src/frontends/paddle/src/paddle_fw_node.hpp index 81ede7fa33e..859402ab9e3 100644 --- a/src/frontends/paddlepaddle/src/pdpd_fw_node.hpp +++ 
b/src/frontends/paddle/src/paddle_fw_node.hpp @@ -10,14 +10,13 @@ namespace ov { namespace frontend { -class PDPDFrameworkNode : public ov::op::util::FrameworkNode { +namespace paddle { +class FrameworkNode : public ov::op::util::FrameworkNode { public: - OPENVINO_OP("PDPDFrameworkNode", "util", ov::op::util::FrameworkNode); + OPENVINO_OP("FrameworkNode", "util", ov::op::util::FrameworkNode); - PDPDFrameworkNode(const DecoderPDPDProto& decoder, - const OutputVector& inputs, - const std::vector& inputs_names) - : FrameworkNode(inputs, decoder.get_output_size()), + FrameworkNode(const DecoderProto& decoder, const OutputVector& inputs, const std::vector& inputs_names) + : ov::op::util::FrameworkNode(inputs, decoder.get_output_size()), m_decoder{decoder}, m_inputs_names{inputs_names} { ov::op::util::FrameworkNodeAttrs attrs; @@ -30,14 +29,14 @@ public: void validate_and_infer_types() override; std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override { - return std::make_shared(m_decoder, inputs, m_inputs_names); + return std::make_shared(m_decoder, inputs, m_inputs_names); } std::string get_op_type() const { return m_decoder.get_op_type(); } - const DecoderPDPDProto& get_decoder() const { + const DecoderProto& get_decoder() const { return m_decoder; } @@ -46,8 +45,9 @@ public: std::map return_named_outputs(); private: - const DecoderPDPDProto m_decoder; + const DecoderProto m_decoder; std::vector m_inputs_names; }; +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/pdpd_utils.cpp b/src/frontends/paddle/src/paddle_utils.cpp similarity index 66% rename from src/frontends/paddlepaddle/src/pdpd_utils.cpp rename to src/frontends/paddle/src/paddle_utils.cpp index 5545de213bb..68508799b1e 100644 --- a/src/frontends/paddlepaddle/src/pdpd_utils.cpp +++ b/src/frontends/paddle/src/paddle_utils.cpp @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "pdpd_utils.hpp" +#include "paddle_utils.hpp" -std::shared_ptr ov::frontend::pdpd::reorder_axes(const ov::Output& value, - std::vector axes_order) { +std::shared_ptr ov::frontend::paddle::reorder_axes(const ov::Output& value, + std::vector axes_order) { const auto axes_order_const = std::make_shared(element::i64, Shape{axes_order.size()}, diff --git a/src/frontends/paddlepaddle/src/pdpd_utils.hpp b/src/frontends/paddle/src/paddle_utils.hpp similarity index 96% rename from src/frontends/paddlepaddle/src/pdpd_utils.hpp rename to src/frontends/paddle/src/paddle_utils.hpp index 5fcaaa6c188..e098425a778 100644 --- a/src/frontends/paddlepaddle/src/pdpd_utils.hpp +++ b/src/frontends/paddle/src/paddle_utils.hpp @@ -9,7 +9,7 @@ namespace ov { namespace frontend { -namespace pdpd { +namespace paddle { #ifdef _WIN32 const char PATH_SEPARATOR = '\\'; # if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) @@ -40,6 +40,6 @@ bool endsWith(const std::basic_string& str, const std::basic_string& suffi } std::shared_ptr reorder_axes(const Output& value, std::vector axes_order); -} // namespace pdpd +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/place.cpp b/src/frontends/paddle/src/place.cpp similarity index 60% rename from src/frontends/paddlepaddle/src/place.cpp rename to src/frontends/paddle/src/place.cpp index 59a3e571ee2..62b71986dc1 100644 --- a/src/frontends/paddlepaddle/src/place.cpp +++ b/src/frontends/paddle/src/place.cpp @@ -2,15 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include 
"paddlepaddle_frontend/place.hpp" +#include "place.hpp" #include "decoder.hpp" #include "framework.pb.h" -using namespace ov; -using namespace frontend; +namespace ov { +namespace frontend { +namespace paddle { -bool PlacePDPD::is_input() const { +bool Place::is_input() const { const auto& model_ins = m_input_model.get_inputs(); const auto cmp = [this](const Place::Ptr& p) { @@ -19,7 +20,7 @@ bool PlacePDPD::is_input() const { return std::find_if(model_ins.begin(), model_ins.end(), cmp) != model_ins.end(); } -bool PlacePDPD::is_output() const { +bool Place::is_output() const { const auto& model_outs = m_input_model.get_outputs(); const auto cmp = [this](const Place::Ptr& p) { return p.get() == this; @@ -27,81 +28,80 @@ bool PlacePDPD::is_output() const { return std::find_if(model_outs.begin(), model_outs.end(), cmp) != model_outs.end(); } -OpPlacePDPD::OpPlacePDPD(const InputModel& input_model, - const paddle::framework::proto::OpDesc& op_desc, - const std::vector& names) - : PlacePDPD(input_model, names), +OpPlace::OpPlace(const ov::frontend::InputModel& input_model, + const ::paddle::framework::proto::OpDesc& op_desc, + const std::vector& names) + : Place(input_model, names), m_op_desc(op_desc) {} -OpPlacePDPD::OpPlacePDPD(const InputModel& input_model, const paddle::framework::proto::OpDesc& op_desc) - : OpPlacePDPD(input_model, op_desc, {}) {} +OpPlace::OpPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::OpDesc& op_desc) + : OpPlace(input_model, op_desc, {}) {} -const std::map>>& OpPlacePDPD::get_output_ports() const { +const std::map>>& OpPlace::get_output_ports() const { return m_output_ports; } -const std::map>>& OpPlacePDPD::get_input_ports() const { +const std::map>>& OpPlace::get_input_ports() const { return m_input_ports; } -std::shared_ptr OpPlacePDPD::get_output_port_pdpd(const std::string& outputName, - int outputPortIndex) const { +std::shared_ptr OpPlace::get_output_port_paddle(const std::string& outputName, + int outputPortIndex) const { FRONT_END_GENERAL_CHECK(outputPortIndex <= m_output_ports.at(outputName).size(), "outputPortIndex is out of bounds."); return m_output_ports.at(outputName)[outputPortIndex]; } -std::shared_ptr OpPlacePDPD::get_input_port_pdpd(const std::string& inputName, - int inputPortIndex) const { +std::shared_ptr OpPlace::get_input_port_paddle(const std::string& inputName, int inputPortIndex) const { FRONT_END_GENERAL_CHECK(inputPortIndex <= m_input_ports.at(inputName).size(), "inputPortIndex is out of bounds."); return m_input_ports.at(inputName)[inputPortIndex]; } -const paddle::framework::proto::OpDesc& OpPlacePDPD::get_desc() const { +const ::paddle::framework::proto::OpDesc& OpPlace::get_desc() const { return m_op_desc; } -void OpPlacePDPD::add_out_port(const std::shared_ptr& output, const std::string& name) { +void OpPlace::add_out_port(const std::shared_ptr& output, const std::string& name) { m_output_ports[name].push_back(output); } -void OpPlacePDPD::add_in_port(const std::shared_ptr& input, const std::string& name) { +void OpPlace::add_in_port(const std::shared_ptr& input, const std::string& name) { m_input_ports[name].push_back(input); } -Place::Ptr OpPlacePDPD::get_output_port(const std::string& name) const { +Place::Ptr OpPlace::get_output_port(const std::string& name) const { FRONT_END_GENERAL_CHECK(m_output_ports.at(name).size() == 1, "Only one output port should exist."); return m_output_ports.at(name)[0]; } -Place::Ptr OpPlacePDPD::get_input_port(const std::string& name) const { +Place::Ptr 
OpPlace::get_input_port(const std::string& name) const { FRONT_END_GENERAL_CHECK(m_input_ports.at(name).size() == 1, "Only one input port should exist."); return m_input_ports.at(name)[0]; } -Place::Ptr OpPlacePDPD::get_input_port(int outputPortIndex) const { +Place::Ptr OpPlace::get_input_port(int outputPortIndex) const { FRONT_END_GENERAL_CHECK(m_input_ports.size() == 1, "Only one named input port should exist."); return m_input_ports.begin()->second[outputPortIndex]; } -Place::Ptr OpPlacePDPD::get_output_port(int outputPortIndex) const { +Place::Ptr OpPlace::get_output_port(int outputPortIndex) const { FRONT_END_GENERAL_CHECK(m_output_ports.size() == 1, "Only one named output port should exist."); return m_output_ports.begin()->second[outputPortIndex]; } -Place::Ptr OpPlacePDPD::get_output_port() const { +Place::Ptr OpPlace::get_output_port() const { FRONT_END_GENERAL_CHECK(m_output_ports.size() == 1 && m_output_ports.begin()->second.size() == 1, "Only one output port should exist."); return m_output_ports.begin()->second[0]; } -Place::Ptr OpPlacePDPD::get_input_port() const { +Place::Ptr OpPlace::get_input_port() const { FRONT_END_GENERAL_CHECK(m_input_ports.size() == 1 && m_input_ports.begin()->second.size() == 1, "Only one input port should exist."); return m_input_ports.begin()->second[0]; } -std::vector OpPlacePDPD::get_consuming_operations() const { +std::vector OpPlace::get_consuming_operations() const { std::vector consuming_ops; for (const auto& out_port : m_output_ports) { for (const auto& out_port_place : out_port.second) { @@ -112,20 +112,20 @@ std::vector OpPlacePDPD::get_consuming_operations() const { return consuming_ops; } -std::vector OpPlacePDPD::get_consuming_operations(const std::string& outputPortName, - int outputPortIndex) const { +std::vector OpPlace::get_consuming_operations(const std::string& outputPortName, + int outputPortIndex) const { return get_output_port(outputPortName, outputPortIndex)->get_consuming_operations(); } -std::vector OpPlacePDPD::get_consuming_operations(int outputPortIndex) const { +std::vector OpPlace::get_consuming_operations(int outputPortIndex) const { return get_output_port(outputPortIndex)->get_consuming_operations(); } -std::vector OpPlacePDPD::get_consuming_operations(const std::string& outputPortName) const { +std::vector OpPlace::get_consuming_operations(const std::string& outputPortName) const { return get_output_port(outputPortName)->get_consuming_operations(); } -std::vector OpPlacePDPD::get_consuming_ports() const { +std::vector OpPlace::get_consuming_ports() const { std::vector consuming_ports; for (const auto& out_port : m_output_ports) { for (const auto& out_port_place : out_port.second) { @@ -136,82 +136,83 @@ std::vector OpPlacePDPD::get_consuming_ports() const { return consuming_ports; } -Place::Ptr OpPlacePDPD::get_output_port(const std::string& outputName, int outputPortIndex) const { +Place::Ptr OpPlace::get_output_port(const std::string& outputName, int outputPortIndex) const { FRONT_END_GENERAL_CHECK(outputPortIndex <= m_output_ports.at(outputName).size(), "outputPortIndex is Out of bounds."); return m_output_ports.at(outputName)[outputPortIndex]; } -Place::Ptr OpPlacePDPD::get_input_port(const std::string& inputName, int inputPortIndex) const { +Place::Ptr OpPlace::get_input_port(const std::string& inputName, int inputPortIndex) const { FRONT_END_GENERAL_CHECK(inputPortIndex <= m_input_ports.at(inputName).size(), "inputPortIndex is out of bounds."); return m_input_ports.at(inputName)[inputPortIndex]; } -Place::Ptr 
OpPlacePDPD::get_source_tensor() const { +Place::Ptr OpPlace::get_source_tensor() const { return get_input_port()->get_source_tensor(); } -Place::Ptr OpPlacePDPD::get_source_tensor(const std::string& inputName) const { +Place::Ptr OpPlace::get_source_tensor(const std::string& inputName) const { return get_input_port(inputName)->get_source_tensor(); } -Place::Ptr OpPlacePDPD::get_source_tensor(int inputPortIndex) const { +Place::Ptr OpPlace::get_source_tensor(int inputPortIndex) const { return get_input_port(inputPortIndex)->get_source_tensor(); } -Place::Ptr OpPlacePDPD::get_source_tensor(const std::string& inputName, int inputPortIndex) const { +Place::Ptr OpPlace::get_source_tensor(const std::string& inputName, int inputPortIndex) const { return get_input_port(inputName, inputPortIndex)->get_source_tensor(); } -Place::Ptr OpPlacePDPD::get_target_tensor() const { +Place::Ptr OpPlace::get_target_tensor() const { return get_output_port()->get_target_tensor(); } -Place::Ptr OpPlacePDPD::get_target_tensor(const std::string& outputName) const { +Place::Ptr OpPlace::get_target_tensor(const std::string& outputName) const { return get_output_port(outputName)->get_target_tensor(); } -Place::Ptr OpPlacePDPD::get_target_tensor(const std::string& outputName, int outputPortIndex) const { +Place::Ptr OpPlace::get_target_tensor(const std::string& outputName, int outputPortIndex) const { return get_output_port(outputName, outputPortIndex)->get_target_tensor(); } -Place::Ptr OpPlacePDPD::get_producing_operation(const std::string& inputName) const { +Place::Ptr OpPlace::get_producing_operation(const std::string& inputName) const { return get_input_port(inputName)->get_producing_operation(); } -Place::Ptr OpPlacePDPD::get_producing_operation(const std::string& inputName, int inputPortIndex) const { +Place::Ptr OpPlace::get_producing_operation(const std::string& inputName, int inputPortIndex) const { return get_input_port(inputName, inputPortIndex)->get_producing_operation(); } -Place::Ptr OpPlacePDPD::get_producing_operation() const { +Place::Ptr OpPlace::get_producing_operation() const { return get_input_port()->get_producing_operation(); } -Place::Ptr OpPlacePDPD::get_producing_operation(int inputPortIndex) const { +Place::Ptr OpPlace::get_producing_operation(int inputPortIndex) const { return get_input_port(inputPortIndex)->get_producing_operation(); } -Place::Ptr OpPlacePDPD::get_target_tensor(int outputPortIndex) const { +Place::Ptr OpPlace::get_target_tensor(int outputPortIndex) const { return get_output_port(outputPortIndex)->get_target_tensor(); } -TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model, - const std::vector& names, - const paddle::framework::proto::VarDesc& var_desc) - : PlacePDPD(input_model, names), +TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model, + const std::vector& names, + const ::paddle::framework::proto::VarDesc& var_desc) + : Place(input_model, names), m_var_desc(var_desc) { const auto& var_type = var_desc.type(); - if (var_type.type() == paddle::framework::proto::VarType::LOD_TENSOR) { + if (var_type.type() == ::paddle::framework::proto::VarType::LOD_TENSOR) { const auto& tensor_desc = var_type.lod_tensor().tensor(); m_type = TYPE_MAP[tensor_desc.data_type()]; m_pshape = PartialShape(std::vector(tensor_desc.dims().begin(), tensor_desc.dims().end())); } } -TensorPlacePDPD::TensorPlacePDPD(const InputModel& input_model, const paddle::framework::proto::VarDesc& var_desc) - : TensorPlacePDPD(input_model, {var_desc.name()}, var_desc) {} 
+TensorPlace::TensorPlace(const ov::frontend::InputModel& input_model, + const ::paddle::framework::proto::VarDesc& var_desc) + : TensorPlace(input_model, {var_desc.name()}, var_desc) {} -std::vector TensorPlacePDPD::get_consuming_ports() const { +std::vector TensorPlace::get_consuming_ports() const { std::vector consuming_ports; for (const auto& consuming_port : m_consuming_ports) { if (const auto& locked = consuming_port.lock()) { @@ -223,7 +224,7 @@ std::vector TensorPlacePDPD::get_consuming_ports() const { return consuming_ports; } -Place::Ptr TensorPlacePDPD::get_producing_port() const { +Place::Ptr TensorPlace::get_producing_port() const { FRONT_END_GENERAL_CHECK(m_producing_ports.size() == 1, "Only one producing port is supported."); if (const auto& producing_port = m_producing_ports[0].lock()) { return producing_port; @@ -231,19 +232,19 @@ Place::Ptr TensorPlacePDPD::get_producing_port() const { FRONT_END_THROW("Producing Port has expired."); } -void TensorPlacePDPD::add_producing_port(const std::shared_ptr& out_port) { +void TensorPlace::add_producing_port(const std::shared_ptr& out_port) { m_producing_ports.push_back(out_port); } -void TensorPlacePDPD::add_consuming_port(const std::shared_ptr& in_port) { +void TensorPlace::add_consuming_port(const std::shared_ptr& in_port) { m_consuming_ports.push_back(in_port); } -const paddle::framework::proto::VarDesc& TensorPlacePDPD::get_desc() const { +const ::paddle::framework::proto::VarDesc& TensorPlace::get_desc() const { return m_var_desc; } -std::vector TensorPlacePDPD::get_consuming_operations() const { +std::vector TensorPlace::get_consuming_operations() const { std::vector consuming_ops; for (const auto& consuming_port : m_consuming_ports) { if (auto port_ptr = consuming_port.lock()) { @@ -256,7 +257,7 @@ std::vector TensorPlacePDPD::get_consuming_operations() const { return consuming_ops; } -bool TensorPlacePDPD::is_equal_data(const Place::Ptr& another) const { +bool TensorPlace::is_equal_data(const Place::Ptr& another) const { auto consuming_ports = get_consuming_ports(); bool eq_to_consuming_port = std::any_of(consuming_ports.begin(), consuming_ports.end(), [&another](const Ptr& place) { @@ -265,93 +266,97 @@ bool TensorPlacePDPD::is_equal_data(const Place::Ptr& another) const { return is_equal(another) || get_producing_port()->is_equal(another) || eq_to_consuming_port; } -Place::Ptr TensorPlacePDPD::get_producing_operation() const { +Place::Ptr TensorPlace::get_producing_operation() const { return get_producing_port()->get_producing_operation(); } -std::shared_ptr InPortPlacePDPD::get_source_tensor_pdpd() const { +std::shared_ptr InPortPlace::get_source_tensor_paddle() const { if (const auto& tensor = m_source_tensor.lock()) { return tensor; } FRONT_END_THROW("Source Tensor has expired."); } -std::shared_ptr InPortPlacePDPD::get_op() { +std::shared_ptr InPortPlace::get_op() { if (const auto& op = m_op.lock()) { return op; } FRONT_END_THROW("Operation has expired."); } -void InPortPlacePDPD::set_source_tensor(const std::weak_ptr& source_tensor) { +void InPortPlace::set_source_tensor(const std::weak_ptr& source_tensor) { m_source_tensor = source_tensor; } -std::vector InPortPlacePDPD::get_consuming_operations() const { +std::vector InPortPlace::get_consuming_operations() const { if (const auto& consuming_op = m_op.lock()) { return {consuming_op}; } FRONT_END_THROW("Operation has expired."); } -Place::Ptr InPortPlacePDPD::get_source_tensor() const { +Place::Ptr InPortPlace::get_source_tensor() const { if (const auto& tensor = 
m_source_tensor.lock()) { return tensor; } FRONT_END_THROW("Source Tensor has expired."); } -Place::Ptr InPortPlacePDPD::get_producing_port() const { +Place::Ptr InPortPlace::get_producing_port() const { return get_source_tensor()->get_producing_port(); } -bool InPortPlacePDPD::is_equal_data(const Place::Ptr& another) const { +bool InPortPlace::is_equal_data(const Place::Ptr& another) const { return get_source_tensor()->is_equal_data(another); } -Place::Ptr InPortPlacePDPD::get_producing_operation() const { +Place::Ptr InPortPlace::get_producing_operation() const { return get_producing_port()->get_producing_operation(); } -std::shared_ptr OutPortPlacePDPD::get_target_tensor_pdpd() const { +std::shared_ptr OutPortPlace::get_target_tensor_paddle() const { if (const auto& target_tensor = m_target_tensor.lock()) { return target_tensor; } FRONT_END_THROW("Target Tensor has expired."); } -std::vector OutPortPlacePDPD::get_consuming_operations() const { +std::vector OutPortPlace::get_consuming_operations() const { if (auto tensor_ptr = m_target_tensor.lock()) { return tensor_ptr->get_consuming_operations(); } FRONT_END_THROW("Tensor has expired."); } -void OutPortPlacePDPD::set_target_tensor(const std::weak_ptr& target_tensor) { +void OutPortPlace::set_target_tensor(const std::weak_ptr& target_tensor) { m_target_tensor = target_tensor; } -std::vector OutPortPlacePDPD::get_consuming_ports() const { +std::vector OutPortPlace::get_consuming_ports() const { if (auto tensor_ptr = m_target_tensor.lock()) { return tensor_ptr->get_consuming_ports(); } FRONT_END_THROW("Tensor has expired."); } -bool OutPortPlacePDPD::is_equal_data(const Place::Ptr& another) const { +bool OutPortPlace::is_equal_data(const Place::Ptr& another) const { return get_target_tensor()->is_equal_data(another); } -Place::Ptr OutPortPlacePDPD::get_target_tensor() const { +Place::Ptr OutPortPlace::get_target_tensor() const { if (const auto& target_tensor = m_target_tensor.lock()) { return target_tensor; } FRONT_END_THROW("Target Tensor has expired."); } -Place::Ptr OutPortPlacePDPD::get_producing_operation() const { +Place::Ptr OutPortPlace::get_producing_operation() const { if (auto op = m_op.lock()) { return op; } FRONT_END_THROW("Operation has expired."); } + +} // namespace paddle +} // namespace frontend +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp b/src/frontends/paddle/src/place.hpp similarity index 59% rename from src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp rename to src/frontends/paddle/src/place.hpp index 4fdd245d005..48c49b0602f 100644 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/place.hpp +++ b/src/frontends/paddle/src/place.hpp @@ -6,7 +6,7 @@ #include -#include "exceptions.hpp" +#include "input_model.hpp" namespace paddle { namespace framework { @@ -20,18 +20,20 @@ class VarDesc; namespace ov { namespace frontend { -class TensorPlacePDPD; -class OpPlacePDPD; +namespace paddle { -class PlacePDPD : public Place { +class TensorPlace; +class OpPlace; + +class Place : public ov::frontend::Place { public: - PlacePDPD(const InputModel& input_model, const std::vector& names) + Place(const ov::frontend::InputModel& input_model, const std::vector& names) : m_input_model(input_model), m_names(names) {} - explicit PlacePDPD(const InputModel& input_model) : PlacePDPD(input_model, std::vector{}) {} + explicit Place(const ov::frontend::InputModel& input_model) : Place(input_model, std::vector{}) {} - ~PlacePDPD() 
override = default; + ~Place() override = default; bool is_input() const override; bool is_output() const override; @@ -44,22 +46,22 @@ public: } private: - const InputModel& m_input_model; + const ov::frontend::InputModel& m_input_model; std::vector m_names; }; -class InPortPlacePDPD : public PlacePDPD { +class InPortPlace : public Place { public: - explicit InPortPlacePDPD(const InputModel& input_model) : PlacePDPD(input_model) {} + explicit InPortPlace(const ov::frontend::InputModel& input_model) : Place(input_model) {} - void set_op(const std::weak_ptr& op) { + void set_op(const std::weak_ptr& op) { m_op = op; } - void set_source_tensor(const std::weak_ptr& source_tensor); + void set_source_tensor(const std::weak_ptr& source_tensor); // Internal usage - std::shared_ptr get_source_tensor_pdpd() const; - std::shared_ptr get_op(); + std::shared_ptr get_source_tensor_paddle() const; + std::shared_ptr get_op(); // External usage std::vector get_consuming_operations() const override; @@ -70,20 +72,20 @@ public: bool is_equal_data(const Ptr& another) const override; private: - std::weak_ptr m_source_tensor; - std::weak_ptr m_op; + std::weak_ptr m_source_tensor; + std::weak_ptr m_op; }; -class OutPortPlacePDPD : public PlacePDPD { +class OutPortPlace : public Place { public: - explicit OutPortPlacePDPD(const InputModel& input_model) : PlacePDPD(input_model) {} + explicit OutPortPlace(const ov::frontend::InputModel& input_model) : Place(input_model) {} - void set_op(const std::weak_ptr& op) { + void set_op(const std::weak_ptr& op) { m_op = op; } - void set_target_tensor(const std::weak_ptr& target_tensor); + void set_target_tensor(const std::weak_ptr& target_tensor); - std::shared_ptr get_target_tensor_pdpd() const; + std::shared_ptr get_target_tensor_paddle() const; // External usage std::vector get_consuming_operations() const override; @@ -93,27 +95,27 @@ public: bool is_equal_data(const Ptr& another) const override; private: - std::weak_ptr m_op; - std::weak_ptr m_target_tensor; + std::weak_ptr m_op; + std::weak_ptr m_target_tensor; }; -class OpPlacePDPD : public PlacePDPD { +class OpPlace : public Place { public: - OpPlacePDPD(const InputModel& input_model, - const paddle::framework::proto::OpDesc& op_desc, - const std::vector& names); + OpPlace(const ov::frontend::InputModel& input_model, + const ::paddle::framework::proto::OpDesc& op_desc, + const std::vector& names); - OpPlacePDPD(const InputModel& input_model, const paddle::framework::proto::OpDesc& op_desc); + OpPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::OpDesc& op_desc); - void add_in_port(const std::shared_ptr& input, const std::string& name); - void add_out_port(const std::shared_ptr& output, const std::string& name); + void add_in_port(const std::shared_ptr& input, const std::string& name); + void add_out_port(const std::shared_ptr& output, const std::string& name); // Internal usage - const std::map>>& get_output_ports() const; - const std::map>>& get_input_ports() const; - std::shared_ptr get_output_port_pdpd(const std::string& outputName, int outputPortIndex) const; - std::shared_ptr get_input_port_pdpd(const std::string& inputName, int inputPortIndex) const; - const paddle::framework::proto::OpDesc& get_desc() const; + const std::map>>& get_output_ports() const; + const std::map>>& get_input_ports() const; + std::shared_ptr get_output_port_paddle(const std::string& outputName, int outputPortIndex) const; + std::shared_ptr get_input_port_paddle(const std::string& inputName, int 
inputPortIndex) const; + const ::paddle::framework::proto::OpDesc& get_desc() const; // External API methods std::vector get_consuming_ports() const override; @@ -149,21 +151,21 @@ public: Ptr get_target_tensor(const std::string& outputName, int outputPortIndex) const override; private: - const paddle::framework::proto::OpDesc& m_op_desc; - std::map>> m_input_ports; - std::map>> m_output_ports; + const ::paddle::framework::proto::OpDesc& m_op_desc; + std::map>> m_input_ports; + std::map>> m_output_ports; }; -class TensorPlacePDPD : public PlacePDPD { +class TensorPlace : public Place { public: - TensorPlacePDPD(const InputModel& input_model, - const std::vector& names, - const paddle::framework::proto::VarDesc& var_desc); + TensorPlace(const ov::frontend::InputModel& input_model, + const std::vector& names, + const ::paddle::framework::proto::VarDesc& var_desc); - TensorPlacePDPD(const InputModel& input_model, const paddle::framework::proto::VarDesc& var_desc); + TensorPlace(const ov::frontend::InputModel& input_model, const ::paddle::framework::proto::VarDesc& var_desc); - void add_producing_port(const std::shared_ptr& out_port); - void add_consuming_port(const std::shared_ptr& in_port); + void add_producing_port(const std::shared_ptr& out_port); + void add_consuming_port(const std::shared_ptr& in_port); // Internal usage const PartialShape& get_partial_shape() const { @@ -178,7 +180,7 @@ public: void set_element_type(const element::Type& type) { m_type = type; } - const paddle::framework::proto::VarDesc& get_desc() const; + const ::paddle::framework::proto::VarDesc& get_desc() const; // External usage Ptr get_producing_operation() const override; @@ -188,13 +190,14 @@ public: bool is_equal_data(const Ptr& another) const override; private: - const paddle::framework::proto::VarDesc& m_var_desc; + const ::paddle::framework::proto::VarDesc& m_var_desc; PartialShape m_pshape; element::Type m_type; - std::vector> m_producing_ports; - std::vector> m_consuming_ports; + std::vector> m_producing_ports; + std::vector> m_consuming_ports; }; +} // namespace paddle } // namespace frontend } // namespace ov diff --git a/src/frontends/paddlepaddle/src/proto/framework.proto b/src/frontends/paddle/src/proto/framework.proto similarity index 100% rename from src/frontends/paddlepaddle/src/proto/framework.proto rename to src/frontends/paddle/src/proto/framework.proto diff --git a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp b/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp deleted file mode 100644 index 9ab066dc759..00000000000 --- a/src/frontends/paddlepaddle/include/paddlepaddle_frontend/utility.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2018-2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -#ifdef OPENVINO_STATIC_LIBRARY -# define PDPD_API -# define PDPD_C_API -#else -# ifdef ov_paddlepaddle_frontend_EXPORTS -# define PDPD_API OPENVINO_CORE_EXPORTS -# define PDPD_C_API OPENVINO_EXTERN_C OPENVINO_CORE_EXPORTS -# else -# define PDPD_API OPENVINO_CORE_IMPORTS -# define PDPD_C_API OPENVINO_EXTERN_C OPENVINO_CORE_IMPORTS -# endif // ov_paddlepaddle_frontend_EXPORTS -#endif // OPENVINO_STATIC_LIBRARY - -#define PDPD_ASSERT(ex, msg) \ - { \ - if (!(ex)) \ - throw std::runtime_error(msg); \ - } - -#define PDPD_THROW(msg) throw std::runtime_error(std::string("ERROR: ") + msg) - -#define NOT_IMPLEMENTED(msg) throw std::runtime_error(std::string(msg) + " is not implemented") diff --git 
a/src/frontends/paddlepaddle/src/exceptions.hpp b/src/frontends/paddlepaddle/src/exceptions.hpp deleted file mode 100644 index 78ce6937d83..00000000000 --- a/src/frontends/paddlepaddle/src/exceptions.hpp +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) 2021 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include - -namespace ov { -namespace frontend { -namespace pdpd { -class NodeContext; - -class OpValidationFailurePDPD : public OpValidationFailure { -public: - OpValidationFailurePDPD(const CheckLocInfo& check_loc_info, - const pdpd::NodeContext& node, - const std::string& explanation) - : OpValidationFailure(check_loc_info, get_error_msg_prefix_pdpd(node), explanation) {} - -private: - static std::string get_error_msg_prefix_pdpd(const pdpd::NodeContext& node); -}; -} // namespace pdpd -} // namespace frontend - -/// \brief Macro to check whether a boolean condition holds. -/// \param node_context Object of NodeContext class -/// \param cond Condition to check -/// \param ... Additional error message info to be added to the error message via the `<<` -/// stream-insertion operator. Note that the expressions here will be evaluated lazily, -/// i.e., only if the `cond` evalutes to `false`. -/// \throws ::ov::OpValidationFailurePDPD if `cond` is false. -#define PDPD_OP_VALIDATION_CHECK(node_context, ...) \ - OPENVINO_ASSERT_HELPER(::ov::frontend::pdpd::OpValidationFailurePDPD, (node_context), __VA_ARGS__) -} // namespace ov diff --git a/src/tests/functional/inference_engine/CMakeLists.txt b/src/tests/functional/inference_engine/CMakeLists.txt index 0cb94729dbc..0e8eae35322 100644 --- a/src/tests/functional/inference_engine/CMakeLists.txt +++ b/src/tests/functional/inference_engine/CMakeLists.txt @@ -52,7 +52,7 @@ if (NOT ENABLE_OV_ONNX_FRONTEND) list(APPEND EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/onnx_reader") endif() -if (NOT ENABLE_OV_PDPD_FRONTEND) +if (NOT ENABLE_OV_PADDLE_FRONTEND) list(APPEND EXCLUDED_SOURCE_PATHS "${CMAKE_CURRENT_SOURCE_DIR}/paddle_reader") endif() @@ -77,10 +77,10 @@ if(ENABLE_OV_ONNX_FRONTEND) add_dependencies(${TARGET_NAME} ov_onnx_frontend) endif() -if(ENABLE_OV_PDPD_FRONTEND) +if(ENABLE_OV_PADDLE_FRONTEND) target_compile_definitions(${TARGET_NAME} PRIVATE PADDLE_TEST_MODELS="${CMAKE_CURRENT_SOURCE_DIR}/paddle_reader/models/") - add_dependencies(${TARGET_NAME} ov_paddlepaddle_frontend) + add_dependencies(${TARGET_NAME} ov_paddle_frontend) endif() ie_faster_build(${TARGET_NAME} diff --git a/src/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp b/src/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp index 8bb7b222bc1..dfa0fe053b8 100644 --- a/src/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp +++ b/src/tests/functional/inference_engine/paddle_reader/read_paddle_model_test.cpp @@ -15,7 +15,7 @@ #include "common_test_utils/ngraph_test_utils.hpp" #include "common_test_utils/unicode_utils.hpp" -TEST(PDPD_Reader_Tests, ImportBasicModelToCore) { +TEST(Paddle_Reader_Tests, ImportBasicModelToCore) { auto model = std::string(PADDLE_TEST_MODELS) + "relu.pdmodel"; InferenceEngine::Core ie; auto cnnNetwork = ie.ReadNetwork(model); @@ -41,14 +41,14 @@ TEST(PDPD_Reader_Tests, ImportBasicModelToCore) { const auto reference = std::make_shared( ngraph::NodeVector{ result }, ngraph::ParameterVector{ data }, - "RefPDPDFunction"); + "RefPaddleFunction"); const FunctionsComparator func_comparator = 
FunctionsComparator::with_default().enable(FunctionsComparator::NAMES); const FunctionsComparator::Result res = func_comparator(function, reference); ASSERT_TRUE(res.valid) << res.message; } #if defined(OPENVINO_ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) -TEST(PDPD_Reader_Tests, ImportBasicModelToCoreWstring) { +TEST(Paddle_Reader_Tests, ImportBasicModelToCoreWstring) { std::string win_dir_path{ PADDLE_TEST_MODELS "relu.pdmodel" }; std::wstring wmodel = CommonTestUtils::addUnicodePostfixToPath(win_dir_path, CommonTestUtils::test_unicode_postfix_vector[0]); @@ -82,7 +82,7 @@ TEST(PDPD_Reader_Tests, ImportBasicModelToCoreWstring) { const auto reference = std::make_shared( ngraph::NodeVector{ result }, ngraph::ParameterVector{ data }, - "RefPDPDFunction"); + "RefPaddleFunction"); const FunctionsComparator func_comparator = FunctionsComparator::with_default().enable(FunctionsComparator::NAMES); const FunctionsComparator::Result res = func_comparator(function, reference); ASSERT_TRUE(res.valid) << res.message; diff --git a/tests/fuzz/scripts/init_corpus.py b/tests/fuzz/scripts/init_corpus.py index e7a6ff66011..b4a0c872567 100755 --- a/tests/fuzz/scripts/init_corpus.py +++ b/tests/fuzz/scripts/init_corpus.py @@ -3,8 +3,8 @@ # SPDX-License-Identifier: Apache-2.0 # Sample usage: -# ./scripts/init_corpus.py ./pdpd_layer_models/**/*.pdmodel --join pdiparams -# mkdir -p corpus && find ./pdpd_layer_models/ -name "*.fuzz" -exec cp \{\} .//import_pdpd-corpus \; +# ./scripts/init_corpus.py ./paddle_layer_models/**/*.pdmodel --join pdiparams +# mkdir -p corpus && find ./paddle_layer_models/ -name "*.fuzz" -exec cp \{\} .//import_paddle-corpus \; import argparse import glob diff --git a/thirdparty/CMakeLists.txt b/thirdparty/CMakeLists.txt index 2e34ec79e53..84c10f8b044 100644 --- a/thirdparty/CMakeLists.txt +++ b/thirdparty/CMakeLists.txt @@ -82,7 +82,7 @@ endif() # Protobuf # -if(ENABLE_OV_PDPD_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_TF_FRONTEND) +if(ENABLE_OV_PADDLE_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_TF_FRONTEND) if(ENABLE_SYSTEM_PROTOBUF) set(Protobuf_USE_STATIC_LIBS ON) if(VERBOSE_BUILD) diff --git a/tools/deployment_manager/configs/darwin.json b/tools/deployment_manager/configs/darwin.json index a8b07728099..575fa4d8bc5 100644 --- a/tools/deployment_manager/configs/darwin.json +++ b/tools/deployment_manager/configs/darwin.json @@ -23,7 +23,7 @@ "runtime/lib/intel64/libov_auto_plugin.so", "runtime/lib/intel64/libov_ir_frontend.dylib", "runtime/lib/intel64/libov_onnx_frontend.dylib", - "runtime/lib/intel64/libov_paddlepaddle_frontend.dylib", + "runtime/lib/intel64/libov_paddle_frontend.dylib", "runtime/lib/intel64/plugins.xml", "runtime/3rdparty/tbb" ] diff --git a/tools/deployment_manager/configs/linux.json b/tools/deployment_manager/configs/linux.json index 2d17f1a788f..05e8ee9c983 100644 --- a/tools/deployment_manager/configs/linux.json +++ b/tools/deployment_manager/configs/linux.json @@ -29,7 +29,7 @@ "runtime/lib/intel64/libov_auto_plugin.so", "runtime/lib/intel64/libov_ir_frontend.so", "runtime/lib/intel64/libov_onnx_frontend.so", - "runtime/lib/intel64/libov_paddlepaddle_frontend.so", + "runtime/lib/intel64/libov_paddle_frontend.so", "runtime/lib/intel64/plugins.xml", "runtime/3rdparty/tbb" ] diff --git a/tools/deployment_manager/configs/windows.json b/tools/deployment_manager/configs/windows.json index 9cb8d68b379..729f2496ea8 100644 --- a/tools/deployment_manager/configs/windows.json +++ b/tools/deployment_manager/configs/windows.json @@ -23,7 +23,7 @@ 
"runtime/bin/intel64/Release/ov_auto_plugin.dll", "runtime/bin/intel64/Release/ov_ir_frontend.dll", "runtime/bin/intel64/Release/ov_onnx_frontend.dll", - "runtime/bin/intel64/Release/ov_paddlepaddle_frontend.dll", + "runtime/bin/intel64/Release/ov_paddle_frontend.dll", "runtime/bin/intel64/Release/plugins.xml", "runtime/3rdparty/tbb" ]