From c500f0a783cbb91ccac1f687d6e4b67ef440f000 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Fri, 14 May 2021 21:27:12 +0300
Subject: [PATCH] Removed CPU GEMM cmake stuff (#5642)

* Removed CPU GEMM cmake stuff

* Fixed code style
---
 inference-engine/cmake/dependencies.cmake     | 56 -------------------
 inference-engine/cmake/features.cmake         | 17 ------
 .../src/mkldnn_plugin/CMakeLists.txt          | 33 ++---
 .../emitters/jit_bf16_emitters.hpp            |  6 +-
 .../emitters/jit_eltwise_emitters.hpp         |  6 +-
 .../emitters/jit_mkldnn_emitters.hpp          |  2 +-
 .../mkldnn_plugin/nodes/mkldnn_conv_node.h    |  2 +-
 .../src/mkldnn_plugin/nodes/mkldnn_mvn_node.h |  4 +-
 .../mkldnn_plugin/nodes/mkldnn_roll_node.h    |  2 +-
 .../src/mkldnn_plugin/utils/ngraph_utils.hpp  |  2 +-
 10 files changed, 18 insertions(+), 112 deletions(-)

diff --git a/inference-engine/cmake/dependencies.cmake b/inference-engine/cmake/dependencies.cmake
index a9261df7964..85adfd45089 100644
--- a/inference-engine/cmake/dependencies.cmake
+++ b/inference-engine/cmake/dependencies.cmake
@@ -38,62 +38,6 @@ if (ENABLE_MYRIAD)
     include(cmake/vpu_dependencies.cmake)
 endif()
 
-## enable cblas_gemm from OpenBLAS package
-if (ENABLE_MKL_DNN AND GEMM STREQUAL "OPENBLAS")
-    if(AARCH64)
-        if(DEFINED ENV{THIRDPARTY_SERVER_PATH})
-            set(IE_PATH_TO_DEPS "$ENV{THIRDPARTY_SERVER_PATH}")
-        elseif(DEFINED THIRDPARTY_SERVER_PATH)
-            set(IE_PATH_TO_DEPS "${THIRDPARTY_SERVER_PATH}")
-        else()
-            message(WARNING "OpenBLAS is not found!")
-        endif()
-
-        if(DEFINED IE_PATH_TO_DEPS)
-            reset_deps_cache(OpenBLAS_DIR)
-
-            RESOLVE_DEPENDENCY(OpenBLAS
-                    ARCHIVE_LIN "keembay/openblas_0.3.7_yocto_kmb.tar.xz"
-                    TARGET_PATH "${TEMP}/openblas_0.3.7_yocto_kmb"
-                    ENVIRONMENT "OpenBLAS_DIR"
-                    SHA256 "c75aac901d5297d6d60a4b1f941f0335d8fd7f52e0dff8c445f644e2e45e6fba")
-
-            update_deps_cache(OpenBLAS_DIR "${OpenBLAS}/lib/cmake/openblas" "Path to OpenBLAS package folder")
-
-            find_package(OpenBLAS QUIET)
-
-            if(OpenBLAS_FOUND)
-                set(BLAS_FOUND TRUE)
-                set(BLAS_INCLUDE_DIRS ${OpenBLAS_INCLUDE_DIRS})
-                set(BLAS_LIBRARIES ${OpenBLAS_LIBRARIES})
-            endif()
-
-            unset(IE_PATH_TO_DEPS)
-        endif()
-    endif()
-
-    if(NOT BLAS_LIBRARIES OR NOT BLAS_INCLUDE_DIRS)
-        find_package(BLAS REQUIRED)
-
-        if(BLAS_FOUND)
-            find_path(BLAS_INCLUDE_DIRS cblas.h)
-        else()
-            message(ERROR "OpenBLAS not found: install OpenBLAS or set -DBLAS_INCLUDE_DIRS= and -DBLAS_LIBRARIES=")
-        endif()
-    endif()
-
-    debug_message(STATUS "openblas=" ${BLAS_LIBRARIES})
-endif ()
-
-## MKL-ML package
-if (GEMM STREQUAL "MKL")
-    if(NOT MKLROOT)
-        message(FATAL_ERROR "MKLROOT not found: install MKL and set -DMKLROOT=")
-    endif()
-    set(MKL ${MKLROOT})
-    debug_message(STATUS "mkl_ml=" ${MKLROOT})
-endif ()
-
 ## Intel OMP package
 if (THREADING STREQUAL "OMP")
     reset_deps_cache(OMP)
diff --git a/inference-engine/cmake/features.cmake b/inference-engine/cmake/features.cmake
index db7a0028f60..6c42fe16a1e 100644
--- a/inference-engine/cmake/features.cmake
+++ b/inference-engine/cmake/features.cmake
@@ -8,23 +8,6 @@ ie_dependent_option (ENABLE_GNA "GNA support for inference engine" ON "NOT APPLE
 
 ie_dependent_option (ENABLE_CLDNN_TESTS "Enable clDNN unit tests" OFF "ENABLE_CLDNN" OFF)
 
-# "MKL-DNN library might use MKL-ML or OpenBLAS for gemm tasks: MKL|OPENBLAS|JIT"
-if (ENABLE_MKL_DNN)
-    if(AARCH64)
-        set(GEMM_DEFAULT "OPENBLAS")
-    else()
-        set(GEMM_DEFAULT "JIT")
-    endif()
-    set(GEMM "${GEMM_DEFAULT}" CACHE STRING "GEMM implementation")
-    set_property(CACHE GEMM PROPERTY STRINGS "MKL" "OPENBLAS" "JIT")
-    list (APPEND IE_OPTIONS GEMM)
-    if (NOT GEMM STREQUAL "MKL" AND
-        NOT GEMM STREQUAL "OPENBLAS" AND
-        NOT GEMM STREQUAL "JIT")
-        message(FATAL_ERROR "GEMM should be set to MKL, OPENBLAS or JIT. Default option is ${GEMM_DEFAULT}")
-    endif()
-endif()
-
 # "MKL-DNN library based on OMP or TBB or Sequential implementation: TBB|OMP|SEQ"
 if(X86 OR ARM OR (MSVC AND (ARM OR AARCH64)) )
     set(THREADING_DEFAULT "SEQ")
diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
index 388a600a697..fe57b29ddf6 100644
--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt
+++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt
@@ -16,25 +16,16 @@ if (ENABLE_CPU_DEBUG_CAPS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCPU_DEBUG_CAPS")
 endif()
 
-file(GLOB_RECURSE SOURCES
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
+file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
+file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.h
+                          ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
 
-file(GLOB HEADERS
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.h
-        ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)
-
-addVersionDefines(mkldnn_plugin.cpp CI_BUILD_NUMBER MKL_VERSION)
+addVersionDefines(mkldnn_plugin.cpp CI_BUILD_NUMBER)
 
 include_directories(
         $
         ${CMAKE_CURRENT_SOURCE_DIR}/mkldnn
-        ${CMAKE_CURRENT_SOURCE_DIR}
-        ${CMAKE_BINARY_DIR}/include
-)
-
-if (GEMM STREQUAL "MKL")
-    log_rpath_from_dir(MKL "${MKL}/lib")
-endif()
+        ${CMAKE_CURRENT_SOURCE_DIR})
 
 # create plugin
 
@@ -99,17 +90,3 @@ set_target_properties(${TARGET_NAME}_obj PROPERTIES EXCLUDE_FROM_ALL ON)
 
 set_target_properties(${TARGET_NAME} ${TARGET_NAME}_obj
                       PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})
-
-# install
-
-if(GEMM STREQUAL "MKL")
-    install(DIRECTORY "${MKL}/include"
-            DESTINATION ${IE_CPACK_IE_DIR}/external/mkltiny_lnx
-            COMPONENT cpu)
-    install(FILES "${MKLLIB}"
-            DESTINATION ${IE_CPACK_IE_DIR}/external/mkltiny_lnx/lib
-            COMPONENT cpu)
-    install(FILES "${MKL}/version.info"
-            DESTINATION ${IE_CPACK_IE_DIR}/external/mkltiny_lnx
-            COMPONENT cpu)
-endif()
diff --git a/inference-engine/src/mkldnn_plugin/emitters/jit_bf16_emitters.hpp b/inference-engine/src/mkldnn_plugin/emitters/jit_bf16_emitters.hpp
index 75ebf5b41d3..454a6c335af 100644
--- a/inference-engine/src/mkldnn_plugin/emitters/jit_bf16_emitters.hpp
+++ b/inference-engine/src/mkldnn_plugin/emitters/jit_bf16_emitters.hpp
@@ -13,9 +13,9 @@ public:
     jit_emu_vcvtneps2bf16(mkldnn::impl::cpu::x64::jit_generator* host, mkldnn::impl::cpu::x64::cpu_isa_t host_isa, const MKLDNNNode* node,
         InferenceEngine::Precision exec_prc = InferenceEngine::Precision::BF16) : jit_emitter(host, host_isa, node, exec_prc) {
         prepare_table();
-    };
+    }
 
-    size_t get_inputs_num() const override { return 1; };
+    size_t get_inputs_num() const override { return 1; }
 
 private:
     void emit_impl(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs,
@@ -71,4 +71,4 @@ private:
     size_t aux_vecs_count() const override { return 2; }
 };
 
-} // namespace MKLDNNPlugin {
\ No newline at end of file
+} // namespace MKLDNNPlugin
\ No newline at end of file
diff --git a/inference-engine/src/mkldnn_plugin/emitters/jit_eltwise_emitters.hpp b/inference-engine/src/mkldnn_plugin/emitters/jit_eltwise_emitters.hpp
index 7bd63c59c02..50ddb57bad6 100644
--- a/inference-engine/src/mkldnn_plugin/emitters/jit_eltwise_emitters.hpp
+++ b/inference-engine/src/mkldnn_plugin/emitters/jit_eltwise_emitters.hpp
@@ -190,9 +190,11 @@ private:
 
 class jit_squared_difference_emitter : public jit_emitter {
 public:
-    jit_squared_difference_emitter(mkldnn::impl::cpu::x64::jit_generator *host, mkldnn::impl::cpu::x64::cpu_isa_t host_isa, const MKLDNNNode* node,
+    jit_squared_difference_emitter(mkldnn::impl::cpu::x64::jit_generator *host, mkldnn::impl::cpu::x64::cpu_isa_t host_isa,
+                                   const MKLDNNNode* node,
                                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
-    jit_squared_difference_emitter(mkldnn::impl::cpu::x64::jit_generator *host, mkldnn::impl::cpu::x64::cpu_isa_t host_isa, const std::shared_ptr<ngraph::Node>& n,
+    jit_squared_difference_emitter(mkldnn::impl::cpu::x64::jit_generator *host, mkldnn::impl::cpu::x64::cpu_isa_t host_isa,
+                                   const std::shared_ptr<ngraph::Node>& n,
                                    InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
 
     size_t get_inputs_num() const override;
diff --git a/inference-engine/src/mkldnn_plugin/emitters/jit_mkldnn_emitters.hpp b/inference-engine/src/mkldnn_plugin/emitters/jit_mkldnn_emitters.hpp
index e045279307a..7c40d5fd713 100644
--- a/inference-engine/src/mkldnn_plugin/emitters/jit_mkldnn_emitters.hpp
+++ b/inference-engine/src/mkldnn_plugin/emitters/jit_mkldnn_emitters.hpp
@@ -23,6 +23,7 @@ public:
     void emit_impl(const std::vector<size_t> &in_idxs, const std::vector<size_t> &out_idxs,
                    const std::vector<size_t> &pool_vec_idxs, const std::vector<size_t> &pool_gpr_idxs,
                    const emitter_context *emit_context = nullptr) const override {};
+
 protected:
     jit_mkldnn_emitter(mkldnn::impl::cpu::x64::jit_generator *host, mkldnn::impl::cpu::x64::cpu_isa_t host_isa, const MKLDNNNode* node,
                        InferenceEngine::Precision exec_prc = InferenceEngine::Precision::FP32);
@@ -34,7 +35,6 @@ protected:
     float alpha {0.f};
     float beta {0.f};
 
-protected:
     std::shared_ptr<mkldnn::impl::cpu::x64::jit_uni_eltwise_injector_f32<mkldnn::impl::cpu::x64::sse41>> eltwise_injector_sse42;
     std::shared_ptr<mkldnn::impl::cpu::x64::jit_uni_eltwise_injector_f32<mkldnn::impl::cpu::x64::avx2>> eltwise_injector_avx2;
     std::shared_ptr<mkldnn::impl::cpu::x64::jit_uni_eltwise_injector_f32<mkldnn::impl::cpu::x64::avx512_common>> eltwise_injector_avx512_common;
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h
index a0a400dc008..d2ebc152987 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_conv_node.h
@@ -59,7 +59,7 @@ protected:
 
 private:
     void addZeroPoints(mkldnn::primitive_attr& attr) const;
-    void setPostOps(mkldnn::primitive_attr &attr, bool initWeights) const ;
+    void setPostOps(mkldnn::primitive_attr &attr, bool initWeights) const;
     void filterSupportedDescriptors();
     bool isPossibleToSkipInitConfig(MKLDNNDescriptor &desc) const;
 
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
index d988c1d103c..9ce7e784e40 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.h
@@ -86,11 +86,11 @@ public:
 
     inline bool getAcrossChannels() const {
         return acrossChannels_;
-    };
+    }
 
     inline bool getNormalizeVariance() const {
         return normalizeVariance_;
-    };
+    }
 
     bool canFuse(const MKLDNNNodePtr& node) const override;
 
diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roll_node.h b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roll_node.h
index c8118caa153..da597d4d981 100644
--- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roll_node.h
+++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roll_node.h
@@ -29,7 +29,7 @@ private:
     void rollImpl();
 
     std::vector<size_t> shape;
-    const static std::vector<size_t> supportedPrecisionSizes;
+    static const std::vector<size_t> supportedPrecisionSizes;
     std::string layerErrorPrefix;
     size_t numOfDims;
 
diff --git a/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp b/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp
index d4ea7a2088b..c6b54febc5a 100644
--- a/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp
+++ b/inference-engine/src/mkldnn_plugin/utils/ngraph_utils.hpp
@@ -18,7 +18,7 @@ inline std::string getRTInfoValue(const std::map &node) {
     const auto &rtInfo = node->get_rt_info();
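
Note on the "Fixed code style" part of this patch: besides the GEMM removal, the
hunks above drop stray semicolons after member-function bodies ("};" -> "}"),
reorder "const static" to "static const", and remove a space before ";". A
minimal standalone illustration of the semicolon fix (hypothetical example code,
not taken from the repository):

    // A member-function body is already a complete definition, so a trailing
    // semicolon only adds a redundant empty declaration; clang and GCC 8+
    // diagnose it under -Wextra-semi.
    struct Example {
        int value() const { return 1; }      // after the fix: no trailing ';'
        // int value() const { return 1; };  // before the fix: -Wextra-semi
    };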