diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp
index e2e893b207d..dd7bede1b2c 100644
--- a/src/plugins/intel_cpu/src/config.cpp
+++ b/src/plugins/intel_cpu/src/config.cpp
@@ -16,6 +16,7 @@
 #include <...>
 #include "openvino/core/type/element_type_traits.hpp"
 #include "openvino/runtime/properties.hpp"
+#include <cpu/x64/cpu_isa_traits.hpp>
 
 namespace MKLDNNPlugin {
 
@@ -42,7 +43,7 @@ Config::Config() {
     }
 #endif
 
-    if (!with_cpu_x86_bfloat16())
+    if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
         enforceBF16 = false;
 
     CPU_DEBUG_CAP_ENABLE(readDebugCapsProperties());
@@ -105,7 +106,7 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
             IE_THROW() << "Wrong value for property key " << PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE;
         } else if (key == PluginConfigParams::KEY_ENFORCE_BF16) {
             if (val == PluginConfigParams::YES) {
-                if (with_cpu_x86_avx512_core()) {
+                if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) {
                     enforceBF16 = true;
                     manualEnforceBF16 = true;
                 } else {
@@ -120,7 +121,7 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
             }
         } else if (key == ov::hint::inference_precision.name()) {
             if (val == "bf16") {
-                if (with_cpu_x86_avx512_core()) {
+                if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) {
                     enforceBF16 = true;
                     manualEnforceBF16 = true;
                 } else {
diff --git a/src/plugins/intel_cpu/src/mkldnn_plugin.cpp b/src/plugins/intel_cpu/src/mkldnn_plugin.cpp
index eecb6cf6f13..c460f4fee0e 100644
--- a/src/plugins/intel_cpu/src/mkldnn_plugin.cpp
+++ b/src/plugins/intel_cpu/src/mkldnn_plugin.cpp
@@ -121,11 +121,13 @@
 # ifdef _WIN32
 #  include <intrin.h>
 #  include <windows.h>
-# else
+# elif defined(__APPLE__)
 #  include <cpuid.h>
 # endif
 #endif
 
+#include <cpu/x64/cpu_isa_traits.hpp>
+
 using namespace MKLDNNPlugin;
 using namespace InferenceEngine;
 
@@ -200,7 +202,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function
             {ngraph::element::u4, ngraph::element::u8}
         };
 
-        if (!with_cpu_x86_avx512_core())
+        if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core))
             array.push_back({ngraph::element::bf16, ngraph::element::f32});
 
         return array;
@@ -504,7 +506,7 @@ static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function
     postLPTPassManager.register_pass<...>();
     postLPTPassManager.run_passes(nGraphFunc);
 
-    if (!useLpt && _enableSnippets && with_cpu_x86_avx2()) {
+    if (!useLpt && _enableSnippets && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2)) {
         ngraph::pass::Manager tokenization_manager;
         tokenization_manager.register_pass<...>();
         tokenization_manager.register_pass<...>();
@@ -656,7 +658,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
            || Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled for the plugin */;
     const auto& BF16Prop = config.find(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16);
     const bool enableBF16 = ((BF16Prop != config.end() && BF16Prop->second == PluginConfigParams::YES)
-            || engConfig.enforceBF16) && with_cpu_x86_avx512_core();
+            || engConfig.enforceBF16) && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core);
     const auto& modelCacheProp = config.find(InferenceEngine::PluginConfigParams::KEY_CACHE_DIR);
     const bool enableModelCache = (modelCacheProp != config.end() && !modelCacheProp->second.empty())
            || !engConfig.cache_dir.empty();
@@ -753,20 +755,6 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string, Parameter>& options) const {
-static bool hasAVX512() {
-#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64)
-    unsigned int regs[4] = {7, 0, 0, 0};
-#ifdef _WIN32
-    __cpuid(reinterpret_cast<int*>(regs), regs[0]);
-#else
-    __cpuid_count(regs[0], regs[1], regs[0], regs[1], regs[2], regs[3]);
-#endif
-    if (regs[1] & (1U << 16))
-        return true;
-#endif
-    return false;
-}
-
 Parameter Engine::GetMetricLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const {
     if (name == METRIC_KEY(SUPPORTED_METRICS)) {
         std::vector<std::string> metrics = {
@@ -787,9 +775,9 @@ Parameter Engine::GetMetricLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const {
         std::vector<std::string> capabilities;
-        if (with_cpu_x86_bfloat16())
+        if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
             capabilities.push_back(METRIC_VALUE(BF16));
-        if (hasAVX512())
+        if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_common))
             capabilities.push_back(METRIC_VALUE(WINOGRAD));
         capabilities.push_back(METRIC_VALUE(FP32));
         capabilities.push_back(METRIC_VALUE(FP16));
@@ -855,9 +843,9 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter>& options) const {
         std::vector<std::string> capabilities;
-        if (with_cpu_x86_bfloat16())
+        if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
             capabilities.push_back(METRIC_VALUE(BF16));
-        if (hasAVX512())
+        if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_common))
             capabilities.push_back(METRIC_VALUE(WINOGRAD));
         capabilities.push_back(METRIC_VALUE(FP32));
         capabilities.push_back(METRIC_VALUE(FP16));
@@ -904,7 +892,8 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
     const auto& lptProp = config.find(InferenceEngine::PluginConfigInternalParams::KEY_LP_TRANSFORMS_MODE);
     const bool enableLPT = (lptProp != config.end() && lptProp->second == PluginConfigParams::YES) /* enabled in the orig_config*/
            || Config::LPTransformsMode::On == engConfig.lpTransformsMode /* or already enabled */;
-    const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch || (conf.enforceBF16 && with_cpu_x86_avx512_core()));
+    const bool enableSnippets = !(conf.cache_dir.empty() || conf.enableDynamicBatch || (conf.enforceBF16
+            && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)));
     Transformation(clonedNetwork, enableLPT, enableSnippets, isLegacyAPI());
     auto ops = clonedNetwork.getFunction()->get_ordered_ops();
     std::unordered_set<std::string> supported;
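
Note: every with_cpu_x86_*() / hasAVX512() helper above is replaced with oneDNN's own ISA
query, dnnl::impl::cpu::x64::mayiuse(), declared in <cpu/x64/cpu_isa_traits.hpp>. A minimal
standalone sketch of that query (not part of the patch; it assumes oneDNN's internal headers
are on the include path, as they are when building the intel_cpu plugin):

    #include <cpu/x64/cpu_isa_traits.hpp>  // dnnl::impl::cpu::x64::mayiuse and the cpu_isa_t enum
    #include <iostream>

    int main() {
        using namespace dnnl::impl::cpu::x64;
        // mayiuse() consults oneDNN's cached Xbyak CPUID probe, so
        // repeated feature checks like the ones in this diff are cheap.
        std::cout << std::boolalpha
                  << "avx2:             " << mayiuse(avx2) << '\n'
                  << "avx512_common:    " << mayiuse(avx512_common) << '\n'
                  << "avx512_core:      " << mayiuse(avx512_core) << '\n'
                  << "avx512_core_bf16: " << mayiuse(avx512_core_bf16) << '\n';
        return 0;
    }

The replacements keep the old semantics: hasAVX512() tested CPUID leaf 7, EBX bit 16
(AVX-512 Foundation), which avx512_common models; with_cpu_x86_avx512_core() and
with_cpu_x86_bfloat16() map to avx512_core and avx512_core_bf16 respectively.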