diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
index a4392d28479..b8f2a0409c0 100644
--- a/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
+++ b/inference-engine/src/mkldnn_plugin/mkldnn_plugin.cpp
@@ -558,7 +558,7 @@ Engine::NetworkPerfStats Engine::NetworkMemBandwidthTolerance(const InferenceEng
     res.ratio_compute_deconvs = total_deconvs ? static_cast<float>(compute_deconvs)/total_deconvs : 0;
     return res;
 }
-
+static bool hasAVX512();
 InferenceEngine::ExecutableNetworkInternal::Ptr
 Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std::map<std::string, std::string> &orig_config) {
     OV_ITT_SCOPED_TASK(itt::domains::MKLDNNPlugin, "Engine::LoadExeNetworkImpl");
@@ -624,16 +624,17 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
     int num_streams;
     bool considerNonLimited = false;
     if (NetworkToleranceForLowCache.maxMemTolerance > NetworkPerfStats::memThresholdNotLimited) {
-        std::cout << "case 1.0" << std::endl;
-    } else if (/* ... */ >= 12 && (NetworkToleranceForLowCache.ratio_mem_limited_convs <= NetworkPerfStats::memLimitedRatioThreshold)) {
-        std::cout << "case 1.2" << std::endl;
+    } else if (/* ... */ > NetworkPerfStats::memThresholdAssumeLimitedAVX512
+            && (NetworkToleranceForLowCache.ratio_mem_limited_convs <= NetworkPerfStats::memLimitedRatioThresholdAVX512)) {
+        std::cout << " case 1.2" <