diff --git a/src/bindings/python/tests/test_inference_engine/test_compiled_model.py b/src/bindings/python/tests/test_inference_engine/test_compiled_model.py
index 60b5f44e8fc..e0bb872bf4d 100644
--- a/src/bindings/python/tests/test_inference_engine/test_compiled_model.py
+++ b/src/bindings/python/tests/test_inference_engine/test_compiled_model.py
@@ -29,8 +29,8 @@ def test_get_property(device):
         pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
     func = core.read_model(model=test_net_xml, weights=test_net_bin)
     exec_net = core.compile_model(func, device)
-    config = exec_net.get_property("PERF_COUNT")
-    assert config == "NO"
+    profiling_enabled = exec_net.get_property("PERF_COUNT")
+    assert not profiling_enabled


 def test_get_runtime_model(device):
diff --git a/src/plugins/intel_cpu/src/exec_network.cpp b/src/plugins/intel_cpu/src/exec_network.cpp
index b2f20cd9020..5116c57989a 100644
--- a/src/plugins/intel_cpu/src/exec_network.cpp
+++ b/src/plugins/intel_cpu/src/exec_network.cpp
@@ -26,6 +26,7 @@
 #include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
 #include "ie_icore.hpp"
 #include "openvino/runtime/properties.hpp"
+#include "openvino/util/common_util.hpp"
 #include
 #include
@@ -310,7 +311,8 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
     if (name == ov::model_name) {
         // @todo Does not seem ok to 'dump()' the whole graph everytime in order to get a name
-        return graph.dump()->get_friendly_name();
+        const std::string modelName = graph.dump()->get_friendly_name();
+        return decltype(ov::model_name)::value_type(modelName);
     } else if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = config.streamExecutorConfig._streams;
         return decltype(ov::optimal_number_of_infer_requests)::value_type(streams); // ov::optimal_number_of_infer_requests has no negative values
@@ -332,19 +334,20 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
         return ov::Affinity::NONE;
     } else if (name == ov::inference_num_threads) {
         const auto num_threads = config.streamExecutorConfig._threads;
-        return num_threads;
+        return decltype(ov::inference_num_threads)::value_type(num_threads);
     } else if (name == ov::enable_profiling.name()) {
         const bool perfCount = config.collectPerfCounters;
-        return perfCount ? "YES" : "NO";
+        return decltype(ov::enable_profiling)::value_type(perfCount);
     } else if (name == ov::hint::inference_precision) {
         const auto enforceBF16 = config.enforceBF16;
-        return enforceBF16 ? ov::element::bf16 : ov::element::f32;
+        const auto inference_precision = enforceBF16 ? ov::element::bf16 : ov::element::f32;
+        return decltype(ov::hint::inference_precision)::value_type(inference_precision);
     } else if (name == ov::hint::performance_mode) {
-        const auto perfHint = config.perfHintsConfig.ovPerfHint;
+        const auto perfHint = ov::util::from_string(config.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
         return perfHint;
     } else if (name == ov::hint::num_requests) {
         const auto perfHintNumRequests = config.perfHintsConfig.ovPerfHintNumRequests;
-        return perfHintNumRequests;
+        return decltype(ov::hint::num_requests)::value_type(perfHintNumRequests);
     }
     /* Internally legacy parameters are used with new API as part of migration procedure.
      * This fallback can be removed as soon as migration completed */
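The pattern above is the core of the change: each `GetMetric` branch now returns `decltype(ov::some_property)::value_type(...)` instead of an untyped value, so `ov::enable_profiling` comes back as a `bool` rather than the string `"YES"`/`"NO"`. That is also why the Python test switches from `config == "NO"` to `assert not profiling_enabled`. A minimal sketch of the mechanism, using a stand-in `Property<T>` template rather than the real `ov::Property` (all names below are illustrative, not the OpenVINO API):

```cpp
#include <iostream>
#include <string>
#include <type_traits>

// Stand-in for ov::Property<T>: carries the property's value type at
// compile time, so call sites can convert with decltype(prop)::value_type.
template <typename T>
struct Property {
    using value_type = T;  // the type a plugin must return for this property
    const char* key;       // string key used in GetMetric/GetConfig lookups
    constexpr const char* name() const { return key; }
};

// Illustrative stand-ins for ov::enable_profiling / ov::inference_num_threads.
constexpr Property<bool> enable_profiling{"PERF_COUNT"};
constexpr Property<int>  inference_num_threads{"INFERENCE_NUM_THREADS"};

int main() {
    const bool collectPerfCounters = false;

    // Old style: the plugin answered with the string "YES"/"NO".
    const std::string legacy = collectPerfCounters ? "YES" : "NO";

    // New style: the bool itself is returned, typed by the property.
    auto profiling = decltype(enable_profiling)::value_type(collectPerfCounters);
    static_assert(std::is_same<decltype(profiling), bool>::value,
                  "decltype(prop)::value_type pins the returned type");
    auto threads = decltype(inference_num_threads)::value_type(4);  // an int, not a string

    std::cout << "legacy: " << legacy << ", typed: " << std::boolalpha
              << profiling << ", threads: " << threads << "\n";
}
```

The benefit of the `decltype(...)::value_type` spelling over a plain cast is that the returned type is tied to the property's declaration, so if the property's value type ever changes, every return site either converts correctly or fails to compile.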
diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp
index b1e46577633..afe9058d4a7 100644
--- a/src/plugins/intel_cpu/src/plugin.cpp
+++ b/src/plugins/intel_cpu/src/plugin.cpp
@@ -106,6 +106,7 @@
 #include
 #include
 #include "openvino/runtime/core.hpp"
+#include "openvino/util/common_util.hpp"
 #include
 #include "performance_heuristics.hpp"
@@ -568,6 +569,7 @@ void Engine::ApplyPerformanceHints(std::map<std::string, std::string> &config, c
     if (mode_name == CONFIG_VALUE(LATENCY)) {
         config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = CONFIG_VALUE(CPU_THROUGHPUT_NUMA);
+        config[ov::num_streams.name()] = ov::util::to_string(ov::NumStreams(ov::NumStreams::NUMA));
     } else if (mode_name == CONFIG_VALUE(THROUGHPUT)) {
         const auto isa = dnnl::get_effective_cpu_isa();
         float isaSpecificThreshold = 1.0f;
@@ -625,6 +627,7 @@ void Engine::ApplyPerformanceHints(std::map<std::string, std::string> &config, c
                 engConfig.perfHintsConfig.ovPerfHintNumRequests);
         }
         config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = std::to_string(num_streams);
+        config[ov::num_streams.name()] = ov::util::to_string(ov::NumStreams(num_streams));
     }
 }
@@ -745,11 +748,11 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string, Parameter>& options) const
     } else if (name == ov::hint::performance_mode) {
-        const auto perfHint = engConfig.perfHintsConfig.ovPerfHint;
+        const auto perfHint = ov::util::from_string(engConfig.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
+        return perfHint;
     } else if (name == ov::hint::num_requests) {
         const auto perfHintNumRequests = engConfig.perfHintsConfig.ovPerfHintNumRequests;
         return decltype(ov::hint::num_requests)::value_type(perfHintNumRequests);
@@ -840,12 +843,13 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter>& options) const
     } else if (name == ov::available_devices) {
         const std::vector<std::string> availableDevices = { "" };
-        return availableDevices;
+        return decltype(ov::available_devices)::value_type(availableDevices);
     } else if (name == ov::device::capabilities) {
         std::vector<std::string> capabilities;
         if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
@@ -857,13 +861,13 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter>& options) const
     } else if (name == ov::range_for_async_infer_requests) {
         const std::tuple<unsigned int, unsigned int, unsigned int> range = std::make_tuple(1, 1, 1);
-        return range;
+        return decltype(ov::range_for_async_infer_requests)::value_type(range);
     } else if (name == ov::range_for_streams) {
         const std::tuple<unsigned int, unsigned int> range = std::make_tuple(1, parallel_get_max_threads());
-        return range;
+        return decltype(ov::range_for_streams)::value_type(range);
     }
     /* Internally legacy parameters are used with new API as part of migration procedure.
      * This fallback can be removed as soon as migration completed */
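Taken together, the plugin-side changes mean a client of the OpenVINO 2.0 API reads typed values end-to-end: `get_property(ov::enable_profiling)` yields a `bool`, and `get_property(ov::hint::performance_mode)` yields the enum that the plugin now parses with `ov::util::from_string`. A short usage sketch against the public 2.0 API (assumes an installed OpenVINO runtime; `"model.xml"` is a placeholder path, and the printed values depend on the device configuration):

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    auto compiled = core.compile_model("model.xml", "CPU");  // placeholder model path

    // Typed read: a bool, not a "YES"/"NO" string to compare against.
    const bool profiling = compiled.get_property(ov::enable_profiling);

    // The performance hint arrives as an enum, parsed plugin-side via
    // ov::util::from_string as shown in the diff above.
    const auto mode = compiled.get_property(ov::hint::performance_mode);

    std::cout << std::boolalpha << "profiling: " << profiling
              << ", latency hint: "
              << (mode == ov::hint::PerformanceMode::LATENCY) << "\n";
}
```

Note that `ApplyPerformanceHints` writes `ov::num_streams` alongside the legacy `CPU_THROUGHPUT_STREAMS` key, so both the old string-based config path and the new typed property stay in sync until the legacy fallback mentioned in the closing comments is removed.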