[CPU] Align return types handling for all the new API parameters (#10363)

Egor Duplensky 2022-02-22 12:42:24 +03:00 committed by GitHub
parent 472ebc0cd9
commit 3f56438d06
3 changed files with 25 additions and 18 deletions


@@ -29,8 +29,8 @@ def test_get_property(device):
         pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
     func = core.read_model(model=test_net_xml, weights=test_net_bin)
     exec_net = core.compile_model(func, device)
-    config = exec_net.get_property("PERF_COUNT")
-    assert config == "NO"
+    profiling_enabled = exec_net.get_property("PERF_COUNT")
+    assert not profiling_enabled
 
 
 def test_get_runtime_model(device):
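
Note: the test change above is the user-visible effect of this commit — under the new API, ov::enable_profiling (legacy PERF_COUNT) comes back as a typed bool instead of the string "YES"/"NO". A minimal C++ sketch of the equivalent caller-side read; the model path is a placeholder and the exact ov::Core surface is assumed from the new (2022.1-era) API:

#include <openvino/runtime/core.hpp>
#include <iostream>

int main() {
    ov::Core core;
    auto compiled = core.compile_model("model.xml", "CPU");  // placeholder path
    // Typed read: yields bool directly, no string comparison against "YES"/"NO"
    const bool profiling = compiled.get_property(ov::enable_profiling);
    std::cout << std::boolalpha << profiling << "\n";
}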


@@ -26,6 +26,7 @@
 #include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
 #include "ie_icore.hpp"
 #include "openvino/runtime/properties.hpp"
+#include "openvino/util/common_util.hpp"
 #include <algorithm>
 #include <unordered_set>
@@ -310,7 +311,8 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
     if (name == ov::model_name) {
         // @todo Does not seem ok to 'dump()' the whole graph everytime in order to get a name
-        return graph.dump()->get_friendly_name();
+        const std::string modelName = graph.dump()->get_friendly_name();
+        return decltype(ov::model_name)::value_type(modelName);
     } else if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = config.streamExecutorConfig._streams;
         return decltype(ov::optimal_number_of_infer_requests)::value_type(streams); // ov::optimal_number_of_infer_requests has no negative values
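
Note: the pattern applied throughout this commit is to construct the returned Parameter from decltype(property)::value_type, so the stored value always has exactly the type the property declares. A self-contained sketch of why this matters; Property here is a simplified stand-in for ov::Property, and std::any stands in for InferenceEngine::Parameter:

#include <any>
#include <iostream>
#include <string>

// Simplified stand-in for ov::Property<T>: carries the value type at
// compile time and the string key at run time.
template <typename T>
struct Property {
    using value_type = T;
    const char* key;
    const char* name() const { return key; }
};

constexpr Property<std::string> model_name{"NETWORK_NAME"};
constexpr Property<bool> enable_profiling{"PERF_COUNT"};

std::any GetMetric(const std::string& name) {
    if (name == model_name.name()) {
        // decltype(...)::value_type pins the stored type to std::string;
        // a bare `return "some_model";` would store a const char* instead.
        return decltype(model_name)::value_type("some_model");
    } else if (name == enable_profiling.name()) {
        // Likewise pins bool; the old code stored the string "YES"/"NO".
        return decltype(enable_profiling)::value_type(false);
    }
    return {};
}

int main() {
    // any_cast succeeds only because the stored type matches exactly.
    std::cout << std::any_cast<std::string>(GetMetric("NETWORK_NAME")) << "\n";
    std::cout << std::boolalpha << std::any_cast<bool>(GetMetric("PERF_COUNT")) << "\n";
}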
@@ -332,19 +334,20 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
         return ov::Affinity::NONE;
     } else if (name == ov::inference_num_threads) {
         const auto num_threads = config.streamExecutorConfig._threads;
-        return num_threads;
+        return decltype(ov::inference_num_threads)::value_type(num_threads);
     } else if (name == ov::enable_profiling.name()) {
         const bool perfCount = config.collectPerfCounters;
-        return perfCount ? "YES" : "NO";
+        return decltype(ov::enable_profiling)::value_type(perfCount);
     } else if (name == ov::hint::inference_precision) {
         const auto enforceBF16 = config.enforceBF16;
-        return enforceBF16 ? ov::element::bf16 : ov::element::f32;
+        const auto inference_precision = enforceBF16 ? ov::element::bf16 : ov::element::f32;
+        return decltype(ov::hint::inference_precision)::value_type(inference_precision);
     } else if (name == ov::hint::performance_mode) {
-        const auto perfHint = config.perfHintsConfig.ovPerfHint;
+        const auto perfHint = ov::util::from_string(config.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
         return perfHint;
     } else if (name == ov::hint::num_requests) {
         const auto perfHintNumRequests = config.perfHintsConfig.ovPerfHintNumRequests;
-        return perfHintNumRequests;
+        return decltype(ov::hint::num_requests)::value_type(perfHintNumRequests);
     }
     /* Internally legacy parameters are used with new API as part of migration procedure.
      * This fallback can be removed as soon as migration completed */
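
Note: ov::util::from_string (pulled in through common_util.hpp above) parses the legacy string hint into the property's typed enum value. A rough sketch of the idea — this is NOT the actual OpenVINO helper, just an assumed shape of it:

#include <sstream>
#include <string>

// Sketch only: convert a string to the value_type of a property, relying on
// operator>> being defined for that type (properties.hpp provides stream
// operators for ov::hint::PerformanceMode).
template <typename PropertyT>
typename PropertyT::value_type from_string(const std::string& str, const PropertyT&) {
    typename PropertyT::value_type value{};
    std::stringstream ss(str);
    ss >> value;
    return value;
}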


@@ -106,6 +106,7 @@
 #include <low_precision/multiply_to_group_convolution.hpp>
 #include <low_precision/network_helper.hpp>
 #include "openvino/runtime/core.hpp"
+#include "openvino/util/common_util.hpp"
 #include <ie_algorithm.hpp>
 #include "performance_heuristics.hpp"
@@ -568,6 +569,7 @@ void Engine::ApplyPerformanceHints(std::map<std::string, std::string> &config, c
     if (mode_name == CONFIG_VALUE(LATENCY)) {
         config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = CONFIG_VALUE(CPU_THROUGHPUT_NUMA);
+        config[ov::num_streams.name()] = ov::util::to_string(ov::NumStreams(ov::NumStreams::NUMA));
     } else if (mode_name == CONFIG_VALUE(THROUGHPUT)) {
         const auto isa = dnnl::get_effective_cpu_isa();
         float isaSpecificThreshold = 1.0f;
@@ -625,6 +627,7 @@ void Engine::ApplyPerformanceHints(std::map<std::string, std::string> &config, c
                 engConfig.perfHintsConfig.ovPerfHintNumRequests);
         }
         config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = std::to_string(num_streams);
+        config[ov::num_streams.name()] = ov::util::to_string(ov::NumStreams(num_streams));
     }
 }
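
Note: both hint branches now mirror the legacy CPU_THROUGHPUT_STREAMS key into the new ov::num_streams property, serialized with ov::util::to_string so that later typed reads round-trip. The snippet below reuses exactly the expressions from the diff and would compile inside the OpenVINO source tree where common_util.hpp is available; the printed representations are not asserted here:

#include <openvino/runtime/properties.hpp>
#include <openvino/util/common_util.hpp>
#include <iostream>

int main() {
    // Same serialization the plugin performs when mirroring the hint:
    std::cout << ov::util::to_string(ov::NumStreams(ov::NumStreams::NUMA)) << "\n";  // LATENCY branch
    std::cout << ov::util::to_string(ov::NumStreams(4)) << "\n";  // THROUGHPUT branch, computed count
}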
@@ -745,11 +748,11 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string,
         return decltype(ov::enable_profiling)::value_type(perfCount);
     } else if (name == ov::hint::inference_precision) {
         const auto enforceBF16 = engConfig.enforceBF16;
-        return decltype(ov::hint::inference_precision)::value_type(
-            enforceBF16 ? ov::element::bf16 : ov::element::f32);
+        const auto inference_precision = enforceBF16 ? ov::element::bf16 : ov::element::f32;
+        return decltype(ov::hint::inference_precision)::value_type(inference_precision);
     } else if (name == ov::hint::performance_mode) {
-        const auto perfHint = engConfig.perfHintsConfig.ovPerfHint;
-        return ov::Any{perfHint}.as<decltype(ov::hint::performance_mode)::value_type>();
+        const auto perfHint = ov::util::from_string(engConfig.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
+        return perfHint;
     } else if (name == ov::hint::num_requests) {
         const auto perfHintNumRequests = engConfig.perfHintsConfig.ovPerfHintNumRequests;
         return decltype(ov::hint::num_requests)::value_type(perfHintNumRequests);
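
Note: with GetConfig returning the enum directly, a new-API caller receives a typed value with no string handling. A brief usage sketch; the ov::Core::get_property overload and the "CPU" device name are assumptions based on the new API:

#include <openvino/runtime/core.hpp>
#include <iostream>

int main() {
    ov::Core core;
    // Returns ov::hint::PerformanceMode, not a string such as "LATENCY"
    const auto mode = core.get_property("CPU", ov::hint::performance_mode);
    std::cout << (mode == ov::hint::PerformanceMode::LATENCY ? "latency" : "other") << "\n";
}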
@@ -840,12 +843,13 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
         supportedProperties.reserve(roProperties.size() + rwProperties.size());
         supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
         supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
-        return supportedProperties;
+        return decltype(ov::supported_properties)::value_type(supportedProperties);
     } else if (name == ov::device::full_name) {
-        return deviceFullName;
+        return decltype(ov::device::full_name)::value_type(deviceFullName);
     } else if (name == ov::available_devices) {
         const std::vector<std::string> availableDevices = { "" };
-        return availableDevices;
+        return decltype(ov::available_devices)::value_type(availableDevices);
     } else if (name == ov::device::capabilities) {
         std::vector<std::string> capabilities;
         if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16))
@@ -857,13 +861,13 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
         capabilities.push_back(METRIC_VALUE(INT8));
         capabilities.push_back(METRIC_VALUE(BIN));
         capabilities.push_back(ov::device::capability::EXPORT_IMPORT);
-        return capabilities;
+        return decltype(ov::device::capabilities)::value_type(capabilities);
     } else if (name == ov::range_for_async_infer_requests) {
         const std::tuple<unsigned int, unsigned int, unsigned int> range = std::make_tuple(1, 1, 1);
-        return range;
+        return decltype(ov::range_for_async_infer_requests)::value_type(range);
     } else if (name == ov::range_for_streams) {
         const std::tuple<unsigned int, unsigned int> range = std::make_tuple(1, parallel_get_max_threads());
-        return range;
+        return decltype(ov::range_for_streams)::value_type(range);
     }
     /* Internally legacy parameters are used with new API as part of migration procedure.
      * This fallback can be removed as soon as migration completed */
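
Note: since ov::supported_properties is now returned as its declared value_type (a vector of property names tagged read-only or read-write), callers can enumerate it directly. A short sketch, assuming ov::PropertyName with is_mutable() as in the new API:

#include <openvino/runtime/core.hpp>
#include <iostream>

int main() {
    ov::Core core;
    // Typed read: iterates the plugin's RO + RW properties collected above
    for (const auto& prop : core.get_property("CPU", ov::supported_properties))
        std::cout << prop << (prop.is_mutable() ? " (RW)" : " (RO)") << "\n";
}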