[CPU] Add support for OV2.0 configuration API (#9997)

Egor Duplensky 2022-02-04 22:26:42 +03:00 committed by GitHub
parent a8c520878d
commit c83d265416
8 changed files with 491 additions and 96 deletions
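Below is a minimal usage sketch of the OV 2.0 configuration API this commit wires up for the CPU plugin. The property keys and value types come straight from the diff; the surrounding program is illustrative only.

#include <openvino/core/type/element_type.hpp>
#include <openvino/runtime/core.hpp>
#include <openvino/runtime/properties.hpp>

int main() {
    ov::Core core;
    // Typed properties replace the string-based KEY_* pairs of the legacy API.
    core.set_property("CPU", ov::enable_profiling(true));
    core.set_property("CPU", ov::inference_num_threads(4));
    core.set_property("CPU", ov::hint::inference_precision(ov::element::f32));
    // Typed reads replace string lookups through GetConfig()/GetMetric().
    const int32_t num_threads = core.get_property("CPU", ov::inference_num_threads);
    return num_threads == 4 ? 0 : 1;
}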


@@ -156,7 +156,7 @@ struct Property<T, PropertyMutability::RO> : public util::BaseProperty<T, Proper
};
/**
* @brief Read-only property to get a std::vector<PropertyName> of supported read-only properies.
* @brief Read-only property to get a std::vector<PropertyName> of supported read-only properties.
*
* This can be used as a compiled model property as well.
*
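As the note above says, this property can also be queried on a compiled model. A short sketch (the helper name is illustrative):

#include <iostream>
#include <openvino/runtime/compiled_model.hpp>

// Prints every supported property together with its mutability flag.
void print_supported_properties(const ov::CompiledModel& compiled_model) {
    const std::vector<ov::PropertyName> properties =
        compiled_model.get_property(ov::supported_properties);
    for (const auto& property : properties)
        std::cout << property << (property.is_mutable() ? " (RW)" : " (RO)") << std::endl;
}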


@@ -14,6 +14,8 @@
#include "ie_system_conf.h"
#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
#include "openvino/core/type/element_type_traits.hpp"
#include "openvino/runtime/properties.hpp"
namespace MKLDNNPlugin {
@@ -116,6 +118,21 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_ENFORCE_BF16
<< ". Expected only YES/NO";
}
} else if (key == ov::hint::inference_precision.name()) {
if (val == "bf16") {
if (with_cpu_x86_avx512_core()) {
enforceBF16 = true;
manualEnforceBF16 = true;
} else {
IE_THROW() << "Platform doesn't support BF16 format";
}
} else if (val == "f32") {
enforceBF16 = false;
manualEnforceBF16 = false;
} else {
IE_THROW() << "Wrong value for property key " << ov::hint::inference_precision.name()
<< ". Supported values: bf16, f32";
}
} else if (key == PluginConfigParams::KEY_CACHE_DIR) {
cache_dir = val;
} else if (PluginConfigInternalParams::KEY_CPU_RUNTIME_CACHE_CAPACITY == key) {
@@ -132,58 +149,67 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
} else {
IE_THROW(NotFound) << "Unsupported property " << key << " by CPU plugin";
}
_config.clear();
}
if (!prop.empty())
_config.clear();
if (exclusiveAsyncRequests) // Exclusive request feature disables the streams
streamExecutorConfig._streams = 1;
CPU_DEBUG_CAP_ENABLE(readDebugCapsProperties());
updateProperties();
}
void Config::updateProperties() {
if (!_config.size()) {
switch (streamExecutorConfig._threadBindingType) {
case IStreamsExecutor::ThreadBindingType::NONE:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::NO });
break;
case IStreamsExecutor::ThreadBindingType::CORES:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::YES });
break;
case IStreamsExecutor::ThreadBindingType::NUMA:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::NUMA });
break;
case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::HYBRID_AWARE });
break;
}
if (collectPerfCounters == true)
_config.insert({ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::NO });
if (exclusiveAsyncRequests == true)
_config.insert({ PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::NO });
if (enableDynamicBatch == true)
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::NO });
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_LIMIT, std::to_string(batchLimit) });
_config.insert({ PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, std::to_string(streamExecutorConfig._streams) });
_config.insert({ PluginConfigParams::KEY_CPU_THREADS_NUM, std::to_string(streamExecutorConfig._threads) });
IE_SUPPRESS_DEPRECATED_START
_config.insert({ PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, dumpToDot });
IE_SUPPRESS_DEPRECATED_END
if (enforceBF16)
_config.insert({ PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO });
_config.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT, perfHintsConfig.ovPerfHint });
_config.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS,
std::to_string(perfHintsConfig.ovPerfHintNumRequests) });
_config.insert({PluginConfigParams::KEY_CACHE_DIR, cache_dir});
void Config::updateProperties() {
if (!_config.empty())
return;
switch (streamExecutorConfig._threadBindingType) {
case IStreamsExecutor::ThreadBindingType::NONE:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::NO });
break;
case IStreamsExecutor::ThreadBindingType::CORES:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::YES });
break;
case IStreamsExecutor::ThreadBindingType::NUMA:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::NUMA });
break;
case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE:
_config.insert({ PluginConfigParams::KEY_CPU_BIND_THREAD, PluginConfigParams::HYBRID_AWARE });
break;
}
if (collectPerfCounters == true)
_config.insert({ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::NO });
if (exclusiveAsyncRequests == true)
_config.insert({ PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::NO });
if (enableDynamicBatch == true)
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES });
else
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::NO });
_config.insert({ PluginConfigParams::KEY_DYN_BATCH_LIMIT, std::to_string(batchLimit) });
_config.insert({ PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, std::to_string(streamExecutorConfig._streams) });
_config.insert({ PluginConfigParams::KEY_CPU_THREADS_NUM, std::to_string(streamExecutorConfig._threads) });
IE_SUPPRESS_DEPRECATED_START
_config.insert({ PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, dumpToDot });
IE_SUPPRESS_DEPRECATED_END;
if (enforceBF16) {
_config.insert({ PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES });
} else {
_config.insert({ PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO });
}
_config.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT, perfHintsConfig.ovPerfHint });
_config.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS,
std::to_string(perfHintsConfig.ovPerfHintNumRequests) });
_config.insert({PluginConfigParams::KEY_CACHE_DIR, cache_dir});
}
#ifdef CPU_DEBUG_CAPS
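
A hedged sketch of how a caller reaches the new ov::hint::inference_precision branch of readProperties() above; the error text is quoted from the diff, while the helper name is an illustration:

#include <iostream>
#include <openvino/runtime/core.hpp>

// "bf16" maps to enforceBF16 = manualEnforceBF16 = true and requires avx512_core;
// on unsupported hardware the plugin throws "Platform doesn't support BF16 format".
void try_enable_bf16(ov::Core& core) {
    try {
        core.set_property("CPU", ov::hint::inference_precision(ov::element::bf16));
    } catch (const ov::Exception& e) {
        std::cerr << e.what() << std::endl;  // keep the f32 default instead
    }
}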


@@ -11,6 +11,7 @@
#include "mkldnn_memory_state.h"
#include "mkldnn_itt.h"
#include "mkldnn_serialize.h"
#include "ngraph/type/element_type.hpp"
#include "nodes/mkldnn_memory_node.hpp"
#include <threading/ie_executor_manager.hpp>
#define FIX_62820 0
@@ -19,14 +20,16 @@
#endif
#include <threading/ie_cpu_streams_executor.hpp>
#include <ie_system_conf.h>
#include <algorithm>
#include <unordered_set>
#include <utility>
#include <cstring>
#include <ngraph/opsets/opset1.hpp>
#include <transformations/utils/utils.hpp>
#include "cpp_interfaces/interface/ie_iplugin_internal.hpp"
#include "ie_icore.hpp"
#include "openvino/runtime/properties.hpp"
#include <algorithm>
#include <unordered_set>
#include <utility>
#include <cstring>
using namespace MKLDNNPlugin;
using namespace InferenceEngine;
@@ -194,14 +197,25 @@ InferenceEngine::IInferRequestInternal::Ptr MKLDNNExecNetwork::CreateInferReques
}
std::shared_ptr<ngraph::Function> MKLDNNExecNetwork::GetExecGraphInfo() {
if (_graphs.size() == 0)
if (_graphs.empty())
IE_THROW() << "No graph was found";
return GetGraph()._graph.dump();
}
Parameter MKLDNNExecNetwork::GetConfig(const std::string &name) const {
if (_graphs.size() == 0) IE_THROW() << "No graph was found";
bool MKLDNNExecNetwork::isLegacyAPI() const {
const auto& core = _plugin->GetCore();
if (!core)
IE_THROW() << "Unable to get API version. Core is unavailable";
return !core->isNewAPI();
}
Parameter MKLDNNExecNetwork::GetConfigLegacy(const std::string &name) const {
if (_graphs.empty())
IE_THROW() << "No graph was found";
/* The legacy implementation returns all the parameters, which is actually not correct,
* since they are not reconfigurable. Fixed for the new API. */
Config engConfig = GetGraph()._graph.getProperty();
auto option = engConfig._config.find(name);
if (option != engConfig._config.end()) {
@@ -211,12 +225,21 @@ Parameter MKLDNNExecNetwork::GetConfig(const std::string &name) const {
}
}
InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name) const {
if (_graphs.size() == 0)
IE_THROW() << "No graph was found";
/**
* Only legacy parameters are supported.
* No RW properties are supported for the new API.
* All the RO properties are covered by the GetMetric() method, and
* GetConfig() is not expected to be called by the new API with parameters from the new configuration API.
*/
Parameter MKLDNNExecNetwork::GetConfig(const std::string &name) const {
/* Internally, legacy parameters are used with the new API as part of the migration procedure.
* This fallback can be removed as soon as the migration is completed. */
return GetConfigLegacy(name);
}
InferenceEngine::Parameter MKLDNNExecNetwork::GetMetricLegacy(const std::string &name, const Graph& graph) const {
if (name == METRIC_KEY(NETWORK_NAME)) {
IE_SET_METRIC_RETURN(NETWORK_NAME, GetGraph()._graph.dump()->get_friendly_name());
IE_SET_METRIC_RETURN(NETWORK_NAME, graph.dump()->get_friendly_name());
} else if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics;
metrics.push_back(METRIC_KEY(NETWORK_NAME));
@@ -226,12 +249,12 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics);
} else if (name == METRIC_KEY(SUPPORTED_CONFIG_KEYS)) {
std::vector<std::string> configKeys;
for (auto && key : GetGraph()._graph.getProperty()._config) {
for (auto && key : graph.getProperty()._config) {
configKeys.push_back(key.first);
}
IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, configKeys);
} else if (name == METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)) {
Config engConfig = GetGraph()._graph.getProperty();
Config engConfig = graph.getProperty();
auto option = engConfig._config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS));
IE_ASSERT(option != engConfig._config.end());
auto streams = std::stoi(option->second);
@@ -242,6 +265,80 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
}
}
InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name) const {
if (_graphs.empty())
IE_THROW() << "No graph was found";
// @todo Can't we just use a local copy (_cfg) instead?
auto graphLock = GetGraph();
const auto& graph = graphLock._graph;
const auto& config = graph.getProperty();
if (isLegacyAPI()) {
return GetMetricLegacy(name, graph);
}
auto RO_property = [](const std::string& propertyName) {
return ov::PropertyName(propertyName, ov::PropertyMutability::RO);
};
if (name == ov::supported_properties) {
return std::vector<ov::PropertyName> {
RO_property(ov::supported_properties.name()),
RO_property(ov::model_name.name()),
RO_property(ov::optimal_number_of_infer_requests.name()),
RO_property(ov::streams::num.name()),
RO_property(ov::affinity.name()),
RO_property(ov::inference_num_threads.name()),
RO_property(ov::enable_profiling.name()),
RO_property(ov::hint::inference_precision.name()),
RO_property(ov::hint::performance_mode.name()),
RO_property(ov::hint::num_requests.name()),
};
}
if (name == ov::model_name) {
// @todo It does not seem OK to dump() the whole graph every time just to get its name
return graph.dump()->get_friendly_name();
} else if (name == ov::optimal_number_of_infer_requests) {
const auto streams = config.streamExecutorConfig._streams;
return static_cast<uint32_t>(streams); // ov::optimal_number_of_infer_requests has no negative values
} else if (name == ov::streams::num) {
const auto streams = config.streamExecutorConfig._streams;
return static_cast<int32_t>(streams); // ov::streams::num has special negative values (AUTO = -1, NUMA = -2)
} else if (name == ov::affinity) {
const auto affinity = config.streamExecutorConfig._threadBindingType;
switch (affinity) {
case InferenceEngine::IStreamsExecutor::ThreadBindingType::NONE:
return ov::Affinity::NONE;
case InferenceEngine::IStreamsExecutor::ThreadBindingType::CORES:
return ov::Affinity::CORE;
case InferenceEngine::IStreamsExecutor::ThreadBindingType::NUMA:
return ov::Affinity::NUMA;
case InferenceEngine::IStreamsExecutor::ThreadBindingType::HYBRID_AWARE:
return ov::Affinity::HYBRID_AWARE;
}
return ov::Affinity::NONE;
} else if (name == ov::inference_num_threads) {
const auto num_threads = config.streamExecutorConfig._threads;
return num_threads;
} else if (name == ov::enable_profiling.name()) {
const bool perfCount = config.collectPerfCounters;
return perfCount ? "YES" : "NO";
} else if (name == ov::hint::inference_precision) {
const auto enforceBF16 = config.enforceBF16;
return enforceBF16 ? ov::element::bf16 : ov::element::f32;
} else if (name == ov::hint::performance_mode) {
const auto perfHint = config.perfHintsConfig.ovPerfHint;
return perfHint;
} else if (name == ov::hint::num_requests) {
const auto perfHintNumRequests = config.perfHintsConfig.ovPerfHintNumRequests;
return perfHintNumRequests;
}
/* Internally, legacy parameters are used with the new API as part of the migration procedure.
* This fallback can be removed as soon as the migration is completed. */
return GetMetricLegacy(name, graph);
}
bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::CNNNetwork &network) const {
InputsDataMap inputs = network.getInputsInfo();
@@ -254,7 +351,7 @@ bool MKLDNNExecNetwork::CanProcessDynBatch(const InferenceEngine::CNNNetwork &ne
}
auto ops = function->get_ordered_ops();
for (auto op : ops) {
for (const auto& op : ops) {
auto type = TypeFromName(op->get_type_name());
if (type == Tile) {
const auto tile = std::dynamic_pointer_cast<const ngraph::opset1::Tile>(op);
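The new GetMetric() overload above serves typed properties from the graph's Config; a minimal read-side sketch (the helper name is illustrative):

#include <openvino/runtime/compiled_model.hpp>

// Reads a few of the typed RO properties advertised via ov::supported_properties.
void inspect_compiled_model(const ov::CompiledModel& compiled_model) {
    const auto n_requests = compiled_model.get_property(ov::optimal_number_of_infer_requests);  // uint32_t, non-negative
    const auto streams    = compiled_model.get_property(ov::streams::num);  // AUTO = -1, NUMA = -2 are reserved values
    const auto affinity   = compiled_model.get_property(ov::affinity);      // ov::Affinity enum
    const auto precision  = compiled_model.get_property(ov::hint::inference_precision);  // bf16 iff enforceBF16
    (void)n_requests; (void)streams; (void)affinity; (void)precision;
}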


@@ -73,8 +73,13 @@ protected:
*/
Graph::Lock GetGraph() const;
bool CanProcessDynBatch(const InferenceEngine::CNNNetwork &network) const;
bool isLegacyAPI() const;
InferenceEngine::Parameter GetConfigLegacy(const std::string &name) const;
InferenceEngine::Parameter GetMetricLegacy(const std::string &name, const Graph& graph) const;
};
} // namespace MKLDNNPlugin


@@ -129,8 +129,32 @@
using namespace MKLDNNPlugin;
using namespace InferenceEngine;
#define IE_CPU_PLUGIN_THROW(...) IE_THROW(__VA_ARGS__) << "CPU plugin: "
Engine::Engine() {
static std::string getDeviceFullName() {
std::string brand_string;
#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64)
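// CPUID leaves 0x80000002..0x80000004 each return 16 bytes (4 registers) of the 48-byte brand string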
const unsigned int addr_list[3] = { 0x80000002, 0x80000003, 0x80000004 };
unsigned int regs[4];
for (auto addr : addr_list) {
regs[0] = addr;
#ifdef _WIN32
__cpuid(reinterpret_cast<int*>(regs), regs[0]);
#else
__get_cpuid(regs[0], &regs[0], &regs[1], &regs[2], &regs[3]);
#endif
char *ch = reinterpret_cast<char*>(&regs[0]);
for (size_t j = 0; j < sizeof(regs); j++)
brand_string += ch[j];
}
#else
brand_string = "Non Intel Architecture";
#endif
return brand_string;
}
Engine::Engine() :
deviceFullName(getDeviceFullName()) {
_pluginName = "CPU";
extensionManager->AddExtension(std::make_shared<MKLDNNPlugin::MKLDNNExtension>());
}
@@ -141,7 +165,7 @@ Engine::~Engine() {
ExecutorManager::getInstance()->clear("CPUCallbackExecutor");
}
static void TransformationUpToCPUSpecificOpSet(std::shared_ptr<ngraph::Function> nGraphFunc, const bool _enableLPT,
static void TransformationUpToCPUSpecificOpSet(const std::shared_ptr<ngraph::Function>& nGraphFunc, const bool _enableLPT,
const bool _enableSnippets) {
ngraph::pass::Manager manager;
manager.set_per_pass_validation(false);
@@ -555,7 +579,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
};
if (!supported_precisions.count(input_precision)) {
IE_THROW(NotImplemented)
IE_CPU_PLUGIN_THROW(NotImplemented)
<< "Input image format " << input_precision << " is not supported yet...";
}
}
@@ -654,6 +678,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
// update the props after the perf mode translated to configs
// TODO: Clarify the behavior of SetConfig method. Skip eng_config or not?
Config conf = engConfig;
conf.readProperties(config);
if (conf.enableDynamicBatch) {
conf.batchLimit = static_cast<int>(network.getBatchSize());
@@ -663,22 +688,73 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
}
void Engine::SetConfig(const std::map<std::string, std::string> &config) {
// accumulate config parameters on engine level
streamsSet = (config.find(PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS) != config.end());
engConfig.readProperties(config);
}
Parameter Engine::GetConfig(const std::string& name, const std::map<std::string, Parameter>& /*options*/) const {
bool Engine::isLegacyAPI() const {
const auto& core = GetCore();
if (!core)
IE_CPU_PLUGIN_THROW() << "Unable to get API version. Core is unavailable";
return !core->isNewAPI();
}
Parameter Engine::GetConfigLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const {
Parameter result;
auto option = engConfig._config.find(name);
if (option != engConfig._config.end()) {
result = option->second;
} else {
IE_THROW() << "Unsupported config key " << name;
IE_CPU_PLUGIN_THROW() << ". Unsupported config parameter: " << name;
}
return result;
}
Parameter Engine::GetConfig(const std::string& name, const std::map<std::string, Parameter>& options) const {
if (isLegacyAPI())
return GetConfigLegacy(name, options);
if (name == ov::optimal_number_of_infer_requests) {
const auto streams = engConfig.streamExecutorConfig._streams;
return static_cast<uint32_t>(streams); // ov::optimal_number_of_infer_requests has no negative values
} else if (name == ov::streams::num) {
const auto streams = engConfig.streamExecutorConfig._streams;
return static_cast<int32_t>(streams); // ov::streams::num has special negative values (AUTO = -1, NUMA = -2)
} else if (name == ov::affinity) {
const auto affinity = engConfig.streamExecutorConfig._threadBindingType;
switch (affinity) {
case InferenceEngine::IStreamsExecutor::ThreadBindingType::NONE:
return ov::Affinity::NONE;
case InferenceEngine::IStreamsExecutor::ThreadBindingType::CORES:
return ov::Affinity::CORE;
case InferenceEngine::IStreamsExecutor::ThreadBindingType::NUMA:
return ov::Affinity::NUMA;
case InferenceEngine::IStreamsExecutor::ThreadBindingType::HYBRID_AWARE:
return ov::Affinity::HYBRID_AWARE;
}
return ov::Affinity::NONE;
} else if (name == ov::inference_num_threads) {
const auto num_threads = engConfig.streamExecutorConfig._threads;
return num_threads;
} else if (name == ov::enable_profiling.name()) {
const bool perfCount = engConfig.collectPerfCounters;
return perfCount ? "YES" : "NO";
} else if (name == ov::hint::inference_precision) {
const auto enforceBF16 = engConfig.enforceBF16;
return enforceBF16 ? ov::element::bf16 : ov::element::f32;
} else if (name == ov::hint::performance_mode) {
const auto perfHint = engConfig.perfHintsConfig.ovPerfHint;
return perfHint;
} else if (name == ov::hint::num_requests) {
const auto perfHintNumRequests = engConfig.perfHintsConfig.ovPerfHintNumRequests;
return perfHintNumRequests;
}
/* Internally, legacy parameters are used with the new API as part of the migration procedure.
* This fallback can be removed as soon as the migration is completed. */
return GetConfigLegacy(name, options);
}
static bool hasAVX512() {
#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64)
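// CPUID leaf 7, subleaf 0 (structured extended feature flags); AVX512F is reported in EBX bit 16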
unsigned int regs[4] = {7, 0, 0, 0};
@@ -693,7 +769,7 @@ static bool hasAVX512() {
return false;
}
Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter>& /*options*/) const {
Parameter Engine::GetMetricLegacy(const std::string& name, const std::map<std::string, Parameter>& options) const {
if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics = {
METRIC_KEY(AVAILABLE_DEVICES),
@@ -707,25 +783,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
};
IE_SET_METRIC_RETURN(SUPPORTED_METRICS, metrics);
} else if (name == METRIC_KEY(FULL_DEVICE_NAME)) {
std::string brand_string;
#if !defined(__arm__) && !defined(_M_ARM) && !defined(__aarch64__) && !defined(_M_ARM64)
unsigned int addr_list[3] = { 0x80000002, 0x80000003, 0x80000004 };
unsigned int regs[4];
for (auto addr : addr_list) {
regs[0] = addr;
#ifdef _WIN32
__cpuid(reinterpret_cast<int*>(regs), regs[0]);
#else
__get_cpuid(regs[0], &regs[0], &regs[1], &regs[2], &regs[3]);
#endif
char *ch = reinterpret_cast<char*>(&regs[0]);
for (size_t j = 0; j < sizeof(regs); j++)
brand_string += ch[j];
}
#else
brand_string = "Non Intel Architecture";
#endif
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, brand_string);
IE_SET_METRIC_RETURN(FULL_DEVICE_NAME, deviceFullName);
} else if (name == METRIC_KEY(AVAILABLE_DEVICES)) {
std::vector<std::string> availableDevices = { "" };
IE_SET_METRIC_RETURN(AVAILABLE_DEVICES, availableDevices);
@@ -753,9 +811,72 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
IE_SET_METRIC_RETURN(RANGE_FOR_STREAMS, range);
} else if (name == METRIC_KEY(IMPORT_EXPORT_SUPPORT)) {
IE_SET_METRIC_RETURN(IMPORT_EXPORT_SUPPORT, true);
} else {
IE_THROW() << "Unsupported metric key " << name;
}
IE_CPU_PLUGIN_THROW() << "Unsupported metric key: " << name;
}
Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter>& options) const {
if (isLegacyAPI())
return GetMetricLegacy(name, options);
auto RO_property = [](const std::string& propertyName) {
return ov::PropertyName(propertyName, ov::PropertyMutability::RO);
};
auto RW_property = [](const std::string& propertyName) {
return ov::PropertyName(propertyName, ov::PropertyMutability::RW);
};
if (name == ov::supported_properties) {
std::vector<ov::PropertyName> roProperties {RO_property(ov::supported_properties.name()),
RO_property(ov::available_devices.name()),
RO_property(ov::range_for_async_infer_requests.name()),
RO_property(ov::range_for_streams.name()),
RO_property(ov::device::full_name.name()),
RO_property(ov::device::capabilities.name())
};
// The whole config is RW before the network is loaded.
std::vector<ov::PropertyName> rwProperties {RW_property(ov::streams::num.name()),
RW_property(ov::affinity.name()),
RW_property(ov::inference_num_threads.name()),
RW_property(ov::enable_profiling.name()),
RW_property(ov::hint::inference_precision.name()),
RW_property(ov::hint::performance_mode.name()),
RW_property(ov::hint::num_requests.name()),
};
std::vector<ov::PropertyName> supportedProperties;
supportedProperties.reserve(roProperties.size() + rwProperties.size());
supportedProperties.insert(supportedProperties.end(), roProperties.begin(), roProperties.end());
supportedProperties.insert(supportedProperties.end(), rwProperties.begin(), rwProperties.end());
return supportedProperties;
} else if (name == ov::device::full_name) {
return deviceFullName;
} else if (name == ov::available_devices) {
const std::vector<std::string> availableDevices = { "" };
return availableDevices;
} else if (name == ov::device::capabilities) {
std::vector<std::string> capabilities;
if (with_cpu_x86_bfloat16())
capabilities.push_back(METRIC_VALUE(BF16));
if (hasAVX512())
capabilities.push_back(METRIC_VALUE(WINOGRAD));
capabilities.push_back(METRIC_VALUE(FP32));
capabilities.push_back(METRIC_VALUE(FP16));
capabilities.push_back(METRIC_VALUE(INT8));
capabilities.push_back(METRIC_VALUE(BIN));
capabilities.push_back("IMPORT_EXPORT");
return capabilities;
} else if (name == ov::range_for_async_infer_requests) {
const std::tuple<unsigned int, unsigned int, unsigned int> range = std::make_tuple(1, 1, 1);
return range;
} else if (name == ov::range_for_streams) {
const std::tuple<unsigned int, unsigned int> range = std::make_tuple(1, parallel_get_max_threads());
return range;
}
/* Internally, legacy parameters are used with the new API as part of the migration procedure.
* This fallback can be removed as soon as the migration is completed. */
return GetMetricLegacy(name, options);
}
void Engine::AddExtension(const InferenceEngine::IExtensionPtr& extension) {
@@ -844,7 +965,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma
res.supportedLayersMap.emplace(layerName, GetName());
}
} else {
IE_THROW() << "CPU plug-in doesn't support not ngraph-based model!";
IE_CPU_PLUGIN_THROW() << "Only ngraph-based models are supported!";
}
return res;
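
Plugin-level introspection through the new Engine::GetMetric() branch; a usage sketch (the helper name is illustrative):

#include <iostream>
#include <openvino/runtime/core.hpp>

// Queries device metrics that no longer go through the legacy METRIC_KEY machinery.
void inspect_cpu_device(ov::Core& core) {
    // FULL_DEVICE_NAME is now computed once, in the Engine constructor, via getDeviceFullName().
    const std::string device_name = core.get_property("CPU", ov::device::full_name);
    const auto capabilities = core.get_property("CPU", ov::device::capabilities);
    std::cout << device_name << std::endl;
    for (const auto& capability : capabilities)
        std::cout << "  " << capability << std::endl;
}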


@@ -41,10 +41,17 @@ public:
const std::map<std::string, std::string>& config) override;
private:
bool isLegacyAPI() const;
InferenceEngine::Parameter GetMetricLegacy(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const;
InferenceEngine::Parameter GetConfigLegacy(const std::string& name, const std::map<std::string, InferenceEngine::Parameter>& options) const;
Config engConfig;
NumaNodesWeights weightsSharing;
MKLDNNExtensionManager::Ptr extensionManager = std::make_shared<MKLDNNExtensionManager>();
bool streamsSet = false;
const std::string deviceFullName;
};
} // namespace MKLDNNPlugin


@@ -0,0 +1,60 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "ngraph_functions/subgraph_builders.hpp"
#include "functional_test_utils/skip_tests_config.hpp"
#include <base/ov_behavior_test_utils.hpp>
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/properties.hpp"
#include <gtest/gtest.h>
using namespace ov::test::behavior;
namespace {
//
// Executable Network GetMetric
//
class OVClassConfigTestCPU : public ::testing::Test,
public ::testing::WithParamInterface<std::tuple<std::string, std::pair<std::string, ov::Any>>> {
public:
std::shared_ptr<ngraph::Function> model;
const std::string deviceName = "CPU";
void SetUp() override {
SKIP_IF_CURRENT_TEST_IS_DISABLED();
model = ngraph::builder::subgraph::makeConvPoolRelu();
}
};
TEST_F(OVClassConfigTestCPU, smoke_GetROPropertiesDoesNotThrow) {
ov::Core ie;
std::vector<ov::PropertyName> properties;
ov::CompiledModel compiledModel = ie.compile_model(model, deviceName);
ASSERT_NO_THROW(properties = compiledModel.get_property(ov::supported_properties));
for (const auto& property : properties) {
ASSERT_NO_THROW((void)compiledModel.get_property(property));
}
}
TEST_F(OVClassConfigTestCPU, smoke_SetROPropertiesThrow) {
ov::Core ie;
std::vector<ov::PropertyName> properties;
ov::CompiledModel compiledModel = ie.compile_model(model, deviceName);
ASSERT_NO_THROW(properties = compiledModel.get_property(ov::supported_properties));
for (const auto& property : properties) {
ASSERT_FALSE(property.is_mutable());
ASSERT_THROW(compiledModel.set_property({{property, "DUMMY VALUE"}}), ov::Exception);
}
}
} // namespace


@@ -3,7 +3,10 @@
//
#include "behavior/ov_plugin/core_integration.hpp"
#include <openvino/runtime/properties.hpp>
#include "ie_system_conf.h"
#include "openvino/runtime/core.hpp"
#include "openvino/core/type/element_type.hpp"
using namespace ov::test::behavior;
using namespace InferenceEngine::PluginConfigParams;
@@ -79,17 +82,93 @@ INSTANTIATE_TEST_SUITE_P(
//////////////////////////////////////////////////////////////////////////////////////////
TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedThrow) {
TEST(OVClassBasicTest, smoke_SetConfigInferenceNumThreads) {
ov::Core ie;
std::string value = {};
int32_t value = 0;
int32_t num_threads = 1;
ASSERT_NO_THROW(ie.set_property("CPU", {{KEY_CPU_THREADS_NUM, "1"}}));
ASSERT_NO_THROW(value = ie.get_property("CPU", KEY_CPU_THREADS_NUM).as<std::string>());
ASSERT_EQ("1", value);
ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads)));
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads));
ASSERT_EQ(num_threads, value);
ASSERT_NO_THROW(ie.set_property("CPU", {{KEY_CPU_THREADS_NUM, "4"}}));
ASSERT_NO_THROW(value = ie.get_property("CPU", KEY_CPU_THREADS_NUM).as<std::string>());
ASSERT_EQ("4", value);
num_threads = 4;
ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads)));
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads));
ASSERT_EQ(num_threads, value);
}
TEST(OVClassBasicTest, smoke_SetConfigStreamsNum) {
ov::Core ie;
int32_t value = 0;
int32_t num_streams = 1;
auto setGetProperty = [&ie](int32_t& getProperty, int32_t setProperty){
ASSERT_NO_THROW(ie.set_property("CPU", ov::streams::num(setProperty)));
ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::streams::num));
};
setGetProperty(value, num_streams);
ASSERT_EQ(num_streams, value);
num_streams = ov::streams::NUMA;
setGetProperty(value, num_streams);
ASSERT_GT(value, 0); // value has been configured automatically
num_streams = ov::streams::AUTO;
setGetProperty(value, num_streams);
ASSERT_GT(value, 0); // value has been configured automatically
}
TEST(OVClassBasicTest, smoke_SetConfigAffinity) {
ov::Core ie;
ov::Affinity value = ov::Affinity::NONE;
#if (defined(__APPLE__) || defined(_WIN32))
auto numaNodes = InferenceEngine::getAvailableNUMANodes();
auto defaultBindThreadParameter = numaNodes.size() > 1 ? ov::Affinity::NUMA : ov::Affinity::NONE;
#else
auto defaultBindThreadParameter = ov::Affinity::CORE;
#endif
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity));
ASSERT_EQ(defaultBindThreadParameter, value);
const ov::Affinity affinity = ov::Affinity::HYBRID_AWARE;
ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity)));
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity));
ASSERT_EQ(affinity, value);
}
TEST(OVClassBasicTest, smoke_SetConfigHintInferencePrecision) {
ov::Core ie;
auto value = ov::element::f32;
const auto precision = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
ASSERT_EQ(precision, value);
const auto forcedPrecision = ov::element::f32;
ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision)));
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
ASSERT_EQ(forcedPrecision, value);
}
TEST(OVClassBasicTest, smoke_SetConfigEnableProfiling) {
ov::Core ie;
bool value = false;
const bool enableProfilingDefault = false;
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling));
ASSERT_EQ(enableProfilingDefault, value);
const bool enableProfiling = true;
ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling)));
ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling));
ASSERT_EQ(enableProfiling, value);
}
// IE Class Query network