Properties improvements: part 2 (#16489)

* Properties improvements: part 2

* Accurate configs handling in HETERO / BATCH

* Align plugins in caching properties

* Fixed caching mock tests

* Added new TestNoCachingProperties test

* Fixed test

* Added ov::caching_properties to API 1.0 metrics as well

* Fixes for HETERO plugin

* Fixed tests

* Even more refactoring in HETERO plugin config management
Ilya Lavrenov 2023-03-25 19:28:05 +04:00 committed by GitHub
parent a96da994ec
commit e66b837104
6 changed files with 302 additions and 223 deletions


@@ -492,10 +492,8 @@ ov::Plugin ov::CoreImpl::get_plugin(const std::string& pluginName) const {
{
auto supportedConfigKeys =
plugin.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS), {}).as<std::vector<std::string>>();
auto config_iter = std::find(supportedConfigKeys.begin(),
supportedConfigKeys.end(),
CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID));
const bool supportsConfigDeviceID = config_iter != supportedConfigKeys.end();
const bool supportsConfigDeviceID =
ov::util::contains(supportedConfigKeys, CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID));
const std::string deviceKey =
supportsConfigDeviceID ? CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID) : CONFIG_KEY(DEVICE_ID);
@@ -739,6 +737,26 @@ ov::RemoteContext ov::CoreImpl::create_context(const std::string& device_name, c
ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& full_device_name,
const ov::AnyMap& user_properties) const {
if (is_virtual_device(full_device_name)) {
// Considerations:
// 1. in case of virtual devices, all the magic happens at the level where the
// virtual device calls ICore::get_supported_property for real HW devices,
// so, for now, we can return user properties almost as is, without any
// filtering / flattening
// 2. The only exception here: while common properties like ov::num_streams or
// ov::hint::performance_mode are shared across all the devices,
// ov::device::priorities cannot be shared, because it's specific to the current
// virtual plugin. So, we need to remove ov::device::priorities from the list,
// because it's supposed to be set for the current virtual plugin and cannot be
// propagated down
ov::AnyMap return_properties = clone_map(user_properties);
auto device_priorities_it = return_properties.find(ov::device::priorities.name());
if (device_priorities_it != return_properties.end()) {
return_properties.erase(device_priorities_it);
}
return return_properties;
}
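A minimal sketch of the resulting behaviour for a virtual device, assuming an internal core instance ('core_impl') and illustrative property values not taken from this diff:

ov::AnyMap user = {{ov::device::priorities.name(), "GPU,CPU"},
                   ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)};
// hypothetical call: the map comes back as-is, minus ov::device::priorities
auto supported = core_impl.get_supported_property("AUTO", user);
// 'supported' now holds only the performance_mode entry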
static const std::vector<std::string> core_level_properties = {
ov::cache_dir.name(),
ov::force_tbb_terminate.name(),
@@ -750,28 +768,6 @@ ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& full_device_n
const auto flattened = ov::parseDeviceNameIntoConfig(full_device_name, user_properties);
const std::string& device_name = flattened._deviceName;
const auto& flattened_config = flattened._config;
ov::AnyMap supported_config, options;
// fill 'options' to provide more information to ICore::get_property calls
{
auto priority_prop_name = get_device_priority_property(device_name).prop_name;
auto it = flattened_config.find(priority_prop_name);
if (it != flattened_config.end())
options[it->first] = it->second;
else if (device_name == "HETERO") {
// TODO: remove together with API 1.0
priority_prop_name = "TARGET_FALLBACK";
it = flattened_config.find(priority_prop_name);
if (it != flattened_config.end())
options[it->first] = it->second;
} else if (device_name == "BATCH") {
// TODO: remove together with API 1.0
priority_prop_name = CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG);
it = flattened_config.find(priority_prop_name);
if (it != flattened_config.end())
options[it->first] = it->second;
}
}
// virtual plugins should pass core-level properties through to HW plugins,
// so we need to report them as supported
@@ -780,16 +776,16 @@ ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& full_device_n
// try to search against IE API 1.0's SUPPORTED_CONFIG_KEYS
try {
const auto supported_keys =
GetMetric(device_name, METRIC_KEY(SUPPORTED_CONFIG_KEYS), options).as<std::vector<std::string>>();
GetMetric(device_name, METRIC_KEY(SUPPORTED_CONFIG_KEYS), {}).as<std::vector<std::string>>();
for (auto&& config_key : supported_keys) {
supported_config_keys.emplace_back(config_key);
}
} catch (ov::Exception&) {
}
// try to search against OV API 2.0's supported_properties
// try to search against OV API 2.0's mutable supported_properties
try {
for (auto&& property : ICore::get_property(device_name, ov::supported_properties, options)) {
for (auto&& property : ICore::get_property(device_name, ov::supported_properties, {})) {
if (property.is_mutable()) {
supported_config_keys.emplace_back(std::move(property));
}
@@ -797,11 +793,14 @@ ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& full_device_n
} catch (ov::Exception&) {
}
// collect supported properties for HW device
AnyMap supported_config;
for (auto&& kvp : flattened_config) {
if (util::contains(supported_config_keys, kvp.first)) {
supported_config[kvp.first] = kvp.second;
}
}
return supported_config;
}
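For a real HW device, the same call keeps only keys reported via SUPPORTED_CONFIG_KEYS or as mutable supported_properties; a short sketch assuming CPU reports ov::num_streams as mutable:

ov::AnyMap user = {ov::num_streams(4), {"NOT_A_SUPPORTED_KEY", "x"}};
// hypothetical call on the same internal core instance
auto filtered = core_impl.get_supported_property("CPU", user);
// filtered == {ov::num_streams(4)}; the unknown key is silently dropped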
@@ -908,10 +907,10 @@ void ov::CoreImpl::set_property(const std::string& device_name, const AnyMap& pr
// setting ov::device::properties for a HW device is not supported through this function
auto devices = get_registered_devices();
for (auto&& config : properties) {
auto is_secondary_config_for_hw_device = config.first.find(ov::device::properties.name()) != std::string::npos;
OPENVINO_ASSERT(!is_secondary_config_for_hw_device,
const auto is_secondary_property = config.first.find(ov::device::properties.name()) != std::string::npos;
OPENVINO_ASSERT(!is_secondary_property,
"set_property do not support ov::device::propreties. "
"You can configure the devices through the compile_model()/loadNetwork() API.");
"You can configure the devices through the compile_model()/query_model() API.");
}
set_property_for_device(properties, device_name);
}
@@ -1087,16 +1086,11 @@ void ov::CoreImpl::set_property_for_device(const ov::AnyMap& configMap, const st
}
// Add device specific value to support device_name.device_id cases
{
auto supportedConfigKeys =
plugin.second.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS), {}).as<std::vector<std::string>>();
auto config_iter = std::find(supportedConfigKeys.begin(),
supportedConfigKeys.end(),
CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID));
const bool supportsConfigDeviceID = config_iter != supportedConfigKeys.end();
const std::string deviceKey =
supportsConfigDeviceID ? CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID) : CONFIG_KEY(DEVICE_ID);
if (!parser.get_device_id().empty()) {
const std::string deviceKey =
device_supports_property(plugin.second, CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID))
? CONFIG_KEY_INTERNAL(CONFIG_DEVICE_ID)
: CONFIG_KEY(DEVICE_ID);
configCopy[deviceKey] = parser.get_device_id();
}
}
@@ -1239,17 +1233,16 @@ ov::AnyMap ov::CoreImpl::create_compile_config(const ov::Plugin& plugin, const o
}
// 1. Move DEVICE_ID key to property_config
const bool supports_device_id = device_supports_property(plugin, ov::device::id);
auto deviceIt = user_config.find(ov::device::id.name());
if (deviceIt != user_config.end()) {
property_config[deviceIt->first] = deviceIt->second.as<std::string>();
} else {
// we likely need to extract default device_id from the plugin,
// but we suppose when we call plugin.get_property it will provide the answer
// for the default device (e.g. DEVICE_ID = 0 for GPU)
} else if (supports_device_id) {
property_config[ov::device::id.name()] = plugin.get_property(ov::device::id, {});
}
// 2. Extract config keys which affect compilation process
auto caching_props = plugin.get_property(ov::caching_properties);
auto caching_props = plugin.get_property(ov::caching_properties, property_config);
OPENVINO_ASSERT(!caching_props.empty(), "ov::caching_properties returned by ", plugin.get_name(), " are empty");
ov::AnyMap compile_config;
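The rest of create_compile_config (outside this hunk) fills compile_config from caching_props; roughly like the sketch below, which is not the verbatim implementation:

for (const auto& prop : caching_props) {
    auto it = property_config.find(prop);
    // an explicit user value wins; otherwise the plugin's current value is queried
    compile_config[prop] = (it != property_config.end())
                               ? it->second
                               : plugin.get_property(prop, property_config);
}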
@@ -1285,7 +1278,7 @@ void ov::CoreImpl::CoreConfig::set_and_update(ov::AnyMap& config) {
std::lock_guard<std::mutex> lock(_cacheConfigMutex);
// fill global cache config
_cacheConfig = CoreConfig::CacheConfig::create(it->second.as<std::string>());
// sets cache config per-device if it's set explicitly before
// sets cache config per-device if it wasn't set explicitly before
for (auto& deviceCfg : _cacheConfigPerDevice) {
deviceCfg.second = CoreConfig::CacheConfig::create(it->second.as<std::string>());
}


@@ -21,6 +21,7 @@
#include "openvino/pass/manager.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/runtime/intel_gpu/properties.hpp"
#include "openvino/util/common_util.hpp"
#include "transformations/common_optimizations/dimension_tracking.hpp"
#include "transformations/init_node_info.hpp"
#include "transformations/utils/utils.hpp"
@@ -600,9 +601,9 @@ std::shared_ptr<ngraph::Function> AutoBatchExecutableNetwork::GetExecGraphInfo()
: _networkWithoutBatch->GetExecGraphInfo();
}
void AutoBatchExecutableNetwork::SetConfig(const std::map<std::string, InferenceEngine::Parameter>& config) {
auto timeout = config.find(CONFIG_KEY(AUTO_BATCH_TIMEOUT));
if (timeout == config.end() || config.size() > 1) {
void AutoBatchExecutableNetwork::SetConfig(const std::map<std::string, InferenceEngine::Parameter>& user_config) {
auto timeout = user_config.find(CONFIG_KEY(AUTO_BATCH_TIMEOUT));
if (timeout == user_config.end() || user_config.size() > 1) {
IE_THROW() << "The only config that can be changed on the fly for the AutoBatching the is the "
<< CONFIG_KEY(AUTO_BATCH_TIMEOUT);
} else {
@@ -664,8 +665,8 @@ InferenceEngine::Parameter AutoBatchExecutableNetwork::GetMetric(const std::stri
namespace {
std::map<std::string, std::string> mergeConfigs(std::map<std::string, std::string> config,
const std::map<std::string, std::string>& local) {
for (auto&& kvp : local) {
const std::map<std::string, std::string>& user_config) {
for (auto&& kvp : user_config) {
config[kvp.first] = kvp.second;
}
return config;
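Later entries win in this merge, i.e. values from user_config override the plugin defaults held in _config; a tiny illustration with made-up values:

auto merged = mergeConfigs({{"CACHE_DIR", ""}, {"AUTO_BATCH_TIMEOUT", "1000"}},
                           {{"AUTO_BATCH_TIMEOUT", "100"}});
// merged == {{"CACHE_DIR", ""}, {"AUTO_BATCH_TIMEOUT", "100"}}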
@@ -690,41 +691,25 @@ DeviceInformation AutoBatchInferencePlugin::ParseBatchDevice(const std::string&
return {deviceName, {{}}, batch};
}
DeviceInformation AutoBatchInferencePlugin::ParseMetaDevice(const std::string& devicesBatchCfg,
const std::map<std::string, std::string>& config) const {
auto getDeviceConfig = [&](const DeviceName& deviceWithID) {
ov::DeviceIDParser deviceParser(deviceWithID);
std::string deviceName = deviceParser.get_device_name();
std::map<std::string, std::string> tconfig = mergeConfigs(_config, config);
// passthrough the cache dir to core->loadnetwork when underlying device does not support cache dir
auto deviceConfig = GetCore()->GetSupportedConfig(deviceWithID, tconfig);
if (tconfig.find(CONFIG_KEY(CACHE_DIR)) != tconfig.end() &&
deviceConfig.find(CONFIG_KEY(CACHE_DIR)) == deviceConfig.end()) {
auto tmpiter = tconfig.find(CONFIG_KEY(CACHE_DIR));
if (tmpiter != tconfig.end())
deviceConfig.insert({tmpiter->first, tmpiter->second});
}
return deviceConfig;
};
DeviceInformation AutoBatchInferencePlugin::ParseMetaDevice(
const std::string& devicesBatchCfg,
const std::map<std::string, std::string>& user_config) const {
auto metaDevice = ParseBatchDevice(devicesBatchCfg);
metaDevice.config = getDeviceConfig(metaDevice.deviceName);
metaDevice.config = GetCore()->GetSupportedConfig(metaDevice.deviceName, user_config);
auto cfg = config;
// check that no irrelevant config keys are left
for (auto k : config) {
for (auto k : user_config) {
const auto& name = k.first;
auto found_in_supported_cfg = std::find(supported_configKeys.begin(), supported_configKeys.end(), k.first);
auto found_in_device_cfg = metaDevice.config.find(k.first);
if (found_in_device_cfg == metaDevice.config.end() && found_in_supported_cfg == supported_configKeys.end()) {
if (metaDevice.config.find(name) == metaDevice.config.end() &&
!ov::util::contains(supported_configKeys, name)) {
IE_THROW() << "Unsupported config key: " << name;
}
}
return metaDevice;
}
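A hypothetical call, assuming CONFIG_KEY(AUTO_BATCH_TIMEOUT) is among the plugin's supported keys:

// "GPU(4)" selects the GPU device with a batch size of 4; a key that is neither
// BATCH-specific nor supported by the target device would make this throw
auto meta = plugin.ParseMetaDevice("GPU(4)", {{CONFIG_KEY(AUTO_BATCH_TIMEOUT), "100"}});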
RemoteContext::Ptr AutoBatchInferencePlugin::CreateContext(const InferenceEngine::ParamMap& config) {
auto cfg = config;
RemoteContext::Ptr AutoBatchInferencePlugin::CreateContext(const InferenceEngine::ParamMap& remote_properties) {
auto cfg = remote_properties;
auto it = cfg.find(CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG));
if (it == cfg.end())
it = cfg.find(ov::device::priorities.name());
@@ -741,7 +726,7 @@ RemoteContext::Ptr AutoBatchInferencePlugin::CreateContext(const InferenceEngine
}
Parameter AutoBatchInferencePlugin::GetConfig(const std::string& name,
const std::map<std::string, Parameter>& options) const {
const std::map<std::string, Parameter>& user_options) const {
if (supported_configKeys.end() != std::find(supported_configKeys.begin(), supported_configKeys.end(), name)) {
auto it = _config.find(name);
if (it == _config.end()) {
@@ -754,8 +739,8 @@ Parameter AutoBatchInferencePlugin::GetConfig(const std::string& name,
}
}
void AutoBatchInferencePlugin::CheckConfig(const std::map<std::string, std::string>& config) {
for (auto&& kvp : config) {
void AutoBatchInferencePlugin::CheckConfig(const std::map<std::string, std::string>& user_config) {
for (auto&& kvp : user_config) {
const auto name = kvp.first;
const auto val = kvp.second;
if (supported_configKeys.end() == std::find(supported_configKeys.begin(), supported_configKeys.end(), name))
@@ -775,9 +760,9 @@ void AutoBatchInferencePlugin::CheckConfig(const std::map<std::string, std::stri
}
}
void AutoBatchInferencePlugin::SetConfig(const std::map<std::string, std::string>& config) {
CheckConfig(config);
for (auto&& kvp : config) {
void AutoBatchInferencePlugin::SetConfig(const std::map<std::string, std::string>& user_config) {
CheckConfig(user_config);
for (auto&& kvp : user_config) {
_config[kvp.first] = kvp.second;
}
}
@@ -792,7 +777,7 @@ AutoBatchInferencePlugin::AutoBatchInferencePlugin() {
InferenceEngine::Parameter AutoBatchInferencePlugin::GetMetric(
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const {
const std::map<std::string, InferenceEngine::Parameter>& user_options) const {
if (name == METRIC_KEY(SUPPORTED_METRICS)) {
std::vector<std::string> metrics;
metrics.push_back(METRIC_KEY(SUPPORTED_METRICS));
@@ -810,26 +795,26 @@ InferenceEngine::Parameter AutoBatchInferencePlugin::GetMetric(
IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadExeNetworkImpl(
const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) {
return LoadNetworkImpl(network, nullptr, config);
const std::map<std::string, std::string>& user_config) {
return LoadNetworkImpl(network, nullptr, user_config);
}
InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadNetworkImpl(
const InferenceEngine::CNNNetwork& network,
const std::shared_ptr<InferenceEngine::RemoteContext> ctx,
const std::map<std::string, std::string>& config) {
const std::map<std::string, std::string>& user_config) {
auto core = GetCore();
if (core == nullptr) {
IE_THROW() << "Please, work with Auto-Batching device via InferencEngine::Core object";
}
auto fullConfig = mergeConfigs(_config, config);
auto fullConfig = mergeConfigs(_config, user_config);
auto device_batch = fullConfig.find(CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG));
if (device_batch == fullConfig.end())
device_batch = fullConfig.find(ov::device::priorities.name());
if (device_batch == fullConfig.end()) {
IE_THROW() << "KEY_AUTO_BATCH key is not set for BATCH device";
}
auto metaDevice = ParseMetaDevice(device_batch->second, fullConfig);
auto metaDevice = ParseMetaDevice(device_batch->second, user_config);
const auto& deviceName = metaDevice.deviceName;
const auto& deviceConfig = metaDevice.config;
auto deviceConfigNoAutoBatch = deviceConfig;
@@ -915,8 +900,8 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
auto optBatchSize = core->GetMetric(deviceName, METRIC_KEY(OPTIMAL_BATCH_SIZE), options).as<unsigned int>();
auto res = core->GetConfig(deviceName, CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS)).as<std::string>();
requests = PerfHintsConfig::CheckPerformanceHintRequestValue(res);
const auto& reqs = config.find(CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS));
if (reqs != config.end())
const auto& reqs = user_config.find(CONFIG_KEY(PERFORMANCE_HINT_NUM_REQUESTS));
if (reqs != user_config.end())
requests = static_cast<unsigned int>(PerfHintsConfig::CheckPerformanceHintRequestValue(reqs->second));
if (requests)
optBatchSize = std::max(1u, std::min(requests, optBatchSize));
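The clamping above is plain min/max arithmetic; with assumed values:

unsigned int optBatchSize = 32;  // e.g. reported via METRIC_KEY(OPTIMAL_BATCH_SIZE)
unsigned int requests = 4;       // e.g. taken from PERFORMANCE_HINT_NUM_REQUESTS
optBatchSize = std::max(1u, std::min(requests, optBatchSize));  // -> 4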
@@ -985,17 +970,17 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadExeNetworkImpl(
const InferenceEngine::CNNNetwork& network,
const std::shared_ptr<InferenceEngine::RemoteContext>& context,
const std::map<std::string, std::string>& config) {
return LoadNetworkImpl(network, context, config);
const std::map<std::string, std::string>& user_config) {
return LoadNetworkImpl(network, context, user_config);
}
InferenceEngine::QueryNetworkResult AutoBatchInferencePlugin::QueryNetwork(
const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config) const {
const std::map<std::string, std::string>& user_config) const {
auto core = GetCore();
if (!core)
return InferenceEngine::QueryNetworkResult();
auto cfg = config;
auto cfg = user_config;
for (auto c : cfg) {
if (c.first == CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) || c.first == ov::device::priorities.name()) {
auto val = c.second;


@@ -59,22 +59,30 @@ template <typename T>
using NodeMap = std::unordered_map<ngraph::Node*, T>;
HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network,
const Engine::Configs& config,
const Configs& user_config,
Engine* plugin)
: InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr,
std::make_shared<InferenceEngine::ImmediateExecutor>()),
_heteroPlugin{plugin},
_name{network.getName()},
_config{config} {
_hetero_config{},
_device_config{} {
auto function = network.getFunction();
IE_ASSERT(function != nullptr);
auto clonedFunction = ngraph::clone_function(*function);
// hetero_config, device_config and user_config are the unchanged global and local configs set by the user;
// we need to create _hetero_config and _device_config based on them, which will
// contain only hetero (_hetero_config) and only device (_device_config) properties
auto parsed_config = _heteroPlugin->MergeConfigs(user_config);
_hetero_config = parsed_config.hetero_config;
_device_config = parsed_config.device_config;
bool dumpDotFile = false;
if (std::getenv("OPENVINO_HETERO_VISUALIZE")) {
dumpDotFile = true;
} else {
auto itDumpDotFile = _config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
dumpDotFile = itDumpDotFile != _config.end() ? (itDumpDotFile->second == YES) : false;
auto itDumpDotFile = _hetero_config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
dumpDotFile = itDumpDotFile != _hetero_config.end() ? (itDumpDotFile->second == YES) : false;
}
QueryNetworkResult queryNetworkResult;
@@ -92,7 +100,9 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo
}
if (queryNetworkResult.supportedLayersMap.empty()) {
queryNetworkResult = _heteroPlugin->QueryNetwork(network, _config);
// here we need to pass through the unchanged / unparsed user-set configuration
// because it can contain TARGET_FALLBACK / ov::device::priorities
queryNetworkResult = _heteroPlugin->QueryNetwork(network, user_config);
}
using Input = ngraph::Input<ngraph::Node>;
@@ -434,23 +444,30 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo
++id;
}
for (auto&& network : _networks) {
auto metaDevices = _heteroPlugin->GetDevicePlugins(network._device, _config);
auto metaDevices = _heteroPlugin->GetDevicePlugins(network._device, _device_config);
auto config = metaDevices[network._device];
// disable caching for subgraphs, because the whole HERERO model is cached
config[ov::cache_dir.name()] = "";
// disable caching for subgraphs, because the whole HETERO model is cached
auto device_config = metaDevices[network._device];
device_config[ov::cache_dir.name()] = "";
network._network = _heteroPlugin->GetCore()->LoadNetwork(network._clonedNetwork, network._device, config);
network._network =
_heteroPlugin->GetCore()->LoadNetwork(network._clonedNetwork, network._device, device_config);
}
}
HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel,
const std::map<std::string, std::string>& configs,
const Configs& user_config,
Engine* heteroPlugin)
: _heteroPlugin(heteroPlugin) {
: _heteroPlugin(heteroPlugin),
_hetero_config{},
_device_config{} {
std::string heteroXmlStr;
std::getline(heteroModel, heteroXmlStr);
auto parsed_config = _heteroPlugin->MergeConfigs(user_config);
_hetero_config = parsed_config.hetero_config;
_device_config = parsed_config.device_config;
pugi::xml_document heteroXmlDoc;
pugi::xml_parse_result res = heteroXmlDoc.load_string(heteroXmlStr.c_str());
@@ -471,10 +488,14 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel,
pugi::xml_node outputsNode = heteroNode.child("outputs");
FOREACH_CHILD (outputNode, outputsNode, "output") { networkOutputs.insert(GetStrAttr(outputNode, "name")); }
Engine::Configs importedConfigs;
auto configsNode = heteroNode.child("configs");
FOREACH_CHILD (configNode, configsNode, "config") {
importedConfigs.emplace(GetStrAttr(configNode, "key"), GetStrAttr(configNode, "value"));
auto heteroConfigsNode = heteroNode.child("hetero_config");
FOREACH_CHILD (heteroConfigNode, heteroConfigsNode, "config") {
_hetero_config.emplace(GetStrAttr(heteroConfigNode, "key"), GetStrAttr(heteroConfigNode, "value"));
}
auto deviceConfigsNode = heteroNode.child("device_config");
FOREACH_CHILD (deviceConfigNode, deviceConfigsNode, "config") {
_device_config.emplace(GetStrAttr(deviceConfigNode, "key"), GetStrAttr(deviceConfigNode, "value"));
}
auto blobNamesNode = heteroNode.child("blob_names_map");
@@ -482,16 +503,12 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel,
_blobNameMap.emplace(GetStrAttr(blobNameNode, "key"), GetStrAttr(blobNameNode, "value"));
}
for (auto&& config : configs) {
importedConfigs[config.first] = config.second;
}
std::vector<NetworkDesc> descs;
pugi::xml_node subnetworksNode = heteroNode.child("subnetworks");
FOREACH_CHILD (subnetworkNode, subnetworksNode, "subnetwork") {
auto deviceName = GetStrAttr(subnetworkNode, "device");
auto metaDevices = _heteroPlugin->GetDevicePlugins(deviceName, importedConfigs);
auto metaDevices = _heteroPlugin->GetDevicePlugins(deviceName, _device_config);
assert(metaDevices.size() == 1);
auto& loadConfig = metaDevices[deviceName];
@@ -593,7 +610,6 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(std::istream& heteroModel,
FOREACH_CHILD (resultNode, resultsNode, "result") { _results.emplace_back(parseNode(resultNode, false)); }
// save state
this->_config = importedConfigs;
this->_networks = std::move(descs);
this->SetPointerToPlugin(_heteroPlugin->shared_from_this());
}
@@ -680,11 +696,18 @@ void HeteroExecutableNetwork::Export(std::ostream& heteroModel) {
}
}
auto configsNode = heteroNode.append_child("configs");
for (auto&& config : _config) {
auto configNode = configsNode.append_child("config");
configNode.append_attribute("key").set_value(config.first.c_str());
configNode.append_attribute("value").set_value(config.second.c_str());
auto heteroConfigsNode = heteroNode.append_child("hetero_config");
for (auto&& config : _hetero_config) {
auto heteroConfigNode = heteroConfigsNode.append_child("config");
heteroConfigNode.append_attribute("key").set_value(config.first.c_str());
heteroConfigNode.append_attribute("value").set_value(config.second.c_str());
}
auto deviceConfigsNode = heteroNode.append_child("device_config");
for (auto&& config : _device_config) {
auto deviceConfigNode = deviceConfigsNode.append_child("config");
deviceConfigNode.append_attribute("key").set_value(config.first.c_str());
deviceConfigNode.append_attribute("value").set_value(config.second.c_str());
}
auto blobNamesNode = heteroNode.append_child("blob_names_map");
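With this change, the exported blob header carries two separate config sections instead of one; a hypothetical fragment of the serialized node (keys and values illustrative):

// <hetero name="...">
//   <hetero_config>
//     <config key="HETERO_DUMP_GRAPH_DOT" value="NO"/>
//   </hetero_config>
//   <device_config>
//     <config key="EXCLUSIVE_ASYNC_REQUESTS" value="YES"/>
//   </device_config>
//   <blob_names_map>...</blob_names_map>
// </hetero>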
@@ -762,13 +785,17 @@ IInferRequestInternal::Ptr HeteroExecutableNetwork::CreateInferRequest() {
InferenceEngine::Parameter HeteroExecutableNetwork::GetConfig(const std::string& name) const {
InferenceEngine::Parameter result;
if (name == "TARGET_FALLBACK" || name == ov::device::priorities.name()) {
result = _heteroPlugin->GetTargetFallback(_config, false);
} else if (name == HETERO_CONFIG_KEY(DUMP_GRAPH_DOT) || name == CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)) {
auto it = _config.find(name);
IE_ASSERT(it != _config.end());
result = it->second == YES ? true : false;
result = _heteroPlugin->GetTargetFallback(_hetero_config, false);
} else if (name == HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)) {
auto it = _hetero_config.find(name);
IE_ASSERT(it != _hetero_config.end());
result = it->second == YES;
} else if (name == CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)) {
auto it = _device_config.find(name);
IE_ASSERT(it != _device_config.end());
result = it->second == YES;
} else {
IE_THROW() << "Unsupported ExecutableNetwork config key: " << name;
IE_THROW() << "Unsupported Hetero ExecutableNetwork config key: " << name;
}
return result;
@@ -833,6 +860,6 @@ InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string&
}
return decltype(ov::execution_devices)::value_type{exeDevices};
} else {
IE_THROW() << "Unsupported ExecutableNetwork metric key: " << name;
IE_THROW() << "Unsupported Hetero ExecutableNetwork metric key: " << name;
}
}


@@ -21,11 +21,10 @@
#include "async_infer_request.hpp"
#include "ie_icore.hpp"
#include "infer_request.hpp"
#include "plugin.hpp"
namespace HeteroPlugin {
class Engine;
/**
* @class ExecutableNetwork
* @brief Interface of executable network
@@ -34,22 +33,13 @@ class HeteroExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadS
public:
typedef std::shared_ptr<HeteroExecutableNetwork> Ptr;
/**
* @brief constructor
*/
HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network,
const std::map<std::string, std::string>& config,
Engine* plugin);
/**
* @brief Import from opened file constructor
*/
HeteroExecutableNetwork(std::istream& heteroModel,
const std::map<std::string, std::string>& config,
Engine* plugin);
HeteroExecutableNetwork(const InferenceEngine::CNNNetwork& network, const Configs& user_config, Engine* plugin);
HeteroExecutableNetwork(std::istream& heteroModel, const Configs& user_config, Engine* plugin);
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(
InferenceEngine::InputsDataMap networkInputs,
InferenceEngine::OutputsDataMap networkOutputs) override;
InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(
const std::vector<std::shared_ptr<const ov::Node>>& inputs,
const std::vector<std::shared_ptr<const ov::Node>>& outputs) override;
@@ -63,9 +53,6 @@ public:
void Export(std::ostream& modelFile) override;
private:
void InitCNNImpl(const InferenceEngine::CNNNetwork& network);
void InitNgraph(const InferenceEngine::CNNNetwork& network);
struct NetworkDesc {
std::string _device;
InferenceEngine::CNNNetwork _clonedNetwork;
@@ -75,7 +62,8 @@ private:
std::vector<NetworkDesc> _networks;
Engine* _heteroPlugin;
std::string _name;
std::map<std::string, std::string> _config;
Configs _hetero_config;
Configs _device_config;
std::unordered_map<std::string, std::string> _blobNameMap;
};


@@ -16,6 +16,7 @@
#include "ie_plugin_config.hpp"
#include "executable_network.hpp"
#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
#include "openvino/util/common_util.hpp"
#include "openvino/runtime/properties.hpp"
#include "internal_properties.hpp"
#include "openvino/util/common_util.hpp"
@@ -26,45 +27,120 @@ using namespace InferenceEngine::PluginConfigParams;
using namespace InferenceEngine::HeteroConfigParams;
using namespace HeteroPlugin;
Engine::Engine() {
_pluginName = "HETERO";
_config[KEY_EXCLUSIVE_ASYNC_REQUESTS] = YES;
_config[HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)] = NO;
}
namespace {
Engine::Configs mergeConfigs(Engine::Configs config, const Engine::Configs& local) {
for (auto&& kvp : local) {
config[kvp.first] = kvp.second;
}
return config;
}
Engine::Configs mergeConfigs(Engine::Configs config, const ov::AnyMap& local) {
for (auto&& kvp : local) {
config[kvp.first] = kvp.second.as<std::string>();
}
return config;
}
const std::vector<std::string>& getSupportedConfigKeys() {
const std::vector<std::string>& getHeteroSupportedConfigKeys() {
static const std::vector<std::string> supported_configKeys = {HETERO_CONFIG_KEY(DUMP_GRAPH_DOT),
"TARGET_FALLBACK",
ov::device::priorities.name(),
CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)};
ov::device::priorities.name()};
return supported_configKeys;
}
const std::vector<std::string>& getHeteroDeviceSupportedConfigKeys() {
static const std::vector<std::string> supported_configKeys = {CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)};
return supported_configKeys;
}
std::vector<std::string> getSupportedConfigKeys() {
std::vector<std::string> supported_configKeys = getHeteroSupportedConfigKeys();
for (auto&& key : getHeteroDeviceSupportedConfigKeys())
supported_configKeys.emplace_back(key);
return supported_configKeys;
}
ov::AnyMap any_copy(const Configs& params) {
ov::AnyMap result;
for (auto&& value : params) {
result.emplace(value.first, value.second);
}
return result;
}
Configs any_copy(const ov::AnyMap& params) {
Configs result;
for (auto&& value : params) {
result.emplace(value.first, value.second.as<std::string>());
}
return result;
}
ov::AnyMap clone_map(const ov::AnyMap& m) {
ov::AnyMap rm;
for (auto&& kvp : m) {
rm[kvp.first] = kvp.second.is<ov::AnyMap>() ? ov::Any(clone_map(kvp.second.as<ov::AnyMap>())) : kvp.second;
}
return rm;
}
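clone_map exists because copying an ov::Any is assumed to share its payload, so nested ov::AnyMap values have to be duplicated explicitly; a short sketch under that assumption:

ov::AnyMap src = {{ov::device::properties.name(), ov::AnyMap{{ov::num_streams.name(), 4}}}};
auto dst = clone_map(src);
dst[ov::device::properties.name()].as<ov::AnyMap>()[ov::num_streams.name()] = 8;
// src's nested map still holds 4; a plain copy of the outer map would alias it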
} // namespace
std::string Engine::GetTargetFallback(const Engine::Configs& config, bool raise_exception) const {
auto it = config.find("TARGET_FALLBACK");
if (it == config.end()) {
it = config.find(ov::device::priorities.name());
Engine::Engine() {
_pluginName = "HETERO";
_config[HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)] = NO;
_device_config[CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)] = YES;
}
ParsedConfig<ov::AnyMap> Engine::MergeConfigs(const ov::AnyMap& user_config) const {
auto device_config = clone_map(user_config);
auto hetero_config = _config;
// after API 1.0 removal, replace with the loop over getHeteroSupportedConfigKeys()
{
auto try_merge_property = [&](const std::string& property_name) -> bool {
auto property_it = device_config.find(property_name);
if (property_it != device_config.end()) {
// migrate HETERO property to hetero_config
hetero_config[property_it->first] = property_it->second.as<std::string>();
// and erase it from device_config
device_config.erase(property_it->first);
return true;
}
return false;
};
try_merge_property(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
// if we have not found TARGET_FALLBACK in user_config, let's try to find device::priorities
// Note: we can have conflicts here like
// core.set_property(HETERO, TARGET_FALLBACK=MULTI,CPU)
// core.compile_model(HETERO, DEVICE_PRIORITIES=GPU.0,GPU.1)
// so, we need to check whether TARGET_FALLBACK was set before in set_property
// This check can be removed after API 1.0 is removed
if (!try_merge_property("TARGET_FALLBACK") && hetero_config.find("TARGET_FALLBACK") == hetero_config.end()) {
try_merge_property(ov::device::priorities.name());
}
}
if (it == config.end()) {
// merge device_config settings
for (auto&& key : getHeteroDeviceSupportedConfigKeys()) {
auto user_config_it = user_config.find(key);
if (user_config_it != user_config.end()) {
device_config[user_config_it->first] = user_config_it->second;
}
}
return {hetero_config, device_config};
}
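A hypothetical split produced by this function (key names from the code above, values illustrative, 'engine' an assumed instance):

ov::AnyMap user = {{"TARGET_FALLBACK", "GPU,CPU"},
                   {HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_VALUE(YES)},
                   {CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS), CONFIG_VALUE(YES)},
                   {"SOME_DEVICE_KEY", "1"}};  // hypothetical device-level key
auto parsed = engine.MergeConfigs(user);
// parsed.hetero_config: TARGET_FALLBACK, DUMP_GRAPH_DOT
// parsed.device_config: EXCLUSIVE_ASYNC_REQUESTS, SOME_DEVICE_KEY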
ParsedConfig<Configs> Engine::MergeConfigs(const Configs& user_config) const {
auto parsed_config = MergeConfigs(any_copy(user_config));
return {parsed_config.hetero_config, any_copy(parsed_config.device_config)};
}
std::string Engine::GetTargetFallback(const Configs& user_config, bool raise_exception) const {
return GetTargetFallback(any_copy(user_config), raise_exception);
}
std::string Engine::GetTargetFallback(const ov::AnyMap& user_config, bool raise_exception) const {
auto hetero_config = MergeConfigs(user_config).hetero_config;
auto it = hetero_config.find("TARGET_FALLBACK");
if (it == hetero_config.end()) {
it = hetero_config.find(ov::device::priorities.name());
}
if (it == hetero_config.end()) {
if (raise_exception)
IE_THROW() << "The '" << ov::device::priorities.name()
<< "' option was not defined for heterogeneous plugin";
@@ -74,77 +150,74 @@ std::string Engine::GetTargetFallback(const Engine::Configs& config, bool raise_
}
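Given the conflict handling in MergeConfigs, a TARGET_FALLBACK set earlier via set_property wins over ov::device::priorities passed later; a sketch with assumed calls:

engine.SetConfig({{"TARGET_FALLBACK", "GPU,CPU"}});  // API 1.0 style, assumed instance
auto fallback = engine.GetTargetFallback(ov::AnyMap{{ov::device::priorities.name(), "CPU"}});
// fallback == "GPU,CPU": TARGET_FALLBACK was set beforehand, so ov::device::priorities is ignored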
InferenceEngine::IExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network,
const Configs& config) {
const Configs& user_config) {
if (GetCore() == nullptr) {
IE_THROW() << "Please, work with HETERO device via InferencEngine::Core object";
}
auto tconfig = mergeConfigs(_config, config);
std::string fallbackDevicesStr = GetTargetFallback(tconfig);
DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig);
auto function = network.getFunction();
if (function == nullptr) {
IE_THROW() << "HETERO device supports just ngraph network representation";
if (network.getFunction() == nullptr) {
IE_THROW() << "HETERO device supports only nGraph model representation";
}
return std::make_shared<HeteroExecutableNetwork>(network, mergeConfigs(_config, config), this);
return std::make_shared<HeteroExecutableNetwork>(network, user_config, this);
}
InferenceEngine::IExecutableNetworkInternal::Ptr Engine::ImportNetwork(
std::istream& heteroModel,
const std::map<std::string, std::string>& config) {
return std::make_shared<HeteroExecutableNetwork>(heteroModel, mergeConfigs(_config, config), this);
const std::map<std::string, std::string>& user_config) {
return std::make_shared<HeteroExecutableNetwork>(heteroModel, user_config, this);
}
Engine::DeviceMetaInformationMap Engine::GetDevicePlugins(const std::string& targetFallback,
const Configs& localConfig) const {
const Configs& device_config) const {
auto fallbackDevices = ov::DeviceIDParser::get_hetero_devices(targetFallback);
Engine::DeviceMetaInformationMap metaDevices;
for (auto&& deviceName : fallbackDevices) {
auto itPlugin = metaDevices.find(deviceName);
if (metaDevices.end() == itPlugin) {
metaDevices[deviceName] = GetCore()->GetSupportedConfig(deviceName, mergeConfigs(_config, localConfig));
metaDevices[deviceName] = GetCore()->GetSupportedConfig(deviceName, device_config);
}
}
return metaDevices;
}
void Engine::SetConfig(const Configs& configs) {
for (auto&& kvp : configs) {
void Engine::SetConfig(const Configs& user_config) {
for (auto&& kvp : user_config) {
const auto& name = kvp.first;
const auto& supported_configKeys = getSupportedConfigKeys();
if (supported_configKeys.end() != std::find(supported_configKeys.begin(), supported_configKeys.end(), name))
if (ov::util::contains(getHeteroSupportedConfigKeys(), name))
_config[name] = kvp.second;
else if (ov::util::contains(getHeteroDeviceSupportedConfigKeys(), name))
_device_config[name] = kvp.second;
else
IE_THROW() << "Unsupported config key: " << name;
IE_THROW() << "Unsupported HETERO config key: " << name;
}
}
QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const Configs& config) const {
QueryNetworkResult qr;
QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const Configs& user_config) const {
if (GetCore() == nullptr) {
IE_THROW() << "Please, work with HETERO device via InferencEngine::Core object";
IE_THROW() << "Please, work with HETERO device via ov::Core object";
}
auto tconfig = mergeConfigs(_config, config);
std::string fallbackDevicesStr = GetTargetFallback(tconfig);
DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig);
auto parsed_config = MergeConfigs(user_config);
std::string fallbackDevicesStr = GetTargetFallback(parsed_config.hetero_config);
DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, parsed_config.device_config);
auto function = network.getFunction();
if (function == nullptr) {
IE_THROW() << "HETERO device supports just ngraph network representation";
IE_THROW() << "HETERO device supports just nGraph model representation";
}
std::map<std::string, QueryNetworkResult> queryResults;
for (auto&& metaDevice : metaDevices) {
auto& deviceName = metaDevice.first;
queryResults[deviceName] = GetCore()->QueryNetwork(network, deviceName, metaDevice.second);
const auto& deviceName = metaDevice.first;
const auto& device_config = metaDevice.second;
queryResults[deviceName] = GetCore()->QueryNetwork(network, deviceName, device_config);
}
// WARNING: here are the devices with user-set priorities
auto fallbackDevices = ov::DeviceIDParser::get_hetero_devices(fallbackDevicesStr);
QueryNetworkResult qr;
for (auto&& deviceName : fallbackDevices) {
for (auto&& layerQueryResult : queryResults[deviceName].supportedLayersMap) {
qr.supportedLayersMap.emplace(layerQueryResult);
@@ -157,7 +230,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const Configs
return qr;
}
Parameter Engine::GetMetric(const std::string& name, const std::map<std::string, Parameter>& options) const {
Parameter Engine::GetMetric(const std::string& name, const ov::AnyMap& user_options) const {
if (ov::supported_properties == name) {
return decltype(ov::supported_properties)::value_type{
ov::PropertyName{ov::supported_properties.name(), ov::PropertyMutability::RO},
@@ -168,8 +241,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
} else if (ov::caching_properties == name) {
return decltype(ov::caching_properties)::value_type{ov::hetero::caching_device_properties.name()};
} else if (ov::hetero::caching_device_properties == name) {
auto tconfig = mergeConfigs(_config, options);
std::string targetFallback = GetTargetFallback(tconfig);
std::string targetFallback = GetTargetFallback(user_options);
return decltype(ov::hetero::caching_device_properties)::value_type{DeviceCachingProperties(targetFallback)};
} else if (METRIC_KEY(SUPPORTED_METRICS) == name) {
IE_SET_METRIC_RETURN(SUPPORTED_METRICS,
@@ -188,7 +260,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
} else if (ov::device::capabilities == name) {
return decltype(ov::device::capabilities)::value_type{{ov::device::capability::EXPORT_IMPORT}};
} else {
IE_THROW() << "Unsupported metric key: " << name;
IE_THROW() << "Unsupported HETERO metric key: " << name;
}
}
@@ -222,20 +294,25 @@ std::string Engine::DeviceCachingProperties(const std::string& targetFallback) c
return result.empty() ? "" : ov::Any(result).as<std::string>();
}
Parameter Engine::GetConfig(const std::string& name, const std::map<std::string, Parameter>& /*options*/) const {
Parameter Engine::GetConfig(const std::string& name, const ov::AnyMap& options) const {
if (name == HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)) {
auto it = _config.find(HETERO_CONFIG_KEY(DUMP_GRAPH_DOT));
auto it = _config.find(name);
IE_ASSERT(it != _config.end());
bool dump = it->second == YES;
return {dump};
} else if (name == ov::device::priorities) {
std::string targetFallback = GetTargetFallback(_config);
std::string targetFallback = GetTargetFallback(options);
auto priorities = ov::util::from_string(targetFallback, ov::device::priorities);
return decltype(ov::device::priorities)::value_type{priorities};
} else if (name == "TARGET_FALLBACK") {
return GetTargetFallback(_config);
return GetTargetFallback(options);
} else if (name == CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)) {
auto it = _device_config.find(name);
IE_ASSERT(it != _device_config.end());
bool exclusive_async = it->second == YES;
return {exclusive_async};
} else {
IE_THROW() << "Unsupported config key: " << name;
IE_THROW() << "Unsupported HETERO config key: " << name;
}
}


@@ -17,9 +17,16 @@
namespace HeteroPlugin {
using Configs = std::map<std::string, std::string>;
template <typename T>
struct ParsedConfig {
Configs hetero_config;
T device_config;
};
class Engine : public InferenceEngine::IInferencePlugin {
public:
using Configs = std::map<std::string, std::string>;
using DeviceMetaInformationMap = std::unordered_map<std::string, Configs>;
Engine();
@@ -32,23 +39,25 @@ public:
InferenceEngine::QueryNetworkResult QueryNetwork(const InferenceEngine::CNNNetwork& network,
const Configs& config) const override;
InferenceEngine::Parameter GetMetric(
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetMetric(const std::string& name, const ov::AnyMap& options) const override;
InferenceEngine::Parameter GetConfig(
const std::string& name,
const std::map<std::string, InferenceEngine::Parameter>& options) const override;
InferenceEngine::Parameter GetConfig(const std::string& name, const ov::AnyMap& options) const override;
InferenceEngine::IExecutableNetworkInternal::Ptr ImportNetwork(
std::istream& heteroModel,
const std::map<std::string, std::string>& config) override;
InferenceEngine::IExecutableNetworkInternal::Ptr ImportNetwork(std::istream& heteroModel,
const Configs& config) override;
DeviceMetaInformationMap GetDevicePlugins(const std::string& targetFallback, const Configs& localConfig) const;
std::string GetTargetFallback(const Engine::Configs& config, bool raise_exception = true) const;
std::string GetTargetFallback(const Configs& config, bool raise_exception = true) const;
std::string GetTargetFallback(const ov::AnyMap& config, bool raise_exception = true) const;
ParsedConfig<Configs> MergeConfigs(const Configs& user_config) const;
ParsedConfig<ov::AnyMap> MergeConfigs(const ov::AnyMap& user_config) const;
private:
std::string DeviceCachingProperties(const std::string& targetFallback) const;
Configs _device_config;
};
} // namespace HeteroPlugin