From 3eb1aa59de94145de02e776dc13bb2ac96e12b3b Mon Sep 17 00:00:00 2001
From: Vladimir Paramuzov
Date: Fri, 24 Sep 2021 12:32:38 +0300
Subject: [PATCH] [MULTI] Fixed devices comparison to respect default device id (#7311)

* [GPU] Fixed default device id

* [MULTI] Fixed devices comparison to respect default device id
---
 .../src/cldnn_engine/cldnn_config.h           |  2 +-
 .../src/inference_engine/src/ie_core.cpp      |  9 ++++
 .../multi_device_async_infer_request.cpp      | 48 ++++++++++---------
 .../multi_device_exec_network.cpp             |  4 ++
 .../multi_device_exec_network.hpp             |  2 +
 .../src/multi_device/multi_device_plugin.cpp  | 20 +++++++-
 inference-engine/src/plugin_api/ie_icore.hpp  | 11 +++++
 .../src/plugin_api/ie_performance_hints.hpp   |  6 +--
 .../behavior/config.cpp                       |  7 +++
 .../multi/gpu_remote_blob_tests.cpp           |  1 +
 .../plugin/shared/include/behavior/config.hpp | 21 ++++++++
 .../cpp_interfaces/interface/mock_icore.hpp   |  1 +
 12 files changed, 105 insertions(+), 27 deletions(-)

diff --git a/inference-engine/src/cldnn_engine/cldnn_config.h b/inference-engine/src/cldnn_engine/cldnn_config.h
index 4400b6c295e..e3e4581cca1 100644
--- a/inference-engine/src/cldnn_engine/cldnn_config.h
+++ b/inference-engine/src/cldnn_engine/cldnn_config.h
@@ -30,7 +30,7 @@ struct Config {
         tuningConfig(),
         graph_dumps_dir(""),
         sources_dumps_dir(""),
-        device_id(""),
+        device_id("0"),
         kernels_cache_dir(""),
         n_threads(std::max(static_cast<unsigned>(1), std::thread::hardware_concurrency())),
         enable_loop_unrolling(true) {
diff --git a/inference-engine/src/inference_engine/src/ie_core.cpp b/inference-engine/src/inference_engine/src/ie_core.cpp
index 7eb8d320aeb..9f8f2699cf3 100644
--- a/inference-engine/src/inference_engine/src/ie_core.cpp
+++ b/inference-engine/src/inference_engine/src/ie_core.cpp
@@ -594,6 +594,15 @@ public:
         return copyParameterValue(GetCPPPluginByName(parsed._deviceName).get_metric(name, parsed._config));
     }
 
+    ie::Parameter GetConfig(const std::string& deviceName, const std::string& name) const override {
+        auto parsed = parseDeviceNameIntoConfig(deviceName);
+
+        // we need to return a copy of Parameter object which is created on Core side,
+        // not in InferenceEngine plugin side, which can be unloaded from Core in a parallel thread
+        // TODO: remove this WA after *-31417 is resolved
+        return copyParameterValue(GetCPPPluginByName(parsed._deviceName).get_config(name, parsed._config));
+    }
+
     /**
      * @brief Returns devices available for neural networks inference
      *
diff --git a/inference-engine/src/multi_device/multi_device_async_infer_request.cpp b/inference-engine/src/multi_device/multi_device_async_infer_request.cpp
index ae461d8fce5..a0f4a0776fe 100644
--- a/inference-engine/src/multi_device/multi_device_async_infer_request.cpp
+++ b/inference-engine/src/multi_device/multi_device_async_infer_request.cpp
@@ -9,6 +9,9 @@
 #include
 
 #include "multi_device_async_infer_request.hpp"
+#include
+#include
+#include
 
 namespace MultiDevicePlugin {
     using namespace InferenceEngine;
@@ -35,31 +38,32 @@ MultiDeviceAsyncInferRequest::MultiDeviceAsyncInferRequest(
     _pipeline = {
         // if the request is coming with device-specific remote blobs make sure it is scheduled to the specific device only:
         { /*TaskExecutor*/ std::make_shared<ImmediateExecutor>(), /*task*/ [this] {
-            // by default, no preferred device:
-            _multiDeviceExecutableNetwork->_thisPreferredDeviceName = "";
-            // if any input is remote (e.g. was set with SetBlob), let' use the corresponding device
-            for (const auto &it : _multiDeviceExecutableNetwork->GetInputsInfo()) {
-                auto b = _inferRequest->GetBlob(it.first);
-                auto r = b->as<RemoteBlob>();
-                if (r) {
-                    const auto name = r->getDeviceName();
-                    const auto res = std::find_if(
-                        _multiDeviceExecutableNetwork->_devicePrioritiesInitial.cbegin(),
-                        _multiDeviceExecutableNetwork->_devicePrioritiesInitial.cend(),
-                        [&name](const MultiDevicePlugin::DeviceInformation& d){
-                            return d.deviceName == name; });
-                    if (_multiDeviceExecutableNetwork->_devicePrioritiesInitial.cend() == res) {
-                        IE_THROW() << "None of the devices (for which current MULTI-device configuration was "
-                                      "initialized) supports a remote blob created on the device named " << name;
+                // by default, no preferred device:
+                _multiDeviceExecutableNetwork->_thisPreferredDeviceName = "";
+                // if any input is remote (e.g. was set with SetBlob), let's use the corresponding device
+                for (const auto &it : _multiDeviceExecutableNetwork->GetInputsInfo()) {
+                    auto b = _inferRequest->GetBlob(it.first);
+                    auto r = b->as<RemoteBlob>();
+                    if (r) {
+                        const auto name = r->getDeviceName();
+                        const auto res = std::find_if(
+                            _multiDeviceExecutableNetwork->_devicePrioritiesInitial.cbegin(),
+                            _multiDeviceExecutableNetwork->_devicePrioritiesInitial.cend(),
+                            [&name](const MultiDevicePlugin::DeviceInformation& d) {
+                                return (d.defaultDeviceID.empty() ? d.deviceName : (d.deviceName + "." + d.defaultDeviceID)) == name;
+                            });
+                        if (_multiDeviceExecutableNetwork->_devicePrioritiesInitial.cend() == res) {
+                            IE_THROW() << "None of the devices (for which current MULTI-device configuration was "
+                                          "initialized) supports a remote blob created on the device named " << name;
-                    } else {
+                        } else {
                             // it is ok to take the c_str() here (as pointed in the multi_device_exec_network.hpp we need to use const char*)
                             // as the original strings are from the "persistent" vector (with the right lifetime)
-                        _multiDeviceExecutableNetwork->_thisPreferredDeviceName = res->deviceName.c_str();
-                        break;
-                    }
-                }
-            }
+                            _multiDeviceExecutableNetwork->_thisPreferredDeviceName = res->deviceName.c_str();
+                            break;
+                        }
+                    }
+                }
         }},
         // as the scheduling algo may select any device, this stage accepts the scheduling decision (actual workerRequest)
         // then sets the device-agnostic blobs to the actual (device-specific) request
diff --git a/inference-engine/src/multi_device/multi_device_exec_network.cpp b/inference-engine/src/multi_device/multi_device_exec_network.cpp
index 3a2a3673e14..778d1ff3f5e 100644
--- a/inference-engine/src/multi_device/multi_device_exec_network.cpp
+++ b/inference-engine/src/multi_device/multi_device_exec_network.cpp
@@ -372,6 +372,10 @@ std::shared_ptr MultiDeviceExecutableNetwork::Ge
            << " Current list of devices allowed via the DEVICE_PRIORITIES config: " << devices_names;
 }
 
+std::shared_ptr<InferenceEngine::ICore> MultiDeviceExecutableNetwork::GetCore() const {
+    return _plugin->GetCore();
+}
+
 InferenceEngine::IInferRequestInternal::Ptr MultiDeviceExecutableNetwork::CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                                                  InferenceEngine::OutputsDataMap networkOutputs) {
     auto num = _numRequestsCreated++;
diff --git a/inference-engine/src/multi_device/multi_device_exec_network.hpp b/inference-engine/src/multi_device/multi_device_exec_network.hpp
index aafbdc4819c..4a687377b34 100644
--- a/inference-engine/src/multi_device/multi_device_exec_network.hpp
+++ b/inference-engine/src/multi_device/multi_device_exec_network.hpp
@@ -36,6 +36,7 @@ struct DeviceInformation {
     DeviceName deviceName;
     std::map<std::string, std::string> config;
     int numRequestsPerDevices;
+    std::string defaultDeviceID;
 };
 
 template
@@ -131,6 +132,7 @@ public:
     InferenceEngine::IInferRequestInternal::Ptr CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                                                                        InferenceEngine::OutputsDataMap networkOutputs) override;
     std::shared_ptr GetContext() const override;
+    std::shared_ptr<InferenceEngine::ICore> GetCore() const;
     ~MultiDeviceExecutableNetwork() override;
 
     void ScheduleToWorkerInferRequest(InferenceEngine::Task, DeviceName preferred_device = "");
diff --git a/inference-engine/src/multi_device/multi_device_plugin.cpp b/inference-engine/src/multi_device/multi_device_plugin.cpp
index b4f6e3aa49e..8017d1e07de 100644
--- a/inference-engine/src/multi_device/multi_device_plugin.cpp
+++ b/inference-engine/src/multi_device/multi_device_plugin.cpp
@@ -108,6 +108,19 @@ std::vector MultiDeviceInferencePlugin::ParseMetaDevices(cons
         return GetSupportedConfig(tconfig, deviceName);
     };
 
+    auto getDefaultDeviceID = [this](std::string deviceName) -> std::string {
+        std::vector<std::string> supportedMetrics = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS));
+        if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) {
+            std::vector<std::string> supportKeys = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS));
+
+            if (std::find(supportKeys.begin(), supportKeys.end(), CONFIG_KEY(DEVICE_ID)) != supportKeys.end()) {
+                return GetCore()->GetConfig(deviceName, CONFIG_KEY(DEVICE_ID)).as<std::string>();
+            }
+        }
+
+        return "";
+    };
+
     for (auto && d : devicesWithRequests) {
         auto openingBracket = d.find_first_of('(');
         auto closingBracket = d.find_first_of(')', openingBracket);
@@ -123,8 +136,13 @@
             }
         }
 
+        std::string defaultDeviceID = "";
+        DeviceIDParser parsed{deviceName};
+        if (parsed.getDeviceID().empty())
+            defaultDeviceID = getDefaultDeviceID(deviceName);
+
         // create meta device
-        metaDevices.push_back({ deviceName, getDeviceConfig(deviceName), numRequests });
+        metaDevices.push_back({ deviceName, getDeviceConfig(deviceName), numRequests, defaultDeviceID });
     }
 
     return metaDevices;
diff --git a/inference-engine/src/plugin_api/ie_icore.hpp b/inference-engine/src/plugin_api/ie_icore.hpp
index 3d4c16927cb..afce99109f5 100644
--- a/inference-engine/src/plugin_api/ie_icore.hpp
+++ b/inference-engine/src/plugin_api/ie_icore.hpp
@@ -118,6 +118,17 @@ public:
      */
     virtual Parameter GetMetric(const std::string& deviceName, const std::string& name) const = 0;
 
+    /**
+     * @brief Gets a configuration value dedicated to device behaviour.
+     *
+     * The method extracts information that can be set via the SetConfig method.
+     *
+     * @param deviceName - A name of a device to get a configuration value for.
+     * @param name - config key.
+     * @return Value of the config corresponding to the config key.
+     */
+    virtual Parameter GetConfig(const std::string& deviceName, const std::string& name) const = 0;
+
     /**
      * @brief Returns devices available for neural networks inference
      *
diff --git a/inference-engine/src/plugin_api/ie_performance_hints.hpp b/inference-engine/src/plugin_api/ie_performance_hints.hpp
index 3904611375f..c9303f43818 100644
--- a/inference-engine/src/plugin_api/ie_performance_hints.hpp
+++ b/inference-engine/src/plugin_api/ie_performance_hints.hpp
@@ -72,7 +72,7 @@ struct PerfHintsConfig {
      * @return configuration value
      */
     static std::string CheckPerformanceHintValue(const std::string& val) {
-        if (val == PluginConfigParams::LATENCY || val == PluginConfigParams::THROUGHPUT)
+        if (val == PluginConfigParams::LATENCY || val == PluginConfigParams::THROUGHPUT || val == "")
             return val;
         else
             IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_PERFORMANCE_HINT
@@ -88,7 +88,7 @@ struct PerfHintsConfig {
         int val_i = -1;
         try {
             val_i = std::stoi(val);
-            if (val_i > 0)
+            if (val_i >= 0)
                 return val_i;
             else
                 throw std::logic_error("wrong val");
@@ -99,4 +99,4 @@ struct PerfHintsConfig {
         }
     }
 };
-}  // namespace InferenceEngine
\ No newline at end of file
+}  // namespace InferenceEngine
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp
index e6219abd561..882572fead2 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/config.cpp
@@ -132,6 +132,13 @@ namespace {
                                     ::testing::ValuesIn(conf)),
                             CorrectConfigAPITests::getTestCaseName);
 
+    INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, DefaultValuesConfigTests,
+                            ::testing::Combine(
+                                    ::testing::ValuesIn(netPrecisions),
+                                    ::testing::Values(CommonTestUtils::DEVICE_GPU),
+                                    ::testing::ValuesIn(conf)),
+                            CorrectConfigAPITests::getTestCaseName);
+
     INSTANTIATE_TEST_SUITE_P(smoke_GPU_BehaviorTests, CorrectConfigAPITests,
                             ::testing::Combine(
                                     ::testing::ValuesIn(netPrecisions),
diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp
index 26e2655607d..f33a3844410 100644
--- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp
+++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/multi/gpu_remote_blob_tests.cpp
@@ -10,6 +10,7 @@
 const std::vector device_names_and_support_for_remote_blobs {
     {{GPU}, true},  // GPU via MULTI,
+    {{"GPU.0"}, true},  // GPU.0 via MULTI,
 #ifdef ENABLE_MKL_DNN
     {{GPU, CPU}, true},  // GPU+CPU
     {{CPU, GPU}, true},  // CPU+GPU
diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp
index 8887aaacbfe..fcf283b85d3 100644
--- a/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp
+++ b/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp
@@ -133,6 +133,27 @@ namespace BehaviorTestsDefinitions {
         ASSERT_EQ(std::find(supportedOptions.cbegin(), supportedOptions.cend(), key), supportedOptions.cend());
     }
 
+    using DefaultValuesConfigTests = BehaviorTestsUtils::BehaviorTestsBasic;
+
+    TEST_P(DefaultValuesConfigTests, CanSetDefaultValueBackToPlugin) {
+        // Skip test according to plugin specific disabledTestPatterns() (if any)
+        SKIP_IF_CURRENT_TEST_IS_DISABLED()
+        InferenceEngine::CNNNetwork cnnNet(function);
+        InferenceEngine::Parameter metric;
+        ASSERT_NO_THROW(metric = ie->GetMetric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)));
+        std::vector<std::string> keys = metric;
+
+        for (auto& key : keys) {
+            InferenceEngine::Parameter configValue;
+            ASSERT_NO_THROW(configValue = ie->GetConfig(targetDevice, key));
+
+            ASSERT_NO_THROW(ie->SetConfig({{ key, configValue.as<std::string>()}}, targetDevice))
+                << "device=" << targetDevice << " "
+                << "config key=" << key << " "
+                << "value=" << configValue.as<std::string>();
+        }
+    }
+
     using IncorrectConfigTests = BehaviorTestsUtils::BehaviorTestsBasic;
 
     TEST_P(IncorrectConfigTests, SetConfigWithIncorrectKey) {
diff --git a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp
index 009cdb22595..0154e0a7d61 100644
--- a/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp
+++ b/inference-engine/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp
@@ -30,6 +30,7 @@ public:
                 const InferenceEngine::CNNNetwork&, const std::string&, const std::map<std::string, std::string>&));
 
     MOCK_CONST_METHOD2(GetMetric, InferenceEngine::Parameter(const std::string&, const std::string&));
+    MOCK_CONST_METHOD2(GetConfig, InferenceEngine::Parameter(const std::string&, const std::string&));
     MOCK_CONST_METHOD0(GetAvailableDevices, std::vector<std::string>());
     MOCK_CONST_METHOD1(DeviceSupportsImportExport, bool(const std::string&));  // NOLINT not a cast to bool
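
The core of the patch is the comparison change in the MULTI pipeline: once the GPU plugin defaults DEVICE_ID to "0", a remote blob created on "GPU" reports its device as "GPU.0", so the old plain string comparison against the DEVICE_PRIORITIES entry "GPU" never matched. The following standalone C++ sketch (not part of the patch; DeviceInformation, effectiveName, and supportsRemoteBlob are simplified stand-ins for the plugin types and the fixed find_if lambda) illustrates the fixed matching rule:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for MultiDevicePlugin::DeviceInformation from the patch.
struct DeviceInformation {
    std::string deviceName;       // name as listed in DEVICE_PRIORITIES, e.g. "GPU"
    std::string defaultDeviceID;  // default DEVICE_ID queried from the plugin, e.g. "0"
};

// Effective name used for comparison: "GPU" with default id "0" becomes "GPU.0".
std::string effectiveName(const DeviceInformation& d) {
    return d.defaultDeviceID.empty() ? d.deviceName
                                     : (d.deviceName + "." + d.defaultDeviceID);
}

// True if a remote blob created on blobDeviceName belongs to one of the
// configured devices -- the check the patch fixes in the MULTI pipeline.
bool supportsRemoteBlob(const std::vector<DeviceInformation>& priorities,
                        const std::string& blobDeviceName) {
    return std::any_of(priorities.cbegin(), priorities.cend(),
                       [&](const DeviceInformation& d) {
                           return effectiveName(d) == blobDeviceName;
                       });
}

int main() {
    // MULTI configured with plain "GPU"; the plugin reports default DEVICE_ID "0".
    std::vector<DeviceInformation> priorities{{"GPU", "0"}};

    // A GPU remote blob reports "GPU.0": the old comparison ("GPU" == "GPU.0")
    // failed, while the fixed one matches.
    std::cout << std::boolalpha
              << supportsRemoteBlob(priorities, "GPU.0") << "\n"   // true
              << supportsRemoteBlob(priorities, "GPU.1") << "\n";  // false
}

Note that defaultDeviceID is only filled in ParseMetaDevices when the configured name carries no explicit device id, which is why the "GPU.0 via MULTI" case added to gpu_remote_blob_tests.cpp also passes: an explicit id compares directly against the blob's device name.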