Added new tests checking that config set via ie.SetConfig or LoadNetwork is returned via exe_net.GetConfig, and aligned the plugin implementations to that (#7627)

Maxim Shevtsov 2021-09-24 10:03:24 +03:00 committed by GitHub
parent 95342de8c8
commit f2dda1bbca
7 changed files with 58 additions and 9 deletions
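
The user-visible contract here: any option a customer supplies, either up front via ie.SetConfig or per network via LoadNetwork, must be reported back by exe_net.GetConfig. A minimal sketch of that round trip, assuming a CPU target and a hypothetical model path model.xml; any supported config key behaves the same way:

#include <inference_engine.hpp>
#include <cassert>
#include <map>
#include <string>

int main() {
    InferenceEngine::Core ie;
    const std::map<std::string, std::string> config = {
        {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}};
    auto network = ie.ReadNetwork("model.xml");  // hypothetical model path

    // Path 1: config set on the plugin beforehand via SetConfig.
    ie.SetConfig(config, "CPU");
    auto exeNet = ie.LoadNetwork(network, "CPU");
    assert(exeNet.GetConfig(
        InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS)
            .as<std::string>() == "3");

    // Path 2: config passed directly to LoadNetwork; same guarantee.
    auto exeNet2 = ie.LoadNetwork(network, "CPU", config);
    assert(exeNet2.GetConfig(
        InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS)
            .as<std::string>() == "3");
    return 0;
}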

View File

@@ -285,10 +285,13 @@ void Config::adjustKeyMapValues() {
     else
         key_config_map[PluginConfigParams::KEY_DYN_BATCH_ENABLED] = PluginConfigParams::NO;
-    if (nv12_two_inputs)
+    if (nv12_two_inputs) {
         key_config_map[CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS] = PluginConfigParams::YES;
-    else
+        key_config_map[GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS] = PluginConfigParams::YES;
+    } else {
         key_config_map[CLDNNConfigParams::KEY_CLDNN_NV12_TWO_INPUTS] = PluginConfigParams::NO;
+        key_config_map[GPUConfigParams::KEY_GPU_NV12_TWO_INPUTS] = PluginConfigParams::NO;
+    }
     if (enable_fp16_for_quantized_models)
         key_config_map[CLDNNConfigParams::KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS] = PluginConfigParams::YES;
@@ -343,9 +346,9 @@ void Config::adjustKeyMapValues() {
         key_config_map[GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING] = PluginConfigParams::YES;
     else
         key_config_map[GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING] = PluginConfigParams::NO;
-    key_config_map.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT, perfHintsConfig.ovPerfHint });
-    key_config_map.insert({ PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS,
-                            std::to_string(perfHintsConfig.ovPerfHintNumRequests) });
+    key_config_map[PluginConfigParams::KEY_PERFORMANCE_HINT] = perfHintsConfig.ovPerfHint;
+    key_config_map[PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS] =
+        std::to_string(perfHintsConfig.ovPerfHintNumRequests);
 }
 IE_SUPPRESS_DEPRECATED_END
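
The switch from insert() to operator[] above is what keeps GetConfig in sync: std::map::insert is a no-op when the key already exists, so a hint updated via a later SetConfig call would keep being reported with its stale value. A minimal standalone sketch of the distinction (key and value strings are illustrative):

#include <cassert>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> key_config_map;
    key_config_map["PERFORMANCE_HINT"] = "LATENCY";

    // insert() leaves an existing entry untouched...
    key_config_map.insert({"PERFORMANCE_HINT", "THROUGHPUT"});
    assert(key_config_map["PERFORMANCE_HINT"] == "LATENCY");

    // ...while operator[] overwrites it, which is what
    // adjustKeyMapValues() needs on repeated calls.
    key_config_map["PERFORMANCE_HINT"] = "THROUGHPUT";
    assert(key_config_map["PERFORMANCE_HINT"] == "THROUGHPUT");
    return 0;
}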

View File

@@ -571,7 +571,11 @@ std::map<std::string, std::string> clDNNEngine::ConvertPerfHintsToConfig(
             config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(1);
         } else if (mode_name == CONFIG_VALUE(THROUGHPUT)) {
             config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = CONFIG_VALUE(GPU_THROUGHPUT_AUTO);
-            config[GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE] = std::to_string(1);
+            // check throttling (to avoid overriding what the user might have set explicitly in the incoming config or previously via SetConfig)
+            const auto bInConfig = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() ||
+                config.find(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) != config.end();
+            if (!bInConfig && !throttlingSet)
+                config[GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE] = std::to_string(1);
         }
     }
 }
@@ -675,6 +679,8 @@ RemoteContext::Ptr clDNNEngine::GetDefaultContext(const ParamMap& params) {
 void clDNNEngine::SetConfig(const std::map<std::string, std::string> &config) {
     streamsSet = (config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) != config.end());
+    throttlingSet = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() ||
+                    config.find(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) != config.end();
     _impl->m_config.UpdateFromMap(config);
 }
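
Taken together, the two hunks above implement a "user setting wins over hint" rule for GPU throttling: SetConfig records whether the user ever touched the throttle key, and the THROUGHPUT hint applies its default of 1 only when the key appears neither in the incoming LoadNetwork config nor in any earlier SetConfig call. A condensed sketch of that pattern, with the real GPU/CLDNN key names collapsed into one illustrative "THROTTLE" string:

#include <map>
#include <string>

struct PluginSketch {
    bool throttlingSet = false;  // persists across calls, like the new member below

    void SetConfig(const std::map<std::string, std::string>& config) {
        // Remember whether the user explicitly set the throttle option.
        throttlingSet = config.count("THROTTLE") > 0;
    }

    void ApplyThroughputHint(std::map<std::string, std::string>& config) const {
        // Apply the hint's default only if the user never set the key,
        // neither in this LoadNetwork config nor earlier via SetConfig.
        const bool inConfig = config.count("THROTTLE") > 0;
        if (!inConfig && !throttlingSet)
            config["THROTTLE"] = "1";
    }
};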

View File

@@ -21,6 +21,7 @@ class clDNNEngine : public InferenceEngine::IInferencePlugin,
     struct impl;
     std::shared_ptr<impl> _impl;
     bool streamsSet = false;
+    bool throttlingSet = false;
     // key: device_id, value: cldnn device
     std::map<std::string, cldnn::device::ptr> device_map;

View File

@@ -517,8 +517,12 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std
                 num_streams = std::max(default_num_streams, num_streams_less_aggressive);
             }
             auto num_requests = config.find(PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS);
-            if (num_requests != config.end())
-                num_streams = std::min(num_streams, PerfHintsConfig::CheckPerformanceHintRequestValue(num_requests->second));
+            if (engConfig.perfHintsConfig.ovPerfHintNumRequests) // set via SetConfig to the plugin (lower priority)
+                num_streams = std::min(num_streams,
+                                       engConfig.perfHintsConfig.ovPerfHintNumRequests);
+            if (num_requests != config.end()) // arrived with the config to LoadNetwork (and thus higher priority)
+                num_streams = std::min(num_streams,
+                                       PerfHintsConfig::CheckPerformanceHintRequestValue(num_requests->second));
             config[PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS] = std::to_string(num_streams);
         }
     }
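
The hunk above establishes the capping order for the CPU stream count: the hint-derived value is limited first by the ovPerfHintNumRequests remembered from SetConfig, then by the PERFORMANCE_HINT_NUM_REQUESTS passed to LoadNetwork, so the per-network value prevails when both are given. A small self-contained illustration with made-up numbers:

#include <algorithm>
#include <cassert>

int main() {
    int num_streams = 8;          // e.g. derived from the THROUGHPUT hint
    int set_config_requests = 4;  // ovPerfHintNumRequests, set earlier via SetConfig
    int load_net_requests = 2;    // PERFORMANCE_HINT_NUM_REQUESTS passed to LoadNetwork

    if (set_config_requests)      // lower priority: plugin-level setting
        num_streams = std::min(num_streams, set_config_requests);
    if (load_net_requests)        // higher priority: per-network setting
        num_streams = std::min(num_streams, load_net_requests);

    assert(num_streams == 2);
    return 0;
}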

View File

@@ -27,7 +27,12 @@ namespace {
         {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "8"}},
         {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::NO}},
         {{InferenceEngine::PluginConfigParams::KEY_CPU_BIND_THREAD, InferenceEngine::PluginConfigParams::YES}},
-        {{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}}
+        {{InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_LIMIT, "10"}},
+        // check that the hints don't override the customer value (now for streams, later for other config opts)
+        {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
+         {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}},
+        {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
+         {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "3"}},
     };
 const std::vector<std::map<std::string, std::string>> MultiConfigs = {

View File

@@ -97,6 +97,13 @@ namespace {
         {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY}},
         {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
          {InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT_NUM_REQUESTS, "1"}},
+        // check that the hints don't override the customer value (now for streams/throttling, later for other config opts)
+        {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
+         {InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, "3"}},
+        {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::LATENCY},
+         {InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, "3"}},
+        {{InferenceEngine::PluginConfigParams::KEY_PERFORMANCE_HINT, InferenceEngine::PluginConfigParams::THROUGHPUT},
+         {InferenceEngine::GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE, "0"}},
     };
     IE_SUPPRESS_DEPRECATED_END

View File

@@ -181,6 +181,29 @@ namespace BehaviorTestsDefinitions {
     using CorrectConfigAPITests = BehaviorTestsUtils::BehaviorTestsBasic;

+    TEST_P(CorrectConfigAPITests, canLoadCorrectNetworkAndCheckConfig) {
+        InferenceEngine::CNNNetwork cnnNet(function);
+        auto execNet = ie->LoadNetwork(cnnNet, targetDevice, configuration);
+        for (const auto& configItem : configuration) {
+            InferenceEngine::Parameter param;
+            ASSERT_NO_THROW(param = execNet.GetConfig(configItem.first));
+            ASSERT_FALSE(param.empty());
+            ASSERT_EQ(param, InferenceEngine::Parameter(configItem.second));
+        }
+    }
+
+    TEST_P(CorrectConfigAPITests, canSetCorrectConfigLoadNetworkAndCheckConfig) {
+        InferenceEngine::CNNNetwork cnnNet(function);
+        ASSERT_NO_THROW(ie->SetConfig(configuration, targetDevice));
+        auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
+        for (const auto& configItem : configuration) {
+            InferenceEngine::Parameter param;
+            ASSERT_NO_THROW(param = execNet.GetConfig(configItem.first));
+            ASSERT_FALSE(param.empty());
+            ASSERT_EQ(param, InferenceEngine::Parameter(configItem.second));
+        }
+    }
+
     TEST_P(CorrectConfigAPITests, CanSetExclusiveAsyncRequests) {
         // Skip test according to plugin-specific disabledTestPatterns() (if any)
         SKIP_IF_CURRENT_TEST_IS_DISABLED()
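
These TEST_P cases only execute once an instantiation binds them to concrete parameters; the actual instantiations live alongside the per-plugin config lists edited above. A hedged sketch of what such an instantiation typically looks like in this suite (the device name, precision, and the configs variable are assumptions, not quoted from the commit):

INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, CorrectConfigAPITests,
                         ::testing::Combine(
                             ::testing::Values(InferenceEngine::Precision::FP32),
                             ::testing::Values("CPU"),       // assumed target device
                             ::testing::ValuesIn(configs)),  // e.g. the config vectors above
                         CorrectConfigAPITests::getTestCaseName);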