diff --git a/docs/template_plugin/src/template_config.cpp b/docs/template_plugin/src/template_config.cpp
index f10d6576671..e7ad8a473d0 100644
--- a/docs/template_plugin/src/template_config.cpp
+++ b/docs/template_plugin/src/template_config.cpp
@@ -40,7 +40,11 @@ Configuration::Configuration(const ConfigMap& config, const Configuration& defau
 }
 
 InferenceEngine::Parameter Configuration::Get(const std::string& name) const {
-    if (name == CONFIG_KEY(DEVICE_ID)) {
+    auto streamExecutorConfigKeys = _streamsExecutorConfig.SupportedKeys();
+    if ((streamExecutorConfigKeys.end() !=
+         std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), name))) {
+        return _streamsExecutorConfig.GetConfig(name);
+    } else if (name == CONFIG_KEY(DEVICE_ID)) {
         return {std::to_string(deviceId)};
     } else if (name == CONFIG_KEY(PERF_COUNT)) {
         return {perfCount};
diff --git a/src/inference/dev_api/threading/ie_istreams_executor.hpp b/src/inference/dev_api/threading/ie_istreams_executor.hpp
index 4882f240618..1ab1bb47d62 100644
--- a/src/inference/dev_api/threading/ie_istreams_executor.hpp
+++ b/src/inference/dev_api/threading/ie_istreams_executor.hpp
@@ -56,7 +56,7 @@ public:
          * @brief Supported Configuration keys
          * @return vector of supported configuration keys
          */
-        std::vector<std::string> SupportedKeys();
+        std::vector<std::string> SupportedKeys() const;
 
         /**
          * @brief Parses configuration key/value pair
@@ -70,7 +70,7 @@ public:
          * @param key configuration key
          * @return configuration value wrapped into Parameter
          */
-        Parameter GetConfig(const std::string& key);
+        Parameter GetConfig(const std::string& key) const;
 
         /**
          * @brief Create appropriate multithreaded configuration
diff --git a/src/inference/src/threading/ie_istreams_executor.cpp b/src/inference/src/threading/ie_istreams_executor.cpp
index e6bcdc2dad4..ca6bfe27ebf 100644
--- a/src/inference/src/threading/ie_istreams_executor.cpp
+++ b/src/inference/src/threading/ie_istreams_executor.cpp
@@ -15,16 +15,20 @@
 #include "ie_parameter.hpp"
 #include "ie_plugin_config.hpp"
 #include "ie_system_conf.h"
+#include "openvino/runtime/properties.hpp"
 
 namespace InferenceEngine {
 IStreamsExecutor::~IStreamsExecutor() {}
 
-std::vector<std::string> IStreamsExecutor::Config::SupportedKeys() {
+std::vector<std::string> IStreamsExecutor::Config::SupportedKeys() const {
     return {
         CONFIG_KEY(CPU_THROUGHPUT_STREAMS),
         CONFIG_KEY(CPU_BIND_THREAD),
         CONFIG_KEY(CPU_THREADS_NUM),
         CONFIG_KEY_INTERNAL(CPU_THREADS_PER_STREAM),
+        ov::streams::num.name(),
+        ov::inference_num_threads.name(),
+        ov::affinity.name(),
     };
 }
 int IStreamsExecutor::Config::GetDefaultNumStreams() {
@@ -59,6 +63,29 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
                        << ". Expected only YES(binds to cores) / NO(no binding) / NUMA(binds to NUMA nodes) / "
                           "HYBRID_AWARE (let the runtime recognize and use the hybrid cores)";
         }
+    } else if (key == ov::affinity) {
+        ov::Affinity affinity;
+        std::stringstream{value} >> affinity;
+        switch (affinity) {
+        case ov::Affinity::NONE:
+            _threadBindingType = ThreadBindingType::NONE;
+            break;
+        case ov::Affinity::CORE: {
+#if (defined(__APPLE__) || defined(_WIN32))
+            _threadBindingType = ThreadBindingType::NUMA;
+#else
+            _threadBindingType = ThreadBindingType::CORES;
+#endif
+        } break;
+        case ov::Affinity::NUMA:
+            _threadBindingType = ThreadBindingType::NUMA;
+            break;
+        case ov::Affinity::HYBRID_AWARE:
+            _threadBindingType = ThreadBindingType::HYBRID_AWARE;
+            break;
+        default:
+            OPENVINO_UNREACHABLE("Unsupported affinity type");
+        }
     } else if (key == CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) {
         if (value == CONFIG_VALUE(CPU_THROUGHPUT_NUMA)) {
             _streams = static_cast<int>(getAvailableNUMANodes().size());
@@ -80,7 +107,23 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
             }
             _streams = val_i;
         }
-    } else if (key == CONFIG_KEY(CPU_THREADS_NUM)) {
+    } else if (key == ov::streams::num) {
+        int32_t streams = std::stoi(value);
+        if (streams == ov::streams::NUMA) {
+            _streams = static_cast<int32_t>(getAvailableNUMANodes().size());
+        } else if (streams == ov::streams::AUTO) {
+            // bare minimum of streams (that evenly divides available number of cores)
+            _streams = GetDefaultNumStreams();
+        } else if (streams >= 0) {
+            _streams = streams;
+        } else {
+            OPENVINO_UNREACHABLE("Wrong value for property key ",
+                                 ov::streams::num.name(),
+                                 ". Expected non negative numbers (#streams) or ",
+                                 "ov::streams::NUMA|ov::streams::AUTO, Got: ",
+                                 streams);
+        }
+    } else if (key == CONFIG_KEY(CPU_THREADS_NUM) || key == ov::inference_num_threads) {
         int val_i;
         try {
             val_i = std::stoi(value);
@@ -111,26 +154,37 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
     }
 }
 
-Parameter IStreamsExecutor::Config::GetConfig(const std::string& key) {
-    if (key == CONFIG_KEY(CPU_BIND_THREAD)) {
+Parameter IStreamsExecutor::Config::GetConfig(const std::string& key) const {
+    if (key == ov::affinity) {
+        switch (_threadBindingType) {
+        case IStreamsExecutor::ThreadBindingType::NONE:
+            return ov::Affinity::NONE;
+        case IStreamsExecutor::ThreadBindingType::CORES:
+            return ov::Affinity::CORE;
+        case IStreamsExecutor::ThreadBindingType::NUMA:
+            return ov::Affinity::NUMA;
+        case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE:
+            return ov::Affinity::HYBRID_AWARE;
+        }
+    } else if (key == CONFIG_KEY(CPU_BIND_THREAD)) {
         switch (_threadBindingType) {
         case IStreamsExecutor::ThreadBindingType::NONE:
             return {CONFIG_VALUE(NO)};
-            break;
         case IStreamsExecutor::ThreadBindingType::CORES:
             return {CONFIG_VALUE(YES)};
-            break;
         case IStreamsExecutor::ThreadBindingType::NUMA:
             return {CONFIG_VALUE(NUMA)};
-            break;
         case IStreamsExecutor::ThreadBindingType::HYBRID_AWARE:
             return {CONFIG_VALUE(HYBRID_AWARE)};
-            break;
         }
     } else if (key == CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) {
         return {std::to_string(_streams)};
+    } else if (key == ov::streams::num) {
+        return decltype(ov::streams::num)::value_type{_streams};
     } else if (key == CONFIG_KEY(CPU_THREADS_NUM)) {
         return {std::to_string(_threads)};
+    } else if (key == ov::inference_num_threads) {
+        return decltype(ov::inference_num_threads)::value_type{_threads};
     } else if (key == CONFIG_KEY_INTERNAL(CPU_THREADS_PER_STREAM)) {
         return {std::to_string(_threadsPerStream)};
     } else {
diff --git a/src/tests/functional/inference_engine/task_executor_tests.cpp b/src/tests/functional/inference_engine/task_executor_tests.cpp
index 33bc7f90724..ec8d247258b 100644
--- a/src/tests/functional/inference_engine/task_executor_tests.cpp
+++ b/src/tests/functional/inference_engine/task_executor_tests.cpp
@@ -189,16 +189,6 @@ TEST_P(ASyncTaskExecutorTests, runAndWaitDoesNotOwnTasks) {
 
 class StreamsExecutorConfigTest : public ::testing::Test {};
 
-TEST_F(StreamsExecutorConfigTest, streamsExecutorConfigReturnStrings) {
-    auto streams = getNumberOfCPUCores();
-    auto threads = parallel_get_max_threads();
-    auto config = IStreamsExecutor::Config::MakeDefaultMultiThreaded({"TestCPUStreamsExecutor",
-        streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE});
-    for (auto&& key : config.SupportedKeys()) {
-        ASSERT_NO_THROW(config.GetConfig(key).as<std::string>());
-    }
-}
-
 static auto Executors = ::testing::Values(
     [] {
         auto streams = getNumberOfCPUCores();
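Below is a minimal usage sketch, not part of the patch itself, showing how the new `ov::streams::num`, `ov::inference_num_threads` and `ov::affinity` keys are expected to round-trip through `IStreamsExecutor::Config::SetConfig`/`GetConfig` after this change. The executor name and the concrete values are illustrative assumptions; it also assumes the dev API header `threading/ie_istreams_executor.hpp` and `openvino/runtime/properties.hpp` are on the include path.

```cpp
// Illustrative sketch only (not part of the patch); values and the executor
// name are arbitrary examples.
#include <iostream>
#include <string>

#include "ie_plugin_config.hpp"
#include "openvino/runtime/properties.hpp"
#include "threading/ie_istreams_executor.hpp"

int main() {
    // Start from a default config; "ExampleStreamsExecutor" is just a label.
    InferenceEngine::IStreamsExecutor::Config config{"ExampleStreamsExecutor"};

    // The OpenVINO 2.0 keys are now accepted alongside the legacy CONFIG_KEY ones.
    config.SetConfig(ov::streams::num.name(), "4");            // four execution streams
    config.SetConfig(ov::inference_num_threads.name(), "8");   // eight threads in total
    config.SetConfig(ov::affinity.name(), "NUMA");             // bind threads to NUMA nodes

    // SupportedKeys()/GetConfig() are const now, so they work on a const reference.
    const auto& constConfig = config;
    for (auto&& key : constConfig.SupportedKeys()) {
        InferenceEngine::Parameter value = constConfig.GetConfig(key);  // should not throw
        std::cout << key << (value.empty() ? " (unset)" : " (set)") << std::endl;
    }

    // Legacy keys still report strings, e.g. the stream count set via ov::streams::num.
    std::cout << constConfig.GetConfig(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)).as<std::string>() << std::endl;
    return 0;
}
```

This also mirrors why the `streamsExecutorConfigReturnStrings` test is removed above: with the new keys, `GetConfig` returns typed values (e.g. `ov::Affinity`, `ov::streams::Num`) rather than strings, so casting every supported key with `.as<std::string>()` no longer holds.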