migrate threading-related interface from API 1.0 to 2.0 (#21167)

* migrate threading-related interface from API 1.0 to 2.0

* fix code style

* fix @ref issue in doc

* change angle-bracket includes (<>) to quotation marks

* restore threading-related interface API 1.0

* restore the changes to legacy code
This commit is contained in:
Sun Xiaoxia
2023-11-24 14:52:44 +08:00
committed by GitHub
parent cf0c141e12
commit b7edd5df69
14 changed files with 59 additions and 38 deletions

View File

@@ -6,16 +6,16 @@
# define IN_OV_COMPONENT
# define WAS_OV_LIBRARY_DEFINED
#endif
#include <threading/ie_cpu_streams_executor.hpp>
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#ifdef WAS_OV_LIBRARY_DEFINED
# undef IN_OV_COMPONENT
# undef WAS_OV_LIBRARY_DEFINED
#endif
#include <memory>
#include <future>
#include <iostream>
#include <memory>
void example1() {
// ! [itask_executor:define_pipeline]
@@ -24,9 +24,12 @@ void example1() {
// When the promise is created we can get std::future to wait the result
auto future = promise->get_future();
// Rather simple task
InferenceEngine::Task task = [] {std::cout << "Some Output" << std::endl; };
ov::threading::Task task = [] {
std::cout << "Some Output" << std::endl;
};
// Create an executor
InferenceEngine::ITaskExecutor::Ptr taskExecutor = std::make_shared<InferenceEngine::CPUStreamsExecutor>();
ov::threading::ITaskExecutor::Ptr taskExecutor =
std::make_shared<ov::threading::CPUStreamsExecutor>(ov::threading::IStreamsExecutor::Config{});
if (taskExecutor == nullptr) {
// ProcessError(e);
return;

View File

@@ -27,6 +27,11 @@ namespace threading {
*/
class OPENVINO_RUNTIME_API CPUStreamsExecutor : public IStreamsExecutor {
public:
/**
* @brief A shared pointer to a CPUStreamsExecutor object
*/
using Ptr = std::shared_ptr<CPUStreamsExecutor>;
/**
* @brief Constructor
* @param config Stream executor parameters

View File

@@ -12,7 +12,7 @@
#include <memory>
#include <string>
#include "threading/ie_itask_executor.hpp"
#include "openvino/runtime/threading/itask_executor.hpp"
namespace ov {
namespace threading {
@@ -23,6 +23,11 @@ namespace threading {
*/
class ImmediateExecutor : public ITaskExecutor {
public:
/**
* @brief A shared pointer to a ImmediateExecutor object
*/
using Ptr = std::shared_ptr<ImmediateExecutor>;
/**
* @brief Destroys the object.
*/

View File

@@ -32,6 +32,11 @@ namespace threading {
*/
class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor {
public:
/**
* A shared pointer to IStreamsExecutor interface
*/
using Ptr = std::shared_ptr<IStreamsExecutor>;
/**
* @brief Defines inference thread binding type
*/

View File

@@ -46,6 +46,11 @@ run tasks in current thread.
*/
class OPENVINO_RUNTIME_API ITaskExecutor {
public:
/**
* A shared pointer to ITaskExecutor interface
*/
using Ptr = std::shared_ptr<ITaskExecutor>;
/**
* @brief Destroys the object.
*/

View File

@@ -16,7 +16,7 @@
#include "openvino/runtime/properties.hpp"
#include "openvino/runtime/threading/cpu_streams_info.hpp"
#include "openvino/util/log.hpp"
#include "threading/ie_parallel_custom_arena.hpp"
#include "parallel_custom_arena.hpp"
namespace ov {
namespace threading {

View File

@@ -10,7 +10,6 @@
#include <memory>
#include <mutex>
#include <string>
#include <threading/ie_executor_manager.hpp>
#include <vector>
#include "any_copy.hpp"
@@ -38,6 +37,7 @@
#include "openvino/runtime/compiled_model.hpp"
#include "openvino/runtime/core.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/runtime/threading/executor_manager.hpp"
#include "openvino/util/common_util.hpp"
#include "openvino/util/file_util.hpp"
#include "openvino/util/shared_object.hpp"
@@ -391,7 +391,7 @@ Parameter Core::GetConfig(const std::string& deviceName, const std::string& name
}
if (name == CONFIG_KEY(FORCE_TBB_TERMINATE)) {
const auto flag = executorManager()->getTbbFlag();
const auto flag = ov::threading::executor_manager()->get_property(ov::force_tbb_terminate.name()).as<bool>();
return flag ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
}

View File

@@ -8,13 +8,14 @@
#include <future>
#include <ie_parallel.hpp>
#include <thread>
#include <threading/ie_cpu_streams_executor.hpp>
#include <threading/ie_immediate_executor.hpp>
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#include "openvino/runtime/threading/immediate_executor.hpp"
using namespace ::testing;
using namespace std;
using namespace InferenceEngine;
using namespace InferenceEngine::details;
using namespace ov;
using namespace threading;
static constexpr const auto MAX_NUMBER_OF_TASKS_IN_QUEUE = 10;
@@ -210,7 +211,7 @@ TEST_P(ASyncTaskExecutorTests, runAndWaitDoesNotOwnTasks) {
useCount = sharedCounter.use_count();
}};
sharedCounter.reset();
taskExecutor->runAndWait(tasks);
taskExecutor->run_and_wait(tasks);
ASSERT_EQ(1, useCount);
}
@@ -218,7 +219,7 @@ class StreamsExecutorConfigTest : public ::testing::Test {};
static auto Executors = ::testing::Values(
[] {
auto streams = getNumberOfCPUCores();
auto streams = get_number_of_cpu_cores();
auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(
IStreamsExecutor::Config{"TestCPUStreamsExecutor",
@@ -227,7 +228,7 @@ static auto Executors = ::testing::Values(
IStreamsExecutor::ThreadBindingType::NONE});
},
[] {
auto streams = getNumberOfLogicalCPUCores(true);
auto streams = get_number_of_logical_cpu_cores(true);
auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(
IStreamsExecutor::Config{"TestCPUStreamsExecutor",
@@ -236,7 +237,7 @@ static auto Executors = ::testing::Values(
IStreamsExecutor::ThreadBindingType::NONE});
},
[] {
auto streams = getNumberOfLogicalCPUCores(false);
auto streams = get_number_of_logical_cpu_cores(false);
auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(
IStreamsExecutor::Config{"TestCPUStreamsExecutor",
@@ -252,7 +253,7 @@ INSTANTIATE_TEST_SUITE_P(TaskExecutorTests, TaskExecutorTests, Executors);
static auto AsyncExecutors = ::testing::Values(
[] {
auto streams = getNumberOfCPUCores();
auto streams = get_number_of_cpu_cores();
auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(
IStreamsExecutor::Config{"TestCPUStreamsExecutor",
@@ -261,7 +262,7 @@ static auto AsyncExecutors = ::testing::Values(
IStreamsExecutor::ThreadBindingType::NONE});
},
[] {
auto streams = getNumberOfLogicalCPUCores(true);
auto streams = get_number_of_logical_cpu_cores(true);
auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(
IStreamsExecutor::Config{"TestCPUStreamsExecutor",
@@ -270,7 +271,7 @@ static auto AsyncExecutors = ::testing::Values(
IStreamsExecutor::ThreadBindingType::NONE});
},
[] {
auto streams = getNumberOfLogicalCPUCores(false);
auto streams = get_number_of_logical_cpu_cores(false);
auto threads = parallel_get_max_threads();
return std::make_shared<CPUStreamsExecutor>(
IStreamsExecutor::Config{"TestCPUStreamsExecutor",

View File

@@ -13,7 +13,6 @@
#include <ngraph/opsets/opset1.hpp>
#include <transformations/utils/utils.hpp>
#include <threading/ie_executor_manager.hpp>
#include "openvino/runtime/auto/properties.hpp"
#include "openvino/runtime/device_id_parser.hpp"
#include "openvino/runtime/internal_properties.hpp"

View File

@@ -13,7 +13,7 @@
#include "openvino/core/type/element_type.hpp"
#include "openvino/runtime/intel_cpu/properties.hpp"
#include "serialize.h"
#include "threading/ie_executor_manager.hpp"
#include "openvino/runtime/threading/executor_manager.hpp"
#include "transformations/transformation_pipeline.h"
#define FIX_62820 0
#if FIX_62820 && ((IE_THREAD == IE_THREAD_TBB) || (IE_THREAD == IE_THREAD_TBB_AUTO))
@@ -22,7 +22,7 @@
#include "openvino/runtime/properties.hpp"
#include "openvino/util/common_util.hpp"
#include "threading/ie_cpu_streams_executor.hpp"
#include "openvino/runtime/threading/cpu_streams_executor.hpp"
#include "transformations/utils/utils.hpp"
#include <cpu/x64/cpu_isa_traits.hpp>

View File

@@ -21,7 +21,6 @@
#include "openvino/runtime/threading/executor_manager.hpp"
#include "performance_heuristics.hpp"
#include "serialize.h"
#include "threading/ie_executor_manager.hpp"
#include "transformations/transformation_pipeline.h"
#include "transformations/utils/utils.hpp"
#include "utils/denormals.hpp"

View File

@@ -9,7 +9,6 @@
#include <map>
#include <memory>
#include <string>
#include <threading/ie_executor_manager.hpp>
#include <vector>
#include "gna_infer_request.hpp"

View File

@@ -7,7 +7,7 @@
#include <vector>
#include "tests_common.hpp"
#include <threading/ie_executor_manager.hpp>
#include "openvino/runtime/threading/executor_manager.hpp"
#ifdef _WIN32
# ifndef NOMINMAX
@@ -65,11 +65,11 @@ void TestsCommon::SetUp() {
if (memsize != 0) {
std::cout << "\nMEM_USAGE=" << getVmSizeInKB() << "KB\n";
}
InferenceEngine::executorManager()->clear();
ov::threading::executor_manager()->clear();
}
void TestsCommon::TearDown() {
InferenceEngine::executorManager()->clear();
ov::threading::executor_manager()->clear();
}
/**

View File

@@ -4,7 +4,7 @@
#pragma once
#include "threading/ie_executor_manager.hpp"
#include "openvino/runtime/threading/executor_manager.hpp"
#include "base/behavior_test_utils.hpp"
@@ -79,30 +79,30 @@ protected:
};
TEST_P(InferRequestConfigTest, canSetExclusiveAsyncRequests) {
ASSERT_EQ(0ul, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_EQ(0ul, ov::threading::executor_manager()->get_executors_number());
ASSERT_NO_THROW(createInferRequestWithConfig());
if (target_device.find(ov::test::utils::DEVICE_AUTO) == std::string::npos &&
target_device.find(ov::test::utils::DEVICE_MULTI) == std::string::npos &&
target_device.find(ov::test::utils::DEVICE_HETERO) == std::string::npos &&
target_device.find(ov::test::utils::DEVICE_BATCH) == std::string::npos) {
ASSERT_EQ(streamExecutorNumber, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_EQ(streamExecutorNumber, ov::threading::executor_manager()->get_executors_number());
}
}
TEST_P(InferRequestConfigTest, withoutExclusiveAsyncRequests) {
ASSERT_EQ(0u, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_EQ(0u, ov::threading::executor_manager()->get_executors_number());
ASSERT_NO_THROW(createInferRequestWithConfig());
if (target_device.find(ov::test::utils::DEVICE_AUTO) == std::string::npos &&
target_device.find(ov::test::utils::DEVICE_MULTI) == std::string::npos &&
target_device.find(ov::test::utils::DEVICE_HETERO) == std::string::npos &&
target_device.find(ov::test::utils::DEVICE_BATCH) == std::string::npos) {
ASSERT_EQ(streamExecutorNumber, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_EQ(streamExecutorNumber, ov::threading::executor_manager()->get_executors_number());
}
}
TEST_P(InferRequestConfigTest, ReusableCPUStreamsExecutor) {
ASSERT_EQ(0u, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::executorManager()->getIdleCPUStreamsExecutorsNumber());
ASSERT_EQ(0u, ov::threading::executor_manager()->get_executors_number());
ASSERT_EQ(0u, ov::threading::executor_manager()->get_idle_cpu_streams_executors_number());
{
// Load config
@@ -118,13 +118,13 @@ TEST_P(InferRequestConfigTest, ReusableCPUStreamsExecutor) {
execNet = ie->LoadNetwork(cnnNet, target_device, config);
execNet.CreateInferRequest();
if (target_device == ov::test::utils::DEVICE_KEEMBAY) {
ASSERT_EQ(1u, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_EQ(0u, InferenceEngine::executorManager()->getIdleCPUStreamsExecutorsNumber());
ASSERT_EQ(1u, ov::threading::executor_manager()->get_executors_number());
ASSERT_EQ(0u, ov::threading::executor_manager()->get_idle_cpu_streams_executors_number());
} else if ((target_device == ov::test::utils::DEVICE_AUTO) ||
(target_device == ov::test::utils::DEVICE_MULTI)) {
} else {
ASSERT_EQ(0u, InferenceEngine::executorManager()->getExecutorsNumber());
ASSERT_GE(2u, InferenceEngine::executorManager()->getIdleCPUStreamsExecutorsNumber());
ASSERT_EQ(0u, ov::threading::executor_manager()->get_executors_number());
ASSERT_GE(2u, ov::threading::executor_manager()->get_idle_cpu_streams_executors_number());
}
}
}