enable new property ov::hint::use_cpu_pinning (#16383)
* enable ov::hint::use_cpu_pinning
* update test cases per review comments
* update header file
* update header file
* Delete cpu_streams_calculation.hpp
* Revert "Delete cpu_streams_calculation.hpp"
This reverts commit a1074ca843.
* update config name
* fix code style issue
* resolve merge conflict
commit 86da15e621 (parent e7c1cdf982)
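
Usage sketch (not part of the diff): the snippet below shows how the new hint is intended to be used from C++, following the doc comment and the behavior test added further down; the "CPU" device name is only an example.

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;

        // Ask the CPU plugin to pin inference threads to processors.
        // The diff defaults the plugin config to pinning enabled (useCpuPinning = true).
        core.set_property("CPU", ov::hint::use_cpu_pinning(true));

        // The hint is read-write, so it can be queried back as a bool.
        bool pinned = core.get_property("CPU", ov::hint::use_cpu_pinning);
        return pinned ? 0 : 1;
    }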
@@ -119,6 +119,14 @@ ov_property_key_affinity;
 OPENVINO_C_VAR(const char*)
 ov_property_key_inference_num_threads;
 
+/**
+ * @brief Read-write property, it is high-level OpenVINO hint for using CPU pinning to bind CPU threads to processors
+ * during inference
+ * @ingroup ov_property_c_api
+ */
+OPENVINO_C_VAR(const char*)
+ov_property_key_hint_use_cpu_pinning;
+
 /**
  * @brief Read-write property, it is high-level OpenVINO hint for using hyper threading processors during CPU inference
  * @ingroup ov_property_c_api
@@ -23,6 +23,7 @@ const char* ov_property_key_num_streams = "NUM_STREAMS";
 const char* ov_property_key_affinity = "AFFINITY";
 const char* ov_property_key_inference_num_threads = "INFERENCE_NUM_THREADS";
 const char* ov_property_key_hint_performance_mode = "PERFORMANCE_HINT";
+const char* ov_property_key_hint_use_cpu_pinning = "USE_CPU_PINNING";
 const char* ov_property_key_hint_scheduling_core_type = "SCHEDULING_CORE_TYPE";
 const char* ov_property_key_hint_use_hyper_threading = "USE_HYPER_THREADING";
 const char* ov_property_key_hint_inference_precision = "INFERENCE_PRECISION_HINT";
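
Usage sketch for the C API key (not part of the diff), mirroring the calls exercised in the test hunks below; error handling is trimmed and "CPU" is illustrative.

    #include <openvino/c/openvino.h>

    int main() {
        ov_core_t* core = NULL;
        if (ov_core_create(&core) != 0)  // assumes ov_status_e OK == 0
            return 1;

        // Set the hint through its string key, exactly as the added test does.
        ov_core_set_property(core, "CPU", ov_property_key_hint_use_cpu_pinning, "YES");

        // Read it back; the returned string must be released with ov_free().
        char* value = NULL;
        ov_core_get_property(core, "CPU", ov_property_key_hint_use_cpu_pinning, &value);
        ov_free(value);

        ov_core_free(core);
        return 0;
    }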
@@ -260,6 +260,21 @@ TEST_P(ov_core_test, ov_core_set_property_enum_invalid) {
     EXPECT_STRNE(invalid_mode, ret);
     ov_free(ret);
 
+    const char* key_pin = ov_property_key_hint_use_cpu_pinning;
+    const char* val_pin = "YES";
+    OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_pin, val_pin));
+    ret = nullptr;
+    OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_pin, &ret));
+    EXPECT_STREQ(val_pin, ret);
+    ov_free(ret);
+
+    const char* invalid_val = "INVALID_VAL";
+    OV_EXPECT_NOT_OK(ov_core_set_property(core, device_name.c_str(), key_pin, invalid_val));
+    ret = nullptr;
+    OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_pin, &ret));
+    EXPECT_STRNE(invalid_val, ret);
+    ov_free(ret);
+
     const char* key_type = ov_property_key_hint_scheduling_core_type;
     const char* val_type = "PCORE_ONLY";
     OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_type, val_type));
@@ -268,10 +283,11 @@ TEST_P(ov_core_test, ov_core_set_property_enum_invalid) {
     EXPECT_STREQ(val_type, ret);
     ov_free(ret);
 
-    const char* invalid_val = "INVALID_VAL";
     OV_EXPECT_NOT_OK(ov_core_set_property(core, device_name.c_str(), key_type, invalid_val));
     ret = nullptr;
     OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_type, &ret));
+    EXPECT_STRNE(invalid_val, ret);
+    ov_free(ret);
 
     const char* key_ht = ov_property_key_hint_use_hyper_threading;
     const char* val_ht = "YES";
@@ -284,7 +300,6 @@ TEST_P(ov_core_test, ov_core_set_property_enum_invalid) {
     OV_EXPECT_NOT_OK(ov_core_set_property(core, device_name.c_str(), key_ht, invalid_val));
     ret = nullptr;
     OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_ht, &ret));
-
     EXPECT_STRNE(invalid_val, ret);
     ov_free(ret);
 
@@ -305,12 +320,21 @@ TEST_P(ov_core_test, ov_core_set_and_get_property_enum) {
     EXPECT_STREQ(affinity, ret);
     ov_free(ret);
 
+    const char* key_pin = ov_property_key_hint_use_cpu_pinning;
+    const char* val_pin = "YES";
+    OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_pin, val_pin));
+    ret = nullptr;
+    OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_pin, &ret));
+    EXPECT_STREQ(val_pin, ret);
+    ov_free(ret);
+
     const char* key_type = ov_property_key_hint_scheduling_core_type;
     const char* val_type = "PCORE_ONLY";
     OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_type, val_type));
     ret = nullptr;
     OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_type, &ret));
     EXPECT_STREQ(val_type, ret);
+    ov_free(ret);
 
     const char* key_ht = ov_property_key_hint_use_hyper_threading;
     const char* val_ht = "YES";
@@ -318,7 +342,6 @@ TEST_P(ov_core_test, ov_core_set_and_get_property_enum) {
     ret = nullptr;
     OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_ht, &ret));
     EXPECT_STREQ(val_ht, ret);
-
     ov_free(ret);
 
     ov_core_free(core);
@@ -72,6 +72,7 @@ void regmodule_properties(py::module m) {
     wrap_property_RW(m_hint, ov::hint::inference_precision, "inference_precision");
     wrap_property_RW(m_hint, ov::hint::model_priority, "model_priority");
     wrap_property_RW(m_hint, ov::hint::performance_mode, "performance_mode");
+    wrap_property_RW(m_hint, ov::hint::use_cpu_pinning, "use_cpu_pinning");
    wrap_property_RW(m_hint, ov::hint::scheduling_core_type, "scheduling_core_type");
     wrap_property_RW(m_hint, ov::hint::use_hyper_threading, "use_hyper_threading");
     wrap_property_RW(m_hint, ov::hint::execution_mode, "execution_mode");
@@ -225,6 +225,16 @@ def test_properties_ro(ov_property_ro, expected_value):
         "PERFORMANCE_HINT",
         ((properties.hint.PerformanceMode.UNDEFINED, properties.hint.PerformanceMode.UNDEFINED),),
     ),
+    (
+        properties.hint.use_cpu_pinning,
+        "USE_CPU_PINNING",
+        (
+            (True, True),
+            (False, False),
+            (1, True),
+            (0, False),
+        ),
+    ),
     (
         properties.hint.scheduling_core_type,
         "SCHEDULING_CORE_TYPE",
@@ -450,6 +460,7 @@ def test_single_property_setting(device):
         properties.affinity(properties.Affinity.NONE),
         properties.hint.inference_precision(Type.f32),
         properties.hint.performance_mode(properties.hint.PerformanceMode.LATENCY),
+        properties.hint.use_cpu_pinning(True),
         properties.hint.scheduling_core_type(properties.hint.SchedulingCoreType.PCORE_ONLY),
         properties.hint.use_hyper_threading(True),
         properties.hint.num_requests(12),
@@ -464,6 +475,7 @@ def test_single_property_setting(device):
         properties.affinity(): properties.Affinity.NONE,
         properties.hint.inference_precision(): Type.f32,
         properties.hint.performance_mode(): properties.hint.PerformanceMode.LATENCY,
+        properties.hint.use_cpu_pinning(): True,
         properties.hint.scheduling_core_type(): properties.hint.SchedulingCoreType.PCORE_ONLY,
         properties.hint.use_hyper_threading(): True,
         properties.hint.num_requests(): 12,
@@ -409,10 +409,26 @@ inline std::istream& operator>>(std::istream& is, SchedulingCoreType& core_type)
 static constexpr Property<SchedulingCoreType> scheduling_core_type{"SCHEDULING_CORE_TYPE"};
 
 /**
- * @brief This property allows hyper threading during inference.
+ * @brief This property allows CPU threads pinning during inference.
  * @ingroup ov_runtime_cpp_prop_api
  *
- * Developer can use this property to use or not use hyper threading during inference. If user does not explicitly set
+ * Developer can use this property to use or not use CPU threads pinning during inference. If user does not explicitly
+ * set value for this property, OpenVINO may choose any desired value based on internal logic.
+ *
+ * The following code is example to use this property.
+ *
+ * @code
+ * ie.set_property(ov::hint::use_cpu_pinning(true));
+ * ie.set_property(ov::hint::use_cpu_pinning(false));
+ * @endcode
+ */
+static constexpr Property<bool> use_cpu_pinning{"USE_CPU_PINNING"};
+
+/**
+ * @brief This property define if using hyper threading during inference.
+ * @ingroup ov_runtime_cpp_prop_api
+ *
+ * Developer can use this property to use or not use CPU pinning during inference. If user does not explicitly set
  * value for this property, OpenVINO may choose any desired value based on internal logic.
  *
  * The following code is example to use this property.
@@ -79,6 +79,17 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
             streamExecutorConfig.SetConfig(key, val);
         } else if (hintsConfigKeys.end() != std::find(hintsConfigKeys.begin(), hintsConfigKeys.end(), key)) {
             perfHintsConfig.SetConfig(key, val);
+        } else if (key == ov::hint::use_cpu_pinning.name()) {
+            if (val == PluginConfigParams::YES) {
+                useCpuPinning = true;
+                changedCpuPinning = true;
+            } else if (val == PluginConfigParams::NO) {
+                useCpuPinning = false;
+                changedCpuPinning = true;
+            } else {
+                IE_THROW() << "Wrong value " << val << "for property key " << ov::hint::use_cpu_pinning.name()
+                           << ". Expected only true/false." << std::endl;
+            }
         } else if (key == ov::hint::scheduling_core_type.name()) {
             const auto core_type = ov::util::from_string(val, ov::hint::scheduling_core_type);
             if (core_type == ov::hint::SchedulingCoreType::ANY_CORE ||
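
For context (a sketch, not part of the diff): the branch above serves the untyped, string-based configuration path, where PluginConfigParams::YES/NO map onto useCpuPinning and changedCpuPinning lets the plugin tell a user choice apart from the default. Both calls below should end up in that parser; only keys and values defined in this patch are used.

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;

        // Untyped form: raw key/value strings as registered in this patch.
        ov::AnyMap raw_config = {{"USE_CPU_PINNING", "YES"}};
        core.set_property("CPU", raw_config);

        // Typed form, equivalent to the line above.
        core.set_property("CPU", ov::hint::use_cpu_pinning(true));
        return 0;
    }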
@@ -51,6 +51,8 @@ struct Config {
     size_t rtCacheCapacity = 5000ul;
     InferenceEngine::IStreamsExecutor::Config streamExecutorConfig;
     InferenceEngine::PerfHintsConfig perfHintsConfig;
+    bool useCpuPinning = true;
+    bool changedCpuPinning = false;
     ov::hint::SchedulingCoreType schedulingCoreType = ov::hint::SchedulingCoreType::ANY_CORE;
     bool useHyperThreading = true;
     bool changedHyperThreading = false;
@@ -17,7 +17,6 @@
 
 namespace ov {
 namespace intel_cpu {
-
 /**
  * @brief Generate streams information table according to processors type table.
  * @param[in] input_streams is the targeted number of streams set by user via ov::num_streams or hints.
@@ -313,6 +313,7 @@ InferenceEngine::Parameter ExecNetwork::GetMetric(const std::string &name) const
         RO_property(ov::hint::performance_mode.name()),
         RO_property(ov::hint::execution_mode.name()),
         RO_property(ov::hint::num_requests.name()),
+        RO_property(ov::hint::use_cpu_pinning.name()),
         RO_property(ov::hint::scheduling_core_type.name()),
         RO_property(ov::hint::use_hyper_threading.name()),
         RO_property(ov::execution_devices.name()),
@@ -355,6 +356,9 @@ InferenceEngine::Parameter ExecNetwork::GetMetric(const std::string &name) const
     } else if (name == ov::hint::performance_mode) {
         const auto perfHint = ov::util::from_string(config.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
         return perfHint;
+    } else if (name == ov::hint::use_cpu_pinning.name()) {
+        const bool use_pin = config.useCpuPinning;
+        return decltype(ov::hint::use_cpu_pinning)::value_type(use_pin);
     } else if (name == ov::hint::scheduling_core_type) {
         const auto core_type = config.schedulingCoreType;
         return core_type;
@@ -596,6 +596,9 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string,
     } else if (name == ov::hint::performance_mode) {
         const auto perfHint = ov::util::from_string(engConfig.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
         return perfHint;
+    } else if (name == ov::hint::use_cpu_pinning) {
+        const bool pin_value = engConfig.useCpuPinning;
+        return decltype(ov::hint::use_cpu_pinning)::value_type(pin_value);
     } else if (name == ov::hint::scheduling_core_type) {
         const auto core_type = engConfig.schedulingCoreType;
         return core_type;
@@ -693,6 +696,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
         RW_property(ov::hint::performance_mode.name()),
         RW_property(ov::hint::execution_mode.name()),
         RW_property(ov::hint::num_requests.name()),
+        RW_property(ov::hint::use_cpu_pinning.name()),
         RW_property(ov::hint::scheduling_core_type.name()),
         RW_property(ov::hint::use_hyper_threading.name()),
         RW_property(ov::device::id.name()),
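
Discovery sketch (not part of the diff): since both property lists now advertise the key, an application can check ov::supported_properties before relying on the hint; the helper name below is made up.

    #include <algorithm>
    #include <openvino/openvino.hpp>

    // Returns true when the CPU plugin reports the new hint among its supported properties.
    bool cpu_reports_use_cpu_pinning(ov::Core& core) {
        const auto props = core.get_property("CPU", ov::supported_properties);
        return std::find(props.begin(), props.end(), ov::hint::use_cpu_pinning.name()) != props.end();
    }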
@@ -80,6 +80,8 @@ INSTANTIATE_TEST_SUITE_P(
         smoke_OVClassGetAvailableDevices, OVClassGetAvailableDevices,
         ::testing::Values("CPU"));
 
+INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetConfigTest, OVClassSetUseCpuPinningHintConfigTest, ::testing::Values("CPU"));
+
 INSTANTIATE_TEST_SUITE_P(
         smoke_OVClassSetModelPriorityConfigTest, OVClassSetModelPriorityConfigTest,
         ::testing::Values("MULTI", "AUTO"));
@@ -121,6 +121,7 @@ using OVClassLoadNetworkTest = OVClassQueryNetworkTest;
 using OVClassSetGlobalConfigTest = OVClassBaseTestP;
 using OVClassSetModelPriorityConfigTest = OVClassBaseTestP;
 using OVClassSetExecutionModeHintConfigTest = OVClassBaseTestP;
+using OVClassSetUseCpuPinningHintConfigTest = OVClassBaseTestP;
 using OVClassSetSchedulingCoreTypeHintConfigTest = OVClassBaseTestP;
 using OVClassSetUseHyperThreadingHintConfigTest = OVClassBaseTestP;
 using OVClassSetTBBForceTerminatePropertyTest = OVClassBaseTestP;
@@ -611,6 +612,23 @@ TEST_P(OVClassSetExecutionModeHintConfigTest, SetConfigNoThrow) {
     ASSERT_EQ(ov::hint::ExecutionMode::PERFORMANCE, ie.get_property(target_device, ov::hint::execution_mode));
 }
 
+TEST_P(OVClassSetUseCpuPinningHintConfigTest, SetConfigNoThrow) {
+    ov::Core ie = createCoreWithTemplate();
+
+    OV_ASSERT_PROPERTY_SUPPORTED(ov::hint::use_cpu_pinning);
+
+    bool defaultMode{};
+    ASSERT_NO_THROW(defaultMode = ie.get_property(target_device, ov::hint::use_cpu_pinning));
+    (void)defaultMode;
+
+    ASSERT_EQ(true, ie.get_property(target_device, ov::hint::use_cpu_pinning));
+
+    ie.set_property(target_device, ov::hint::use_cpu_pinning(false));
+    ASSERT_EQ(false, ie.get_property(target_device, ov::hint::use_cpu_pinning));
+    ie.set_property(target_device, ov::hint::use_cpu_pinning(true));
+    ASSERT_EQ(true, ie.get_property(target_device, ov::hint::use_cpu_pinning));
+}
+
 TEST_P(OVClassSetSchedulingCoreTypeHintConfigTest, SetConfigNoThrow) {
     ov::Core ie = createCoreWithTemplate();
 