Enable new property ov::hint::scheduling_core_type (#16106)

* enable apply_processor_type()

* declare PROCESSOR_TYPE

* enable readProperties

* test case for get_property()

* enable set_property() and test cases

* reduce changes

* fix code style issue

* fix python test case issue

* remove python interface

* move processor type definition out of dev_api

* refine coding

* add dependency

* update header file

* update description

* merge intel_cpu header file

* add inline in-code documentation

* change 'UNDEFINED' to 'DEFAULT'

* remove ProcTypeConfig

* refine change

* refine change

* refine process_type to scheduling_core_type

* refine description

* fix code style issue

* change to ov::hint::scheduling_core_type

* fix code style issue

* fix code style issue

* fix python issue

* fix python issue

* fix python issue

* fix python issue

* change core_type_cfg to ov::hint::SchedulingCoreType

* update test case for comments

* update test case for comments

* add default for comments

* update code style

* update for comments

* update for comments

* fix typo

* move cpu_map_scheduling into threading folder

* update for merge conflict

* update for code style
This commit is contained in:
Shen, Wanglei 2023-03-28 14:04:30 +08:00 committed by GitHub
parent 906939a1f1
commit a726f0ae38
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 270 additions and 7 deletions

View File

@ -133,6 +133,13 @@ ov_property_key_hint_use_hyper_threading;
OPENVINO_C_VAR(const char*)
ov_property_key_hint_performance_mode;
/**
 * @brief Read-write property, a high-level OpenVINO hint for the type of CPU core used during inference
 * @ingroup ov_property_c_api
 */
OPENVINO_C_VAR(const char*)
ov_property_key_hint_scheduling_core_type;
/**
* @brief Read-write property<ov_element_type_e> to set the hint for device to use specified precision for inference.
* @ingroup ov_property_c_api

View File

@ -23,6 +23,7 @@ const char* ov_property_key_num_streams = "NUM_STREAMS";
const char* ov_property_key_affinity = "AFFINITY";
const char* ov_property_key_inference_num_threads = "INFERENCE_NUM_THREADS";
const char* ov_property_key_hint_performance_mode = "PERFORMANCE_HINT";
const char* ov_property_key_hint_scheduling_core_type = "SCHEDULING_CORE_TYPE";
const char* ov_property_key_hint_use_hyper_threading = "USE_HYPER_THREADING";
const char* ov_property_key_hint_inference_precision = "INFERENCE_PRECISION_HINT";
const char* ov_property_key_hint_num_requests = "PERFORMANCE_HINT_NUM_REQUESTS";

View File

@ -260,6 +260,19 @@ TEST_P(ov_core_test, ov_core_set_property_enum_invalid) {
EXPECT_STRNE(invalid_mode, ret);
ov_free(ret);
const char* key_type = ov_property_key_hint_scheduling_core_type;
const char* val_type = "PCORE_ONLY";
OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_type, val_type));
ret = nullptr;
OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_type, &ret));
EXPECT_STREQ(val_type, ret);
ov_free(ret);
const char* invalid_val = "INVALID_VAL";
OV_EXPECT_NOT_OK(ov_core_set_property(core, device_name.c_str(), key_type, invalid_val));
ret = nullptr;
OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_type, &ret));
const char* key_ht = ov_property_key_hint_use_hyper_threading;
const char* val_ht = "YES";
OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_ht, val_ht));
@ -268,10 +281,10 @@ TEST_P(ov_core_test, ov_core_set_property_enum_invalid) {
EXPECT_STREQ(val_ht, ret);
ov_free(ret);
const char* invalid_val = "INVALID_VAL";
OV_EXPECT_NOT_OK(ov_core_set_property(core, device_name.c_str(), key_ht, invalid_val));
ret = nullptr;
OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_ht, &ret));
EXPECT_STRNE(invalid_val, ret);
ov_free(ret);
@ -292,12 +305,20 @@ TEST_P(ov_core_test, ov_core_set_and_get_property_enum) {
EXPECT_STREQ(affinity, ret);
ov_free(ret);
const char* key_type = ov_property_key_hint_scheduling_core_type;
const char* val_type = "PCORE_ONLY";
OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_type, val_type));
ret = nullptr;
OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_type, &ret));
EXPECT_STREQ(val_type, ret);
const char* key_ht = ov_property_key_hint_use_hyper_threading;
const char* val_ht = "YES";
OV_EXPECT_OK(ov_core_set_property(core, device_name.c_str(), key_ht, val_ht));
ret = nullptr;
OV_EXPECT_OK(ov_core_get_property(core, device_name.c_str(), key_ht, &ret));
EXPECT_STREQ(val_ht, ret);
ov_free(ret);
ov_core_free(core);

View File

@ -58,6 +58,11 @@ void regmodule_properties(py::module m) {
.value("THROUGHPUT", ov::hint::PerformanceMode::THROUGHPUT)
.value("CUMULATIVE_THROUGHPUT", ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT);
py::enum_<ov::hint::SchedulingCoreType>(m_hint, "SchedulingCoreType", py::arithmetic())
.value("ANY_CORE", ov::hint::SchedulingCoreType::ANY_CORE)
.value("PCORE_ONLY", ov::hint::SchedulingCoreType::PCORE_ONLY)
.value("ECORE_ONLY", ov::hint::SchedulingCoreType::ECORE_ONLY);
py::enum_<ov::hint::ExecutionMode>(m_hint, "ExecutionMode", py::arithmetic())
.value("UNDEFINED", ov::hint::ExecutionMode::UNDEFINED)
.value("PERFORMANCE", ov::hint::ExecutionMode::PERFORMANCE)
@ -67,6 +72,7 @@ void regmodule_properties(py::module m) {
wrap_property_RW(m_hint, ov::hint::inference_precision, "inference_precision");
wrap_property_RW(m_hint, ov::hint::model_priority, "model_priority");
wrap_property_RW(m_hint, ov::hint::performance_mode, "performance_mode");
wrap_property_RW(m_hint, ov::hint::scheduling_core_type, "scheduling_core_type");
wrap_property_RW(m_hint, ov::hint::use_hyper_threading, "use_hyper_threading");
wrap_property_RW(m_hint, ov::hint::execution_mode, "execution_mode");
wrap_property_RW(m_hint, ov::hint::num_requests, "num_requests");

View File

@ -171,6 +171,8 @@ py::object from_ov_any(const ov::Any& any) {
return py::cast(any.as<ov::hint::Priority>());
} else if (any.is<ov::hint::PerformanceMode>()) {
return py::cast(any.as<ov::hint::PerformanceMode>());
} else if (any.is<ov::hint::SchedulingCoreType>()) {
return py::cast(any.as<ov::hint::SchedulingCoreType>());
} else if (any.is<ov::hint::ExecutionMode>()) {
return py::cast(any.as<ov::hint::ExecutionMode>());
} else if (any.is<ov::log::Level>()) {
@ -336,6 +338,8 @@ ov::Any py_object_to_any(const py::object& py_obj) {
return py::cast<ov::hint::Priority>(py_obj);
} else if (py::isinstance<ov::hint::PerformanceMode>(py_obj)) {
return py::cast<ov::hint::PerformanceMode>(py_obj);
} else if (py::isinstance<ov::hint::SchedulingCoreType>(py_obj)) {
return py::cast<ov::hint::SchedulingCoreType>(py_obj);
} else if (py::isinstance<ov::log::Level>(py_obj)) {
return py::cast<ov::log::Level>(py_obj);
} else if (py::isinstance<ov::device::Type>(py_obj)) {

View File

@ -60,6 +60,14 @@ def test_properties_rw_base():
(properties.hint.PerformanceMode.CUMULATIVE_THROUGHPUT, "PerformanceMode.CUMULATIVE_THROUGHPUT", 3),
),
),
(
properties.hint.SchedulingCoreType,
(
(properties.hint.SchedulingCoreType.ANY_CORE, "SchedulingCoreType.ANY_CORE", 0),
(properties.hint.SchedulingCoreType.PCORE_ONLY, "SchedulingCoreType.PCORE_ONLY", 1),
(properties.hint.SchedulingCoreType.ECORE_ONLY, "SchedulingCoreType.ECORE_ONLY", 2),
),
),
(
properties.hint.ExecutionMode,
(
@ -219,6 +227,11 @@ def test_properties_ro(ov_property_ro, expected_value):
"PERFORMANCE_HINT",
((properties.hint.PerformanceMode.UNDEFINED, properties.hint.PerformanceMode.UNDEFINED),),
),
(
properties.hint.scheduling_core_type,
"SCHEDULING_CORE_TYPE",
((properties.hint.SchedulingCoreType.PCORE_ONLY, properties.hint.SchedulingCoreType.PCORE_ONLY),),
),
(
properties.hint.use_hyper_threading,
"USE_HYPER_THREADING",
@ -409,6 +422,7 @@ def test_single_property_setting(device):
properties.affinity(properties.Affinity.NONE),
properties.inference_precision(Type.f32),
properties.hint.performance_mode(properties.hint.PerformanceMode.LATENCY),
properties.hint.scheduling_core_type(properties.hint.SchedulingCoreType.PCORE_ONLY),
properties.hint.use_hyper_threading(True),
properties.hint.num_requests(12),
properties.streams.num(5),
@ -422,6 +436,7 @@ def test_single_property_setting(device):
properties.affinity(): properties.Affinity.NONE,
properties.inference_precision(): Type.f32,
properties.hint.performance_mode(): properties.hint.PerformanceMode.LATENCY,
properties.hint.scheduling_core_type(): properties.hint.SchedulingCoreType.PCORE_ONLY,
properties.hint.use_hyper_threading(): True,
properties.hint.num_requests(): 12,
properties.streams.num(): 5,
@ -434,6 +449,7 @@ def test_single_property_setting(device):
properties.affinity(): "NONE",
"INFERENCE_PRECISION_HINT": Type.f32,
properties.hint.performance_mode(): properties.hint.PerformanceMode.LATENCY,
properties.hint.scheduling_core_type(): properties.hint.SchedulingCoreType.PCORE_ONLY,
properties.hint.num_requests(): 12,
"NUM_STREAMS": properties.streams.Num(5),
},

View File

@ -15,6 +15,15 @@
namespace ov {
/**
 * @brief Limit the available CPU resources in the processor type table according to the scheduling core type property
 * @param[in] input_type input value of the ov::hint::scheduling_core_type property.
 * @param[in] proc_type_table candidate processors available at this time.
 * @return updated copy of proc_type_table with the unmatched processors removed.
 */
std::vector<std::vector<int>> apply_scheduling_core_type(const ov::hint::SchedulingCoreType input_type,
                                                         const std::vector<std::vector<int>>& proc_type_table);
/**
* @brief Limit available CPU resource in processors type table according to hyper threading property
* @param[in] input_type indicate value of property use_hyper_threading.
@ -24,6 +33,6 @@ namespace ov {
*/
std::vector<std::vector<int>> apply_hyper_threading(bool input_type,
const bool input_changed,
const std::vector<std::vector<int>> proc_type_table);
const std::vector<std::vector<int>>& proc_type_table);
} // namespace ov

View File

@ -351,6 +351,62 @@ inline std::istream& operator>>(std::istream& is, PerformanceMode& performance_m
*/
static constexpr Property<PerformanceMode> performance_mode{"PERFORMANCE_HINT"};
/**
 * @enum SchedulingCoreType
 * @brief This enum contains the core types that can be used for CPU tasks on different devices.
 */
enum class SchedulingCoreType {
    ANY_CORE = 0,    //!< Any processors can be used.
    PCORE_ONLY = 1,  //!< Only processors of performance-cores can be used.
    ECORE_ONLY = 2,  //!< Only processors of efficient-cores can be used.
};
/** @cond INTERNAL */
/// Serializes a SchedulingCoreType to its canonical string form; throws on an
/// out-of-range value (e.g. one produced by casting an arbitrary integer).
inline std::ostream& operator<<(std::ostream& os, const SchedulingCoreType& core_type) {
    const char* name = nullptr;
    if (core_type == SchedulingCoreType::ANY_CORE) {
        name = "ANY_CORE";
    } else if (core_type == SchedulingCoreType::PCORE_ONLY) {
        name = "PCORE_ONLY";
    } else if (core_type == SchedulingCoreType::ECORE_ONLY) {
        name = "ECORE_ONLY";
    } else {
        throw ov::Exception{"Unsupported core type!"};
    }
    return os << name;
}
/// Parses one whitespace-delimited token from the stream back into a
/// SchedulingCoreType; throws when the token matches no enumerator name.
inline std::istream& operator>>(std::istream& is, SchedulingCoreType& core_type) {
    std::string token;
    is >> token;

    const auto matches = [&token](const char* name) {
        return token == name;
    };

    if (matches("ANY_CORE")) {
        core_type = SchedulingCoreType::ANY_CORE;
    } else if (matches("PCORE_ONLY")) {
        core_type = SchedulingCoreType::PCORE_ONLY;
    } else if (matches("ECORE_ONLY")) {
        core_type = SchedulingCoreType::ECORE_ONLY;
    } else {
        throw ov::Exception{"Unsupported core type: " + token};
    }
    return is;
}
/** @endcond */
/**
 * @brief This property defines the CPU core type which can be used during inference.
 * @ingroup ov_runtime_cpp_prop_api
 *
 * Developers can use this property to select specific CPU cores for inference. Please refer to
 * SchedulingCoreType for the full list of supported core types.
 *
 * The following code is an example of using only efficient-cores for inference on a hybrid CPU. If a user sets this
 * configuration on a platform with only performance-cores, CPU inference will still run on the performance-cores.
 *
 * @code
 * ie.set_property(ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ECORE_ONLY));
 * @endcode
 */
static constexpr Property<SchedulingCoreType> scheduling_core_type{"SCHEDULING_CORE_TYPE"};
/**
* @brief This property allows hyper threading during inference.
* @ingroup ov_runtime_cpp_prop_api

View File

@ -8,9 +8,41 @@
namespace ov {
// Filters the processor type table according to the scheduling core type hint.
//
// input_type       value of ov::hint::scheduling_core_type.
// proc_type_table  candidate processors; row 0 holds platform totals indexed by
//                  ALL_PROC / MAIN_CORE_PROC / EFFICIENT_CORE_PROC / HYPER_THREADING_PROC.
// Returns an updated copy of proc_type_table with the unmatched processors removed.
//
// Fallback rule (matches the documentation of ov::hint::scheduling_core_type): when the
// requested core type is the only core type on the platform's opposite side — e.g.
// ECORE_ONLY on a P-core-only machine, or PCORE_ONLY on an E-core-only machine — the
// table is returned unchanged so inference can still run.
std::vector<std::vector<int>> apply_scheduling_core_type(const ov::hint::SchedulingCoreType input_type,
                                                         const std::vector<std::vector<int>>& proc_type_table) {
    std::vector<std::vector<int>> result_table = proc_type_table;

    switch (input_type) {
    case ov::hint::SchedulingCoreType::ANY_CORE:
        break;
    case ov::hint::SchedulingCoreType::PCORE_ONLY:
        // Fix: guard against an efficient-core-only platform (EFFICIENT == ALL); the
        // previous code zeroed ALL_PROC there, leaving no usable processors. The guard
        // mirrors the ECORE_ONLY branch below.
        if ((proc_type_table[0][EFFICIENT_CORE_PROC] > 0) &&
            (proc_type_table[0][EFFICIENT_CORE_PROC] != proc_type_table[0][ALL_PROC])) {
            for (auto& i : result_table) {
                i[ALL_PROC] -= i[EFFICIENT_CORE_PROC];
                i[EFFICIENT_CORE_PROC] = 0;
            }
        }
        break;
    case ov::hint::SchedulingCoreType::ECORE_ONLY:
        // Drop performance-cores (and their hyper-threading siblings) unless the
        // platform has efficient-cores only, in which case nothing needs removing.
        if ((proc_type_table[0][EFFICIENT_CORE_PROC] > 0) &&
            (proc_type_table[0][EFFICIENT_CORE_PROC] != proc_type_table[0][ALL_PROC])) {
            for (auto& i : result_table) {
                i[ALL_PROC] -= i[MAIN_CORE_PROC] + i[HYPER_THREADING_PROC];
                i[MAIN_CORE_PROC] = 0;
                i[HYPER_THREADING_PROC] = 0;
            }
        }
        break;
    default:
        throw ov::Exception{"Unsupported core type!"};
    }

    return result_table;
}
std::vector<std::vector<int>> apply_hyper_threading(bool input_value,
const bool input_changed,
const std::vector<std::vector<int>> proc_type_table) {
const std::vector<std::vector<int>>& proc_type_table) {
std::vector<std::vector<int>> result_table = proc_type_table;
if ((proc_type_table[0][HYPER_THREADING_PROC] > 0) &&

View File

@ -78,6 +78,18 @@ void Config::readProperties(const std::map<std::string, std::string> &prop) {
streamExecutorConfig.SetConfig(key, val);
} else if (hintsConfigKeys.end() != std::find(hintsConfigKeys.begin(), hintsConfigKeys.end(), key)) {
perfHintsConfig.SetConfig(key, val);
} else if (key == ov::hint::scheduling_core_type.name()) {
const auto core_type = ov::util::from_string(val, ov::hint::scheduling_core_type);
if (core_type == ov::hint::SchedulingCoreType::ANY_CORE ||
core_type == ov::hint::SchedulingCoreType::PCORE_ONLY ||
core_type == ov::hint::SchedulingCoreType::ECORE_ONLY) {
schedulingCoreType = core_type;
} else {
IE_THROW() << "Wrong value " << val << "for property key " << ov::hint::scheduling_core_type.name()
<< ". Expected only " << ov::hint::SchedulingCoreType::ANY_CORE << "/"
<< ov::hint::SchedulingCoreType::PCORE_ONLY << "/"
<< ov::hint::SchedulingCoreType::ECORE_ONLY << std::endl;
}
} else if (key == ov::hint::use_hyper_threading.name()) {
if (val == PluginConfigParams::YES) {
useHyperThreading = true;

View File

@ -7,6 +7,7 @@
#include <threading/ie_istreams_executor.hpp>
#include <ie_performance_hints.hpp>
#include <ie/ie_common.h>
#include <openvino/runtime/properties.hpp>
#include <openvino/util/common_util.hpp>
#include "utils/debug_caps_config.h"
@ -49,6 +50,7 @@ struct Config {
size_t rtCacheCapacity = 5000ul;
InferenceEngine::IStreamsExecutor::Config streamExecutorConfig;
InferenceEngine::PerfHintsConfig perfHintsConfig;
ov::hint::SchedulingCoreType schedulingCoreType = ov::hint::SchedulingCoreType::ANY_CORE;
bool useHyperThreading = true;
bool changedHyperThreading = false;
#if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64)

View File

@ -86,7 +86,7 @@ std::vector<std::vector<int>> get_streams_info_table(const int input_streams,
(n_threads_per_stream < proc_type_table[0][MAIN_CORE_PROC] * 2)) {
n_threads_per_stream = proc_type_table[0][MAIN_CORE_PROC];
} else if (n_threads_per_stream < proc_type_table[0][MAIN_CORE_PROC]) {
n_threads_per_stream = int(
n_threads_per_stream = static_cast<int>(
proc_type_table[0][MAIN_CORE_PROC] /
((proc_type_table[0][MAIN_CORE_PROC] + n_threads_per_stream - 1) / n_threads_per_stream));
}

View File

@ -11,6 +11,7 @@
#include <vector>
#include "openvino/runtime/properties.hpp"
namespace ov {
namespace intel_cpu {

View File

@ -308,6 +308,7 @@ InferenceEngine::Parameter ExecNetwork::GetMetric(const std::string &name) const
RO_property(ov::inference_precision.name()),
RO_property(ov::hint::performance_mode.name()),
RO_property(ov::hint::num_requests.name()),
RO_property(ov::hint::scheduling_core_type.name()),
RO_property(ov::hint::use_hyper_threading.name()),
RO_property(ov::execution_devices.name()),
};
@ -349,6 +350,9 @@ InferenceEngine::Parameter ExecNetwork::GetMetric(const std::string &name) const
} else if (name == ov::hint::performance_mode) {
const auto perfHint = ov::util::from_string(config.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
return perfHint;
} else if (name == ov::hint::scheduling_core_type) {
const auto core_type = config.schedulingCoreType;
return core_type;
} else if (name == ov::hint::use_hyper_threading.name()) {
const bool use_ht = config.useHyperThreading;
return decltype(ov::hint::use_hyper_threading)::value_type(use_ht);

View File

@ -514,6 +514,9 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string,
} else if (name == ov::hint::performance_mode) {
const auto perfHint = ov::util::from_string(engConfig.perfHintsConfig.ovPerfHint, ov::hint::performance_mode);
return perfHint;
} else if (name == ov::hint::scheduling_core_type) {
const auto core_type = engConfig.schedulingCoreType;
return core_type;
} else if (name == ov::hint::use_hyper_threading) {
const bool ht_value = engConfig.useHyperThreading;
return decltype(ov::hint::use_hyper_threading)::value_type(ht_value);
@ -605,6 +608,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
RW_property(ov::inference_precision.name()),
RW_property(ov::hint::performance_mode.name()),
RW_property(ov::hint::num_requests.name()),
RW_property(ov::hint::scheduling_core_type.name()),
RW_property(ov::hint::use_hyper_threading.name()),
RW_property(ov::device::id.name()),
};

View File

@ -52,6 +52,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetConfigTest,
OVClassSetUseHyperThreadingHintConfigTest,
::testing::Values("CPU"));
INSTANTIATE_TEST_SUITE_P(smoke_OVClassSetConfigTest,
OVClassSetSchedulingCoreTypeHintConfigTest,
::testing::Values("CPU"));
INSTANTIATE_TEST_SUITE_P(
smoke_OVClassGetMetricTest, OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES,
::testing::Values("CPU"));

View File

@ -16,6 +16,72 @@ using namespace ov;
namespace {
// One parameterized scenario for ov::apply_scheduling_core_type():
// apply `input_type` to `proc_type_table` and expect `result_table` back.
struct SchedulingCoreTypeTestCase {
    ov::hint::SchedulingCoreType input_type;        // scheduling core type hint under test
    std::vector<std::vector<int>> proc_type_table;  // input processor type table
    std::vector<std::vector<int>> result_table;     // expected filtered table
};
// Runs ov::apply_scheduling_core_type() on the parameterized test data and compares
// the filtered processor type table against the expected result.
class SchedulingCoreTypeTests : public CommonTestUtils::TestsCommon,
                                public testing::WithParamInterface<std::tuple<SchedulingCoreTypeTestCase>> {
public:
    void SetUp() override {
        const auto& test_data = std::get<0>(GetParam());

        // NOTE(review): the assertion intentionally lives in SetUp(); the matching
        // TEST_P body is empty (same pattern as the hyper-threading tests below).
        std::vector<std::vector<int>> test_result_table =
            ov::apply_scheduling_core_type(test_data.input_type, test_data.proc_type_table);

        ASSERT_EQ(test_data.result_table, test_result_table);
    }
};
// Table columns are {ALL_PROC, MAIN_CORE_PROC, EFFICIENT_CORE_PROC, HYPER_THREADING_PROC}
// (order inferred from apply_scheduling_core_type() and the expected results below).

// 2-socket server, no efficient-cores: every hint leaves the table unchanged.
SchedulingCoreTypeTestCase _2sockets_ALL = {
    ov::hint::SchedulingCoreType::ANY_CORE,
    {{208, 104, 0, 104}, {104, 52, 0, 52}, {104, 52, 0, 52}},
    {{208, 104, 0, 104}, {104, 52, 0, 52}, {104, 52, 0, 52}},
};

SchedulingCoreTypeTestCase _2sockets_P_CORE_ONLY = {
    ov::hint::SchedulingCoreType::PCORE_ONLY,
    {{208, 104, 0, 104}, {104, 52, 0, 52}, {104, 52, 0, 52}},
    {{208, 104, 0, 104}, {104, 52, 0, 52}, {104, 52, 0, 52}},
};

// ECORE_ONLY on a platform without efficient-cores falls back to the full table.
SchedulingCoreTypeTestCase _2sockets_E_CORE_ONLY = {
    ov::hint::SchedulingCoreType::ECORE_ONLY,
    {{208, 104, 0, 104}, {104, 52, 0, 52}, {104, 52, 0, 52}},
    {{208, 104, 0, 104}, {104, 52, 0, 52}, {104, 52, 0, 52}},
};

// 1-socket hybrid client CPU: 6 P-cores + 8 E-cores + 6 HT siblings.
SchedulingCoreTypeTestCase _1sockets_ALL = {
    ov::hint::SchedulingCoreType::ANY_CORE,
    {{20, 6, 8, 6}},
    {{20, 6, 8, 6}},
};

// PCORE_ONLY strips the 8 efficient-cores from the totals.
SchedulingCoreTypeTestCase _1sockets_P_CORE_ONLY = {
    ov::hint::SchedulingCoreType::PCORE_ONLY,
    {{20, 6, 8, 6}},
    {{12, 6, 0, 6}},
};

// ECORE_ONLY strips the performance-cores and their HT siblings.
SchedulingCoreTypeTestCase _1sockets_E_CORE_ONLY = {
    ov::hint::SchedulingCoreType::ECORE_ONLY,
    {{20, 6, 8, 6}},
    {{8, 0, 8, 0}},
};
// Body is intentionally empty: the per-case assertion runs in
// SchedulingCoreTypeTests::SetUp().
TEST_P(SchedulingCoreTypeTests, SchedulingCoreType) {}

INSTANTIATE_TEST_SUITE_P(SchedulingCoreTypeTable,
                         SchedulingCoreTypeTests,
                         testing::Values(_2sockets_ALL,
                                         _2sockets_P_CORE_ONLY,
                                         _2sockets_E_CORE_ONLY,
                                         _1sockets_ALL,
                                         _1sockets_P_CORE_ONLY,
                                         _1sockets_E_CORE_ONLY));
struct UseHTTestCase {
bool use_ht_value;
bool use_ht_changed;
@ -29,9 +95,7 @@ public:
const auto& test_data = std::get<0>(GetParam());
std::vector<std::vector<int>> test_result_table =
ov::apply_hyper_threading(test_data.use_ht_value,
test_data.use_ht_changed,
test_data.proc_type_table);
ov::apply_hyper_threading(test_data.use_ht_value, test_data.use_ht_changed, test_data.proc_type_table);
ASSERT_EQ(test_data.result_table, test_result_table);
}

View File

@ -121,6 +121,7 @@ using OVClassLoadNetworkTest = OVClassQueryNetworkTest;
using OVClassSetGlobalConfigTest = OVClassBaseTestP;
using OVClassSetModelPriorityConfigTest = OVClassBaseTestP;
using OVClassSetExecutionModeHintConfigTest = OVClassBaseTestP;
using OVClassSetSchedulingCoreTypeHintConfigTest = OVClassBaseTestP;
using OVClassSetUseHyperThreadingHintConfigTest = OVClassBaseTestP;
using OVClassSetTBBForceTerminatePropertyTest = OVClassBaseTestP;
using OVClassSetLogLevelConfigTest = OVClassBaseTestP;
@ -612,6 +613,25 @@ TEST_P(OVClassSetExecutionModeHintConfigTest, SetConfigNoThrow) {
ASSERT_EQ(ov::hint::ExecutionMode::PERFORMANCE, ie.get_property(target_device, ov::hint::execution_mode));
}
// Verifies ov::hint::scheduling_core_type round-trips through set/get_property:
// the default is ANY_CORE, and each enumerator can be set and read back.
TEST_P(OVClassSetSchedulingCoreTypeHintConfigTest, SetConfigNoThrow) {
    ov::Core ie = createCoreWithTemplate();

    OV_ASSERT_PROPERTY_SUPPORTED(ov::hint::scheduling_core_type);

    // Reading the property must not throw even before anything was set.
    ov::hint::SchedulingCoreType defaultMode{};
    ASSERT_NO_THROW(defaultMode = ie.get_property(target_device, ov::hint::scheduling_core_type));
    (void)defaultMode;

    // Plugin default is expected to be ANY_CORE.
    ASSERT_EQ(ov::hint::SchedulingCoreType::ANY_CORE, ie.get_property(target_device, ov::hint::scheduling_core_type));

    ie.set_property(target_device, ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::PCORE_ONLY));
    ASSERT_EQ(ov::hint::SchedulingCoreType::PCORE_ONLY, ie.get_property(target_device, ov::hint::scheduling_core_type));

    ie.set_property(target_device, ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ECORE_ONLY));
    ASSERT_EQ(ov::hint::SchedulingCoreType::ECORE_ONLY, ie.get_property(target_device, ov::hint::scheduling_core_type));

    ie.set_property(target_device, ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ANY_CORE));
    ASSERT_EQ(ov::hint::SchedulingCoreType::ANY_CORE, ie.get_property(target_device, ov::hint::scheduling_core_type));
}
TEST_P(OVClassSetUseHyperThreadingHintConfigTest, SetConfigNoThrow) {
ov::Core ie = createCoreWithTemplate();