Used new config for streams and threads (#10150)

* Used new config for streams and threads

* Fixed review comments in ba

* format fix

* fixed hello_query_device

* Added STL string io

* fixed tests

* Fixed test

* Fixed build

* fixed format

* Fixed build

* try fix win

* other any io specialization

* Fixed after merge

* renamed streams

* build fixed

* fixed build

* fixed format

* fix for old mac build

* Fixed type of exception

* test fix
Anton Pankratov, 2022-02-11 09:22:45 +03:00, committed by GitHub
parent 437bc3280d
commit 1621a5a0b5
35 changed files with 908 additions and 690 deletions


@@ -9,7 +9,7 @@ uint32_t n_streams = 2;
int64_t available_device_mem_size = 3221225472;
ov::AnyMap options = {
    ov::hint::model(model), // Required. Set the address of the target network. If this is not set, the MAX_BATCH_SIZE returns 1.
-   ov::streams::num(n_streams), // Optional. Set only when you want to estimate max batch size for a specific number of throughput streams. Default is 1 or throughput streams set by SetConfig.
    ov::num_streams(n_streams), // Optional. Set only when you want to estimate max batch size for a specific number of throughput streams. Default is 1 or throughput streams set by SetConfig.
    ov::intel_gpu::hint::available_device_mem(available_device_mem_size) // Optional. Set only when you want to limit the available device mem size.
};
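For context, a minimal usage sketch of such an options map (not part of the diff). The device name and the metric string below are illustrative assumptions; the three-argument get_property overload is the one the core.hpp changes in this commit rely on.

    // Hypothetical query using the options map above; "GPU" and
    // "MAX_BATCH_SIZE" are placeholder assumptions for illustration.
    ov::Core core;
    auto max_batch_size = core.get_property("GPU", "MAX_BATCH_SIZE", options).as<uint32_t>();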


@@ -124,12 +124,12 @@ Options:
    Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency
    estimations the number of streams should be set to 1.
    -nthreads "<integer>"     Optional. Number of threads to use for inference on the CPU (including HETERO and MULTI cases).
-   -enforcebf16="<true/false>"   Optional. By default floating point operations execution in bfloat16 precision are enforced if supported by platform.
-   -pin "YES"/"HYBRID_AWARE"/"NUMA"/"NO"
    -pin ("YES"|"CORE")/"HYBRID_AWARE"/"NUMA"/("NO"|"NONE")
                              Optional. Explicit inference threads binding options (leave empty to let the OpenVINO make a choice):
                              enabling threads->cores pinning ("YES", which is already default for a conventional CPU),
                              letting the runtime decide on the threads->different core types ("HYBRID_AWARE", which is default on the hybrid CPUs),
                              threads->(NUMA)nodes ("NUMA") or completely disable ("NO") CPU inference threads pinning.
    -infer_precision device_name:infer_precision1,device_name:infer_precision2   Optional. Hint that specifies inference precision.
    -ip "U8"/"FP16"/"FP32"    Optional. Specifies precision for all input layers of the network.
    -op "U8"/"FP16"/"FP32"    Optional. Specifies precision for all output layers of the network.
    -iop                      Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required.


@@ -191,8 +191,8 @@ static const char cache_dir_message[] = "Optional. Enables caching of loaded mod
static const char load_from_file_message[] = "Optional. Loads model from file directly without ReadNetwork."
                                             " All CNNNetwork options (like re-shape) will be ignored";

-// @brief message for quantization bits
-static const char gna_qb_message[] = "Optional. Weight bits for quantization: 8 or 16 (default)";
// @brief message for inference_precision
static const char inference_precision_message[] = "Optional. Inference precision";

static constexpr char inputs_precision_message[] = "Optional. Specifies precision for all input layers of the network.";

@@ -275,9 +275,6 @@ DEFINE_string(nstreams, "", infer_num_streams_message);
/// @brief The percentile which will be reported in latency metric
DEFINE_uint32(latency_percentile, 50, infer_latency_percentile_message);

-/// @brief Enforces bf16 execution with bfloat16 precision on systems having this capability
-DEFINE_bool(enforcebf16, false, enforce_bf16_message);

/// @brief Define parameter for batch size <br>
/// Default is 0 (that means don't specify)
DEFINE_uint32(b, 0, batch_size_message);

@@ -329,8 +326,8 @@ DEFINE_string(data_shape, "", data_shape_message);
/// @brief Define flag for layout shape <br>
DEFINE_string(layout, "", layout_message);

-/// @brief Define flag for quantization bits (default 16)
-DEFINE_int32(qb, 16, gna_qb_message);
/// @brief Define flag for inference precision
DEFINE_string(infer_precision, "f32", inference_precision_message);

/// @brief Specify precision for all input layers of the network
DEFINE_string(ip, "", inputs_precision_message);

@@ -391,7 +388,6 @@ static void show_usage() {
    std::cout << std::endl << " device-specific performance options:" << std::endl;
    std::cout << "    -nstreams \"<integer>\"     " << infer_num_streams_message << std::endl;
    std::cout << "    -nthreads \"<integer>\"     " << infer_num_threads_message << std::endl;
-   std::cout << "    -enforcebf16=<true/false>     " << enforce_bf16_message << std::endl;
    std::cout << "    -pin \"YES\"/\"HYBRID_AWARE\"/\"NO\"/\"NUMA\" " << infer_threads_pinning_message << std::endl;
#ifdef HAVE_DEVICE_MEM_SUPPORT
    std::cout << "    -use_device_mem " << use_device_mem_message << std::endl;

@@ -405,7 +401,7 @@ static void show_usage() {
    std::cout << "    -pcseq " << pcseq_message << std::endl;
    std::cout << "    -dump_config " << dump_config_message << std::endl;
    std::cout << "    -load_config " << load_config_message << std::endl;
-   std::cout << "    -qb " << gna_qb_message << std::endl;
    std::cout << "    -infer_precision \"<element type>\"" << inference_precision_message << std::endl;
    std::cout << "    -ip <value> " << inputs_precision_message << std::endl;
    std::cout << "    -op <value> " << outputs_precision_message << std::endl;
    std::cout << "    -iop \"<value>\" " << iop_message << std::endl;


@@ -157,7 +157,9 @@ int main(int argc, char* argv[]) {
    auto devices = parse_devices(device_name);

    // Parse nstreams per device
-   std::map<std::string, std::string> device_nstreams = parse_nstreams_value_per_device(devices, FLAGS_nstreams);
    std::map<std::string, std::string> device_nstreams = parse_value_per_device(devices, FLAGS_nstreams);
    std::map<std::string, std::string> device_infer_precision =
        parse_value_per_device(devices, FLAGS_infer_precision);

    // Load device config file if specified
    std::map<std::string, ov::AnyMap> config;
@@ -243,9 +245,7 @@ int main(int argc, char* argv[]) {
    bool perf_counts = false;
    // Update config per device according to command line parameters
    for (auto& device : devices) {
-       if (!config.count(device))
-           config[device] = {};
-       auto& device_config = config.at(device);
        auto& device_config = config[device];

        // high-level performance modes
        if (ov_perf_hint != ov::hint::PerformanceMode::UNDEFINED) {
@@ -276,24 +276,28 @@ int main(int argc, char* argv[]) {
        }
        perf_counts = (device_config.at(ov::enable_profiling.name()).as<bool>()) ? true : perf_counts;

        auto supported_properties = core.get_property(device, ov::supported_properties);
        auto supported = [&](const std::string& key) {
            return std::find(std::begin(supported_properties), std::end(supported_properties), key) !=
                   std::end(supported_properties);
        };
        // the rest are individual per-device settings (overriding the values set with perf modes)
        auto setThroughputStreams = [&]() {
            std::string key = getDeviceTypeFromName(device) + "_THROUGHPUT_STREAMS";
-           if (device_nstreams.count(device)) {
            auto it_device_nstreams = device_nstreams.find(device);
            if (it_device_nstreams != device_nstreams.end()) {
                // set to user defined value
-               auto supported_properties = core.get_property(device, ov::supported_properties);
-               if (std::find(supported_properties.begin(), supported_properties.end(), key) !=
-                   supported_properties.end()) {
-                   device_config[key] = device_nstreams.at(device);
-               } else if (std::find(supported_properties.begin(),
-                                    supported_properties.end(),
-                                    ov::streams::num.name()) != supported_properties.end()) {
                if (supported(key)) {
                    device_config[key] = it_device_nstreams->second;
                } else if (supported(ov::num_streams.name())) {
                    // Use API 2.0 key for streams
-                   key = ov::streams::num.name();
-                   device_config[key] = device_nstreams.at(device);
                    key = ov::num_streams.name();
                    device_config[key] = it_device_nstreams->second;
                } else {
                    throw std::logic_error("Device " + device + " doesn't support config key '" + key + "' " +
-                                          "and '" + ov::streams::num.name() + "'!" +
                                           "and '" + ov::num_streams.name() + "'!" +
                                           "Please specify -nstreams for correct devices in format "
                                           "<dev1>:<nstreams1>,<dev2>:<nstreams2>" +
                                           " or via configuration file.");
@@ -309,45 +313,66 @@ int main(int argc, char* argv[]) {
                           << slog::endl;
                if (std::string::npos == device.find("MYRIAD")) {  // MYRIAD sets the default number of
                                                                   // streams implicitly (without _AUTO)
-                   auto supported_properties = core.get_property(device, ov::supported_properties);
-                   if (std::find(supported_properties.begin(), supported_properties.end(), key) !=
-                       supported_properties.end()) {
                    if (supported(key)) {
                        device_config[key] = std::string(getDeviceTypeFromName(device) + "_THROUGHPUT_AUTO");
-                   } else if (std::find(supported_properties.begin(),
-                                        supported_properties.end(),
-                                        ov::streams::num.name()) != supported_properties.end()) {
                    } else if (supported(ov::num_streams.name())) {
                        // Use API 2.0 key for streams
-                       key = ov::streams::num.name();
-                       device_config[key] = std::to_string(ov::streams::AUTO);
                        key = ov::num_streams.name();
                        device_config[key] = ov::NumStreams::AUTO;
                    }
                }
            }
-           if (device_config.count(key))
-               device_nstreams[device] = device_config.at(key).as<std::string>();
            auto it_streams = device_config.find(ov::num_streams.name());
            if (it_streams != device_config.end())
                device_nstreams[device] = it_streams->second.as<std::string>();
        };
auto set_infer_precision = [&] {
auto it_device_infer_precision = device_infer_precision.find(device);
if (it_device_infer_precision != device_infer_precision.end()) {
// set to user defined value
if (!supported(ov::hint::inference_precision.name())) {
throw std::logic_error("Device " + device + " doesn't support config key '" +
ov::hint::inference_precision.name() + "'! " +
"Please specify -infer_precision for correct devices in format "
"<dev1>:<infer_precision1>,<dev2>:<infer_precision2>" +
" or via configuration file.");
}
device_config.emplace(ov::hint::inference_precision(it_device_infer_precision->second));
}
};
auto fix_pin_option = [](const std::string& str) -> std::string {
if (str == "NO")
return "NONE";
else if (str == "YES")
return "CORE";
else
return str;
};
if (supported(ov::inference_num_threads.name()) && isFlagSetInCommandLine("nthreads")) {
device_config.emplace(ov::inference_num_threads(FLAGS_nthreads));
}
if (supported(ov::affinity.name()) && isFlagSetInCommandLine("pin")) {
device_config.emplace(ov::affinity(fix_pin_option(FLAGS_pin)));
}
if (device.find("CPU") != std::string::npos) { // CPU supports few special performance-oriented keys if (device.find("CPU") != std::string::npos) { // CPU supports few special performance-oriented keys
// limit threading for CPU portion of inference // limit threading for CPU portion of inference
if (isFlagSetInCommandLine("nthreads")) if (!isFlagSetInCommandLine("pin")) {
device_config[CONFIG_KEY(CPU_THREADS_NUM)] = std::to_string(FLAGS_nthreads); auto it_affinity = device_config.find(ov::affinity.name());
if (it_affinity != device_config.end() && (device_name.find("MULTI") != std::string::npos) &&
if (isFlagSetInCommandLine("enforcebf16"))
device_config[CONFIG_KEY(ENFORCE_BF16)] = FLAGS_enforcebf16 ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
if (isFlagSetInCommandLine("pin")) {
// set to user defined value
device_config[CONFIG_KEY(CPU_BIND_THREAD)] = FLAGS_pin;
} else if (!device_config.count(CONFIG_KEY(CPU_BIND_THREAD))) {
if ((device_name.find("MULTI") != std::string::npos) &&
(device_name.find("GPU") != std::string::npos)) { (device_name.find("GPU") != std::string::npos)) {
slog::warn << "Turn off threads pinning for " << device slog::warn << "Turn off threads pinning for " << device
<< " device since multi-scenario with GPU device is used." << slog::endl; << " device since multi-scenario with GPU device is used." << slog::endl;
device_config[CONFIG_KEY(CPU_BIND_THREAD)] = CONFIG_VALUE(NO); it_affinity->second = ov::Affinity::NONE;
} }
} }
// for CPU execution, more throughput-oriented execution via streams // for CPU execution, more throughput-oriented execution via streams
setThroughputStreams(); setThroughputStreams();
set_infer_precision();
} else if (device.find("GPU") != std::string::npos) { } else if (device.find("GPU") != std::string::npos) {
// for GPU execution, more throughput-oriented execution via streams // for GPU execution, more throughput-oriented execution via streams
setThroughputStreams(); setThroughputStreams();
@@ -365,25 +390,7 @@ int main(int argc, char* argv[]) {
            device_config.emplace(ov::log::level(ov::log::Level::WARNING));
            setThroughputStreams();
        } else if (device.find("GNA") != std::string::npos) {
-           if (FLAGS_qb == 8)
-               device_config[GNA_CONFIG_KEY(PRECISION)] = "I8";
-           else
-               device_config[GNA_CONFIG_KEY(PRECISION)] = "I16";
-       } else {
-           auto supported_properties = core.get_property(device, ov::supported_properties);
-           auto supported = [&](const std::string& key) {
-               return std::find(std::begin(supported_properties), std::end(supported_properties), key) !=
-                      std::end(supported_properties);
-           };
-           if (supported(CONFIG_KEY(CPU_THREADS_NUM)) && isFlagSetInCommandLine("nthreads")) {
-               device_config[CONFIG_KEY(CPU_THREADS_NUM)] = std::to_string(FLAGS_nthreads);
-           }
-           if (supported(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) && isFlagSetInCommandLine("nstreams")) {
-               device_config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = FLAGS_nstreams;
-           }
-           if (supported(CONFIG_KEY(CPU_BIND_THREAD)) && isFlagSetInCommandLine("pin")) {
-               device_config[CONFIG_KEY(CPU_BIND_THREAD)] = FLAGS_pin;
-           }
            set_infer_precision();
        }
    }
@@ -669,7 +676,7 @@ int main(int argc, char* argv[]) {
            const std::string key = getDeviceTypeFromName(ds.first) + "_THROUGHPUT_STREAMS";
            device_nstreams[ds.first] = core.get_property(ds.first, key).as<std::string>();
        } catch (const ov::Exception&) {
-           device_nstreams[ds.first] = core.get_property(ds.first, ov::streams::num.name()).as<std::string>();
            device_nstreams[ds.first] = core.get_property(ds.first, ov::num_streams.name()).as<std::string>();
        }
    }
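The properties configured above can also be passed directly at compile time; a hedged sketch, assuming the variadic compile_model overload and placeholder values:

    // Sketch only: `core` is an ov::Core, `model` a loaded ov::Model.
    auto compiled = core.compile_model(model,
                                       "CPU",
                                       ov::inference_num_threads(4),
                                       ov::affinity(ov::Affinity::CORE),
                                       ov::num_streams(ov::NumStreams::AUTO));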


@ -118,8 +118,8 @@ std::vector<std::string> parse_devices(const std::string& device_string) {
return devices; return devices;
} }
std::map<std::string, std::string> parse_nstreams_value_per_device(const std::vector<std::string>& devices, std::map<std::string, std::string> parse_value_per_device(const std::vector<std::string>& devices,
const std::string& values_string) { const std::string& values_string) {
// Format: <device1>:<value1>,<device2>:<value2> or just <value> // Format: <device1>:<value1>,<device2>:<value2> or just <value>
std::map<std::string, std::string> result; std::map<std::string, std::string> result;
auto device_value_strings = split(values_string, ','); auto device_value_strings = split(values_string, ',');
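To illustrate the documented format (a sketch, not part of the diff): "CPU:4,GPU:2" produces one entry per named device, while a plain value is assumed to apply to every device in the list.

    // per_device["CPU"] == "4" and per_device["GPU"] == "2"
    auto per_device = parse_value_per_device({"CPU", "GPU"}, "CPU:4,GPU:2");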


@@ -56,8 +56,8 @@ using PartialShapes = std::map<std::string, ngraph::PartialShape>;
std::vector<std::string> parse_devices(const std::string& device_string);
uint32_t device_default_device_duration_in_seconds(const std::string& device);
-std::map<std::string, std::string> parse_nstreams_value_per_device(const std::vector<std::string>& devices,
-                                                                   const std::string& values_string);
std::map<std::string, std::string> parse_value_per_device(const std::vector<std::string>& devices,
                                                          const std::string& values_string);
std::string get_shape_string(const ov::Shape& shape);
std::string get_shapes_string(const benchmark_app::PartialShapes& shapes);
size_t get_batch_size(const benchmark_app::InputsInfo& inputs_info);


@@ -24,81 +24,9 @@
void print_any_value(const ov::Any& value) {
    if (value.empty()) {
        slog::info << "EMPTY VALUE" << slog::endl;
-   } else if (value.is<bool>()) {
-       slog::info << std::boolalpha << value.as<bool>() << std::noboolalpha << slog::endl;
-   } else if (value.is<int>()) {
-       slog::info << value.as<int>() << slog::endl;
-   } else if (value.is<unsigned int>()) {
-       slog::info << value.as<unsigned int>() << slog::endl;
-   } else if (value.is<uint64_t>()) {
-       slog::info << value.as<uint64_t>() << slog::endl;
-   } else if (value.is<float>()) {
-       slog::info << value.as<float>() << slog::endl;
-   } else if (value.is<std::string>()) {
    } else {
        std::string stringValue = value.as<std::string>();
        slog::info << (stringValue.empty() ? "\"\"" : stringValue) << slog::endl;
-   } else if (value.is<std::vector<std::string>>()) {
-       slog::info << value.as<std::vector<std::string>>() << slog::endl;
-   } else if (value.is<std::vector<int>>()) {
-       slog::info << value.as<std::vector<int>>() << slog::endl;
-   } else if (value.is<std::vector<float>>()) {
-       slog::info << value.as<std::vector<float>>() << slog::endl;
-   } else if (value.is<std::vector<unsigned int>>()) {
-       slog::info << value.as<std::vector<unsigned int>>() << slog::endl;
-   } else if (value.is<std::tuple<unsigned int, unsigned int, unsigned int>>()) {
-       auto values = value.as<std::tuple<unsigned int, unsigned int, unsigned int>>();
-       slog::info << "{ ";
-       slog::info << std::get<0>(values) << ", ";
-       slog::info << std::get<1>(values) << ", ";
-       slog::info << std::get<2>(values);
-       slog::info << " }";
-       slog::info << slog::endl;
-   } else if (value.is<InferenceEngine::Metrics::DeviceType>()) {
-       auto v = value.as<InferenceEngine::Metrics::DeviceType>();
-       slog::info << v << slog::endl;
-   } else if (value.is<std::map<InferenceEngine::Precision, float>>()) {
-       auto values = value.as<std::map<InferenceEngine::Precision, float>>();
-       slog::info << "{ ";
-       for (auto& kv : values) {
-           slog::info << kv.first << ": " << kv.second << "; ";
-       }
-       slog::info << " }";
-       slog::info << slog::endl;
-   } else if (value.is<std::tuple<unsigned int, unsigned int>>()) {
-       auto values = value.as<std::tuple<unsigned int, unsigned int>>();
-       slog::info << "{ ";
-       slog::info << std::get<0>(values) << ", ";
-       slog::info << std::get<1>(values);
-       slog::info << " }";
-       slog::info << slog::endl;
-   } else if (value.is<std::map<ov::element::Type, float>>()) {
-       auto values = value.as<std::map<ov::element::Type, float>>();
-       slog::info << "{ ";
-       for (auto& kv : values) {
-           slog::info << kv.first << ": " << kv.second << "; ";
-       }
-       slog::info << " }";
-       slog::info << slog::endl;
-   } else if (value.is<std::map<std::string, uint64_t>>()) {
-       auto values = value.as<std::map<std::string, uint64_t>>();
-       slog::info << "{ ";
-       for (auto& kv : values) {
-           slog::info << kv.first << ": " << kv.second << "; ";
-       }
-       slog::info << " }";
-       slog::info << slog::endl;
-   } else if (value.is<ov::hint::PerformanceMode>()) {
-       auto values = value.as<std::string>();
-       slog::info << (values.empty() ? "\"\"" : values) << slog::endl;
-   } else {
-       std::stringstream strm;
-       value.print(strm);
-       auto str = strm.str();
-       if (str.empty()) {
-           slog::info << "UNSUPPORTED TYPE" << slog::endl;
-       } else {
-           slog::info << str << slog::endl;
-       }
    }
}
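The simplification works because Any::as<std::string>() now routes through the Write machinery added by this commit; a sketch of the resulting behavior:

    // Any value with a writable type prints via its string conversion.
    ov::Any v = std::vector<int>{1, 2, 3};
    slog::info << v.as<std::string>() << slog::endl;  // prints "1 2 3"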


@@ -13,6 +13,40 @@
namespace ov {
namespace util {
template <class T>
struct ValueTyped {
template <class U>
static auto test(U*) -> decltype(std::declval<typename U::value_type&>(), std::true_type()) {
return {};
}
template <typename>
static auto test(...) -> std::false_type {
return {};
}
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <typename, typename>
struct Read;
template <typename T, typename std::enable_if<ValueTyped<T>::value, bool>::type = true>
inline typename T::value_type from_string(const std::string& val, const T&) {
std::stringstream ss(val);
typename T::value_type value;
Read<typename T::value_type, void>{}(ss, value);
return value;
}
template <typename>
struct Write;
template <typename T>
inline std::string to_string(const T& value) {
std::stringstream ss;
Write<T>{}(ss, value);
return ss.str();
}
template <typename T>
std::string join(const T& v, const std::string& sep = ", ") {
    std::ostringstream ss;
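A short sketch of what these helpers provide (the static_asserts below are illustrative, not from the diff): ValueTyped is a detection trait that is true only for types with a nested value_type, which lets from_string deduce the value type of a property object.

    #include <vector>

    static_assert(ov::util::ValueTyped<std::vector<int>>::value, "vector has a value_type");
    static_assert(!ov::util::ValueTyped<int>::value, "int has no value_type");

    // With a property object (ov::Property declares value_type) one could
    // write, for example: auto n = ov::util::from_string("4", ov::num_streams);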


@@ -26,6 +26,239 @@ class ExecutableNetwork;
}  // namespace InferenceEngine

namespace ov {
/** @cond INTERNAL */
class Any;
namespace util {
template <typename T, typename = void>
struct Read;
template <class T>
struct Istreamable {
template <class U>
static auto test(U*) -> decltype(std::declval<std::istream&>() >> std::declval<U&>(), std::true_type()) {
return {};
}
template <typename>
static auto test(...) -> std::false_type {
return {};
}
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <class T>
struct Readable {
template <class U>
static auto test(U*) -> decltype(read(std::declval<std::istream&>(), std::declval<U&>()), std::true_type()) {
return {};
}
template <typename>
static auto test(...) -> std::false_type {
return {};
}
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <typename T, typename>
struct Read {
template <typename U>
auto operator()(std::istream&, U&) const ->
typename std::enable_if<std::is_same<T, U>::value && !Istreamable<U>::value && !Readable<U>::value>::type {
OPENVINO_UNREACHABLE("Could read type without std::istream& operator>>(std::istream&, T)",
" defined or ov::util::Read<T> class specialization, T: ",
typeid(T).name());
}
template <typename U>
auto operator()(std::istream& is, U& value) const ->
typename std::enable_if<std::is_same<T, U>::value && Istreamable<U>::value && !Readable<U>::value>::type {
is >> value;
}
};
template <>
struct OPENVINO_API Read<bool> {
void operator()(std::istream& is, bool& value) const;
};
template <>
struct OPENVINO_API Read<Any> {
void operator()(std::istream& is, Any& any) const;
};
template <>
struct OPENVINO_API Read<int> {
void operator()(std::istream& is, int& value) const;
};
template <>
struct OPENVINO_API Read<long> {
void operator()(std::istream& is, long& value) const;
};
template <>
struct OPENVINO_API Read<long long> {
void operator()(std::istream& is, long long& value) const;
};
template <>
struct OPENVINO_API Read<unsigned> {
void operator()(std::istream& is, unsigned& value) const;
};
template <>
struct OPENVINO_API Read<unsigned long> {
void operator()(std::istream& is, unsigned long& value) const;
};
template <>
struct OPENVINO_API Read<unsigned long long> {
void operator()(std::istream& is, unsigned long long& value) const;
};
template <>
struct OPENVINO_API Read<float> {
void operator()(std::istream& is, float& value) const;
};
template <>
struct OPENVINO_API Read<double> {
void operator()(std::istream& is, double& value) const;
};
template <>
struct OPENVINO_API Read<long double> {
void operator()(std::istream& is, long double& value) const;
};
template <>
struct OPENVINO_API Read<std::tuple<unsigned int, unsigned int, unsigned int>> {
void operator()(std::istream& is, std::tuple<unsigned int, unsigned int, unsigned int>& tuple) const;
};
template <>
struct OPENVINO_API Read<std::tuple<unsigned int, unsigned int>> {
void operator()(std::istream& is, std::tuple<unsigned int, unsigned int>& tuple) const;
};
template <typename T, typename A>
struct Read<std::vector<T, A>, typename std::enable_if<std::is_default_constructible<T>::value>::type> {
void operator()(std::istream& is, std::vector<T, A>& vec) const {
while (is.good()) {
T v;
Read<T>{}(is, v);
vec.push_back(std::move(v));
}
}
};
template <typename K, typename T, typename C, typename A>
struct Read<
std::map<K, T, C, A>,
typename std::enable_if<std::is_default_constructible<K>::value && std::is_default_constructible<T>::value>::type> {
void operator()(std::istream& is, std::map<K, T, C, A>& map) const {
while (is.good()) {
K k;
T v;
Read<K>{}(is, k);
Read<T>{}(is, v);
map.emplace(std::move(k), std::move(v));
}
}
};
template <typename T>
struct Write;
template <class T>
struct Ostreamable {
template <class U>
static auto test(U*) -> decltype(std::declval<std::ostream&>() << std::declval<U>(), std::true_type()) {
return {};
}
template <typename>
static auto test(...) -> std::false_type {
return {};
}
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <class T>
struct Writable {
template <class U>
static auto test(U*) -> decltype(write(std::declval<std::ostream&>(), std::declval<const U&>()), std::true_type()) {
return {};
}
template <typename>
static auto test(...) -> std::false_type {
return {};
}
constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
};
template <typename T>
struct Write {
template <typename U>
auto operator()(std::ostream& os, const U&) const ->
typename std::enable_if<std::is_same<T, U>::value && !Ostreamable<U>::value && !Writable<U>::value>::type {}
template <typename U>
auto operator()(std::ostream& os, const U& value) const ->
typename std::enable_if<std::is_same<T, U>::value && Ostreamable<U>::value && !Writable<U>::value>::type {
os << value;
}
};
template <>
struct OPENVINO_API Write<bool> {
void operator()(std::ostream& is, const bool& b) const;
};
template <>
struct OPENVINO_API Write<Any> {
void operator()(std::ostream& is, const Any& any) const;
};
template <>
struct OPENVINO_API Write<std::tuple<unsigned int, unsigned int, unsigned int>> {
void operator()(std::ostream& os, const std::tuple<unsigned int, unsigned int, unsigned int>& tuple) const;
};
template <>
struct OPENVINO_API Write<std::tuple<unsigned int, unsigned int>> {
void operator()(std::ostream& os, const std::tuple<unsigned int, unsigned int>& tuple) const;
};
template <typename T, typename A>
struct Write<std::vector<T, A>> {
void operator()(std::ostream& os, const std::vector<T, A>& vec) const {
if (!vec.empty()) {
std::size_t i = 0;
for (auto&& v : vec) {
Write<T>{}(os, v);
if (i < (vec.size() - 1))
os << ' ';
++i;
}
}
}
};
template <typename K, typename T, typename C, typename A>
struct Write<std::map<K, T, C, A>> {
void operator()(std::ostream& os, const std::map<K, T, C, A>& map) const {
if (!map.empty()) {
std::size_t i = 0;
for (auto&& v : map) {
Write<K>{}(os, v.first);
os << ' ';
Write<T>{}(os, v.second);
if (i < (map.size() - 1))
os << ' ';
++i;
}
}
}
};
} // namespace util
/** @endcond */
class Node;
class RuntimeAttribute;

@@ -44,34 +277,6 @@ class OPENVINO_API Any {
    template <typename T>
    using decay_t = typename std::decay<T>::type;
-   template <typename T>
-   struct IsNullPointer : std::is_same<std::nullptr_t, typename std::remove_cv<T>::type> {};
-
-   template <class T>
-   struct Ostreamable {
-       template <class U>
-       static auto test(U*) -> decltype(std::declval<std::ostream&>() << std::declval<U>(), std::true_type()) {
-           return {};
-       }
-       template <typename>
-       static auto test(...) -> std::false_type {
-           return {};
-       }
-       constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
-   };
-
-   template <class U>
-   static typename std::enable_if<Ostreamable<U>::value && !std::is_same<bool, U>::value>::type print_impl(
-       std::ostream& os,
-       const U& value) {
-       os << value;
-   }
-
-   static void print_impl(std::ostream& os, const bool& b);
-
-   template <class U>
-   static typename std::enable_if<!Ostreamable<U>::value>::type print_impl(std::ostream&, const U&) {}
    template <typename T>
    struct EqualityComparable {
        static void* conv(bool);
@@ -113,7 +318,7 @@ class OPENVINO_API Any {
    template <class U>
    [[noreturn]] static typename std::enable_if<!EqualityComparable<U>::value, bool>::type equal_impl(const U&,
                                                                                                      const U&) {
-       throw ov::Exception{"Could not compare types without equality operator"};
        OPENVINO_UNREACHABLE("Could not compare types without equality operator");
    }

    template <typename T>
@@ -167,42 +372,6 @@ class OPENVINO_API Any {
        constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
    };

-   template <class T>
-   struct Istreamable {
-       template <class U>
-       static auto test(U*) -> decltype(std::declval<std::istream&>() >> std::declval<U&>(), std::true_type()) {
-           return {};
-       }
-       template <typename>
-       static auto test(...) -> std::false_type {
-           return {};
-       }
-       constexpr static const auto value = std::is_same<std::true_type, decltype(test<T>(nullptr))>::value;
-   };
-
-   template <class U>
-   static typename std::enable_if<Istreamable<U>::value && !std::is_same<bool, U>::value>::type read_impl(
-       std::istream& is,
-       U& value) {
-       is >> value;
-   }
-
-   static void read_impl(std::istream& is, bool& value);
-   static void read_impl(std::istream& is, int& value);
-   static void read_impl(std::istream& is, long& value);
-   static void read_impl(std::istream& is, long long& value);
-   static void read_impl(std::istream& is, unsigned& value);
-   static void read_impl(std::istream& is, unsigned long& value);
-   static void read_impl(std::istream& is, unsigned long long& value);
-   static void read_impl(std::istream& is, float& value);
-   static void read_impl(std::istream& is, double& value);
-   static void read_impl(std::istream& is, long double& value);
-
-   template <class U>
-   static typename std::enable_if<!Istreamable<U>::value>::type read_impl(std::istream&, U&) {
-       throw ov::Exception{"Could read type without std::istream& operator>>(std::istream&, T) defined"};
-   }

    static bool equal(std::type_index lhs, std::type_index rhs);
    /**

@@ -315,7 +484,7 @@ class OPENVINO_API Any {
        }

        void read(std::istream&) override {
-           throw ov::Exception{"Pointer to runtime attribute is not readable from std::istream"};
            OPENVINO_UNREACHABLE("Pointer to runtime attribute is not readable from std::istream");
        }

        T runtime_attribute;
@@ -364,11 +533,11 @@ class OPENVINO_API Any {
        }

        void print(std::ostream& os) const override {
-           print_impl(os, value);
            util::Write<T>{}(os, value);
        }

        void read(std::istream& is) override {
-           read_impl(is, value);
            util::Read<T>{}(is, value);
        }

        T value;
@@ -506,17 +675,20 @@ public:
        } else {
            auto runtime_attribute = _impl->as_runtime_attribute();
            if (runtime_attribute == nullptr) {
-               throw ov::Exception{
-                   std::string{"Any does not contains pointer to runtime_attribute. It contains "} +
-                   _impl->type_info().name()};
                OPENVINO_UNREACHABLE("Any does not contains pointer to runtime_attribute. It contains ",
                                     _impl->type_info().name());
            }
            auto vptr = std::dynamic_pointer_cast<typename T::element_type>(runtime_attribute);
            if (vptr == nullptr && T::element_type::get_type_info_static() != runtime_attribute->get_type_info() &&
                T::element_type::get_type_info_static() != RuntimeAttribute::get_type_info_static()) {
-               throw ov::Exception{std::string{"Could not cast Any runtime_attribute to "} + typeid(T).name() +
-                                   " from " + _impl->type_info().name() + "; from " +
-                                   static_cast<std::string>(runtime_attribute->get_type_info()) + " to " +
-                                   static_cast<std::string>(T::element_type::get_type_info_static())};
                OPENVINO_UNREACHABLE("Could not cast Any runtime_attribute to ",
                                     typeid(T).name(),
                                     " from ",
                                     _impl->type_info().name(),
                                     "; from ",
                                     static_cast<std::string>(runtime_attribute->get_type_info()),
                                     " to ",
                                     static_cast<std::string>(T::element_type::get_type_info_static()));
            }
            vptr = std::static_pointer_cast<typename T::element_type>(runtime_attribute);
            _temp_impl = std::make_shared<Impl<decay_t<T>>>(vptr);
@@ -542,17 +714,20 @@ public:
        } else {
            auto runtime_attribute = _impl->as_runtime_attribute();
            if (runtime_attribute == nullptr) {
-               throw ov::Exception{
-                   std::string{"Any does not contains pointer to runtime_attribute. It contains "} +
-                   _impl->type_info().name()};
                OPENVINO_UNREACHABLE("Any does not contains pointer to runtime_attribute. It contains ",
                                     _impl->type_info().name());
            }
            auto vptr = std::dynamic_pointer_cast<typename T::element_type>(runtime_attribute);
            if (vptr == nullptr && T::element_type::get_type_info_static() != runtime_attribute->get_type_info() &&
                T::element_type::get_type_info_static() != RuntimeAttribute::get_type_info_static()) {
-               throw ov::Exception{std::string{"Could not cast Any runtime_attribute to "} + typeid(T).name() +
-                                   " from " + _impl->type_info().name() + "; from " +
-                                   static_cast<std::string>(runtime_attribute->get_type_info()) + " to " +
-                                   static_cast<std::string>(T::element_type::get_type_info_static())};
                OPENVINO_UNREACHABLE("Could not cast Any runtime_attribute to ",
                                     typeid(T).name(),
                                     " from ",
                                     _impl->type_info().name(),
                                     "; from ",
                                     static_cast<std::string>(runtime_attribute->get_type_info()),
                                     " to ",
                                     static_cast<std::string>(T::element_type::get_type_info_static()));
            }
            vptr = std::static_pointer_cast<typename T::element_type>(runtime_attribute);
            _temp_impl = std::make_shared<Impl<decay_t<T>>>(vptr);
@@ -585,7 +760,7 @@ public:
            return *static_cast<decay_t<T>*>(_impl->addressof());
        }
    }
-   throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()};
    OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
}

/**
@@ -612,7 +787,7 @@ public:
            return *static_cast<const decay_t<T>*>(_impl->addressof());
        }
    }
-   throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()};
    OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
}

/**
@@ -634,7 +809,7 @@ public:
            return *static_cast<decay_t<T>*>(_impl->addressof());
        }
    }
-   throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()};
    OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
}

/**
@@ -656,7 +831,7 @@ public:
            return *static_cast<const decay_t<T>*>(_impl->addressof());
        }
    }
-   throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()};
    OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name());
}

/**
@@ -666,13 +841,17 @@ public:
 */
template <class T>
typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() & {
-   impl_check();
-   if (_impl->is(typeid(decay_t<T>))) {
-       return *static_cast<decay_t<T>*>(_impl->addressof());
    if (_impl != nullptr) {
        if (_impl->is(typeid(decay_t<T>))) {
            return *static_cast<decay_t<T>*>(_impl->addressof());
        } else {
            std::stringstream strm;
            print(strm);
            _str = strm.str();
            return _str;
        }
    } else {
-       std::stringstream strm;
-       print(strm);
-       _str = strm.str();
        _str = {};
        return _str;
    }
}
@@ -684,13 +863,17 @@ public:
 */
template <class T>
const typename std::enable_if<std::is_same<T, std::string>::value, T>::type& as() const& {
-   impl_check();
-   if (_impl->is(typeid(decay_t<T>))) {
-       return *static_cast<const decay_t<T>*>(_impl->addressof());
    if (_impl != nullptr) {
        if (_impl->is(typeid(decay_t<T>))) {
            return *static_cast<const decay_t<T>*>(_impl->addressof());
        } else {
            std::stringstream strm;
            print(strm);
            _str = strm.str();
            return _str;
        }
    } else {
-       std::stringstream strm;
-       print(strm);
-       _str = strm.str();
        _str = {};
        return _str;
    }
}
@@ -805,6 +988,7 @@ public:
    }
};

/** @cond INTERNAL */
namespace util {
template <>
struct AsTypePtr<Any> {

@@ -819,6 +1003,7 @@ struct AsTypePtr<Any> {
    }
};
}  // namespace util
/** @endcond */

using AnyMap = std::map<std::string, Any>;
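With the Read/Write hooks above, a user-defined type becomes printable and parseable through Any as soon as it has stream operators; a hedged sketch (MyMode is a made-up type, not from the diff):

    enum class MyMode { ON, OFF };

    inline std::ostream& operator<<(std::ostream& os, const MyMode& m) {
        return os << (m == MyMode::ON ? "ON" : "OFF");
    }
    inline std::istream& operator>>(std::istream& is, MyMode& m) {
        std::string s;
        is >> s;
        m = (s == "ON") ? MyMode::ON : MyMode::OFF;
        return is;
    }

    // ov::Any{MyMode::ON}.as<std::string>() == "ON"
    // ov::Any{std::string{"OFF"}}.as<MyMode>() parses via operator>>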


@@ -166,6 +166,9 @@ OPENVINO_API Type fundamental_type_for(const Type& type);
OPENVINO_API
std::ostream& operator<<(std::ostream& out, const ov::element::Type& obj);

OPENVINO_API
std::istream& operator>>(std::istream& in, ov::element::Type& obj);

}  // namespace element

template <>
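A usage sketch for the new extraction operator (its implementation appears in the element_type.cpp hunk further down; both 2.0 type names and legacy precision names are accepted):

    std::stringstream ss("f16 FP32");
    ov::element::Type a, b;
    ss >> a >> b;  // a == ov::element::f16, b == ov::element::f32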


@@ -121,7 +121,9 @@ const Any::Base* Any::operator->() const {
    return _impl.get();
}

-void Any::read_impl(std::istream& is, bool& value) {
namespace util {

void Read<bool>::operator()(std::istream& is, bool& value) const {
    std::string str;
    is >> str;
    if (str == "YES") {

@@ -136,33 +138,33 @@
template <typename F>
static auto stream_to(std::istream& is, F&& f) -> decltype(f(std::declval<const std::string&>())) {
    std::string str;
-   is >> str;
    Read<std::string>{}(is, str);
    try {
        return f(str);
    } catch (std::exception& e) {
        OPENVINO_UNREACHABLE(std::string{"Could not convert to: "} +
-                            typeid(decltype(f(std::declval<const std::string&>()))).name() + " from string " + str +
-                            ": " + e.what());
                             typeid(decltype(f(std::declval<const std::string&>()))).name() + " from string \"" + str +
                             "\": " + e.what());
    }
}

-void Any::read_impl(std::istream& is, int& value) {
void Read<int>::operator()(std::istream& is, int& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stoi(str);
    });
}

-void Any::read_impl(std::istream& is, long& value) {
void Read<long>::operator()(std::istream& is, long& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stol(str);
    });
}

-void Any::read_impl(std::istream& is, long long& value) {
void Read<long long>::operator()(std::istream& is, long long& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stoll(str);
    });
}

-void Any::read_impl(std::istream& is, unsigned& value) {
void Read<unsigned>::operator()(std::istream& is, unsigned& value) const {
    value = stream_to(is, [](const std::string& str) {
        auto ul = std::stoul(str);
        if (ul > std::numeric_limits<unsigned>::max()) {

@@ -171,34 +173,70 @@ void Any::read_impl(std::istream& is, unsigned& value) {
        return static_cast<unsigned>(ul);
    });
}

-void Any::read_impl(std::istream& is, unsigned long& value) {
void Read<unsigned long>::operator()(std::istream& is, unsigned long& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stoul(str);
    });
}

-void Any::read_impl(std::istream& is, unsigned long long& value) {
void Read<unsigned long long>::operator()(std::istream& is, unsigned long long& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stoull(str);
    });
}

-void Any::read_impl(std::istream& is, float& value) {
void Read<float>::operator()(std::istream& is, float& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stof(str);
    });
}

-void Any::read_impl(std::istream& is, double& value) {
void Read<double>::operator()(std::istream& is, double& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stod(str);
    });
}

-void Any::read_impl(std::istream& is, long double& value) {
void Read<long double>::operator()(std::istream& is, long double& value) const {
    value = stream_to(is, [](const std::string& str) {
        return std::stold(str);
    });
}

-void Any::print_impl(std::ostream& os, const bool& b) {
void Read<std::tuple<unsigned int, unsigned int, unsigned int>>::operator()(
std::istream& is,
std::tuple<unsigned int, unsigned int, unsigned int>& tuple) const {
Read<unsigned int>{}(is, std::get<0>(tuple));
Read<unsigned int>{}(is, std::get<1>(tuple));
Read<unsigned int>{}(is, std::get<2>(tuple));
}
void Read<std::tuple<unsigned int, unsigned int>>::operator()(std::istream& is,
std::tuple<unsigned int, unsigned int>& tuple) const {
Read<unsigned int>{}(is, std::get<0>(tuple));
Read<unsigned int>{}(is, std::get<1>(tuple));
}
void Read<Any>::operator()(std::istream& is, Any& any) const {
any.read(is);
}
void Write<bool>::operator()(std::ostream& os, const bool& b) const {
os << (b ? "YES" : "NO"); os << (b ? "YES" : "NO");
} }
void Write<std::tuple<unsigned int, unsigned int, unsigned int>>::operator()(
std::ostream& os,
const std::tuple<unsigned int, unsigned int, unsigned int>& tuple) const {
os << std::get<0>(tuple) << " " << std::get<1>(tuple) << " " << std::get<2>(tuple);
}
void Write<std::tuple<unsigned int, unsigned int>>::operator()(
std::ostream& os,
const std::tuple<unsigned int, unsigned int>& tuple) const {
os << std::get<0>(tuple) << " " << std::get<1>(tuple);
}
void Write<Any>::operator()(std::ostream& os, const Any& any) const {
any.print(os);
}
} // namespace util
}  // namespace ov
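A round-trip sketch of the bool reader and writer defined above (YES/NO is the canonical textual form):

    ov::Any flag = true;
    std::string s = flag.as<std::string>();  // "YES"
    bool b = ov::Any{s}.as<bool>();          // true again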


@@ -240,6 +240,41 @@ std::ostream& ov::element::operator<<(std::ostream& out, const ov::element::Type
    return out << obj.get_type_name();
}
std::istream& ov::element::operator>>(std::istream& in, ov::element::Type& obj) {
static const std::unordered_map<std::string, ov::element::Type> legacy = {
{"BOOL", ov::element::boolean},
{"BF16", ov::element::bf16},
{"I4", ov::element::i4},
{"I8", ov::element::i8},
{"I16", ov::element::i16},
{"I32", ov::element::i32},
{"I64", ov::element::i64},
{"U4", ov::element::u4},
{"U8", ov::element::u8},
{"U16", ov::element::u16},
{"U32", ov::element::u32},
{"U64", ov::element::u64},
{"FP32", ov::element::f32},
{"FP64", ov::element::f64},
{"FP16", ov::element::f16},
{"BIN", ov::element::u1},
};
std::string str;
in >> str;
auto it_legacy = legacy.find(str);
if (it_legacy != legacy.end()) {
obj = it_legacy->second;
return in;
}
for (auto&& type : Type::get_known_types()) {
if (type->get_type_name() == str) {
obj = *type;
break;
}
}
return in;
}
bool ov::element::Type::compatible(const ov::element::Type& t) const {
    return (is_dynamic() || t.is_dynamic() || *this == t);
}


@@ -298,7 +298,7 @@ void PrintTo(const Any& object, std::ostream* stream) {
    object.print(*stream);
}

-TEST_F(AnyTests, PrintToEmptyAnyDoesNothing) {
TEST_F(AnyTests, PrintToEmpty) {
    Any p;
    std::stringstream stream;
    ASSERT_NO_THROW(p.print(stream));

@@ -354,49 +354,51 @@ TEST_F(AnyTests, PrintToStringAny) {
    ASSERT_EQ(stream.str(), value);
}

-TEST_F(AnyTests, PrintToVectorOfIntsAnyDoesNothing) {
TEST_F(AnyTests, PrintToVectorOfInts) {
    Any p = std::vector<int>{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
    std::stringstream stream;
    ASSERT_NO_THROW(p.print(stream));
-   ASSERT_EQ(stream.str(), std::string{});
    ASSERT_EQ(stream.str(), std::string{"-5 -4 -3 -2 -1 0 1 2 3 4 5"});
}

-TEST_F(AnyTests, PrintToVectorOfUIntsAnyDoesNothing) {
TEST_F(AnyTests, PrintToVectorOfUInts) {
    Any p = std::vector<unsigned int>{0, 1, 2, 3, 4, 5};
    std::stringstream stream;
    ASSERT_NO_THROW(p.print(stream));
-   ASSERT_EQ(stream.str(), std::string{});
    ASSERT_EQ(stream.str(), std::string{"0 1 2 3 4 5"});
}

-TEST_F(AnyTests, PrintToVectorOfSize_tAnyDoesNothing) {
-   Any p = std::vector<std::size_t>{0, 1, 2, 3, 4, 5};
-   std::stringstream stream;
-   ASSERT_NO_THROW(p.print(stream));
-   ASSERT_EQ(stream.str(), std::string{});
-}
-
-TEST_F(AnyTests, PrintToVectorOfFloatsAnyDoesNothing) {
-   Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
-   std::stringstream stream;
-   ASSERT_NO_THROW(p.print(stream));
-   ASSERT_EQ(stream.str(), std::string{});
-}
TEST_F(AnyTests, PrintToVectorOfFloats) {
    auto ref_vec = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
    std::stringstream stream;
    {
        Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
        ASSERT_NO_THROW(p.print(stream));
        ASSERT_EQ(stream.str(), std::string{"0 1.1 2.2 3.3 4.4 5.5"});
    }
    {
        Any p = stream.str();
        ASSERT_EQ((p.as<std::vector<float>>()), ref_vec);
    }
}

-TEST_F(AnyTests, PrintToVectorOfStringsAnyDoesNothing) {
TEST_F(AnyTests, PrintToVectorOfStrings) {
    Any p = std::vector<std::string>{"zero", "one", "two", "three", "four", "five"};
    std::stringstream stream;
    ASSERT_NO_THROW(p.print(stream));
-   ASSERT_EQ(stream.str(), std::string{});
    ASSERT_EQ(stream.str(), std::string{"zero one two three four five"});
}

-TEST_F(AnyTests, PrintToMapOfAnysDoesNothing) {
TEST_F(AnyTests, PrintToMapOfAnys) {
    std::map<std::string, Any> refMap;
    refMap["testParamInt"] = 4;
    refMap["testParamString"] = "test";
-   Any p = refMap;
    std::stringstream stream;
-   ASSERT_NO_THROW(p.print(stream));
-   ASSERT_EQ(stream.str(), std::string{});
    {
        Any p = refMap;
        ASSERT_NO_THROW(p.print(stream));
        ASSERT_EQ(stream.str(), std::string{"testParamInt 4 testParamString test"});
    }
}

TEST_F(AnyTests, constructFromVariantImpl) {
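The map test above implies a complementary parse direction; a hedged sketch of the round trip through the new Read specialization for maps:

    ov::Any p = std::string{"testParamInt 4 testParamString test"};
    auto m = p.as<std::map<std::string, std::string>>();
    // m["testParamInt"] == "4", m["testParamString"] == "test"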


@@ -14,27 +14,6 @@
namespace InferenceEngine {

-/**
- * @brief A namespace with helper functions
- * @ingroup ie_dev_api_plugin_api
- */
-namespace util {
-
-template <typename T, ov::PropertyMutability M>
-inline T string_to_property(const std::string& val, const ov::util::BaseProperty<T, M>& property) {
-    std::stringstream ss(val);
-    T value;
-    ss >> value;
-    return value;
-}
-
-template <typename T>
-inline std::string property_to_string(const T& property) {
-    std::stringstream ss;
-    ss << property;
-    return ss.str();
-}
-
-}  // namespace util
-
/**
 * @brief A namespace with internal plugin configuration keys
 * @ingroup ie_dev_api_plugin_api


@@ -52,22 +52,3 @@ namespace ie = InferenceEngine;
using SupportedOpsMap = std::map<std::string, std::string>;

}  // namespace ov

-namespace std {
-inline ostream& operator<<(ostream& os, const map<string, float>& m) {
-    for (auto&& it : m) {
-        os << it.first << " " << it.second << " ";
-    }
-    return os;
-}
-
-inline istream& operator>>(istream& is, map<string, float>& m) {
-    m.clear();
-    string key;
-    float value;
-    while (is >> key >> value) {
-        m.emplace(key, value);
-    }
-    return is;
-}
-}  // namespace std


@@ -50,8 +50,6 @@ class OPENVINO_RUNTIME_API CompiledModel {
    friend class ov::Core;
    friend class ov::InferRequest;

-   void get_property(const std::string& name, ov::Any& to) const;

public:
    /**
     * @brief Default constructor.

@@ -202,9 +200,7 @@
     */
    template <typename T, PropertyMutability mutability>
    T get_property(const ov::Property<T, mutability>& property) const {
-       auto to = Any::make<T>();
-       get_property(property.name(), to);
-       return to.template as<T>();
        return get_property(property.name()).template as<T>();
    }

    /**
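Usage is unchanged for callers; a sketch with a standard read-only property (the compiled model object is assumed):

    auto n = compiled_model.get_property(ov::optimal_number_of_infer_requests);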


@@ -42,8 +42,6 @@ class OPENVINO_RUNTIME_API Core {
     class Impl;
     std::shared_ptr<Impl> _impl;

-    void get_property(const std::string& device_name, const std::string& name, const AnyMap& arguments, Any& to) const;
-
 public:
     /** @brief Constructs an OpenVINO Core instance using the XML configuration file with
      * devices and their plugins description.
@@ -531,9 +529,7 @@ public:
      */
     template <typename T, PropertyMutability M>
     T get_property(const std::string& deviceName, const ov::Property<T, M>& property) const {
-        auto to = Any::make<T>();
-        get_property(deviceName, property.name(), {}, to);
-        return to.template as<T>();
+        return get_property(deviceName, property.name(), {}).template as<T>();
    }

     /**
@@ -551,9 +547,7 @@ public:
      */
     template <typename T, PropertyMutability M>
     T get_property(const std::string& deviceName, const ov::Property<T, M>& property, const AnyMap& arguments) const {
-        auto to = Any::make<T>();
-        get_property(deviceName, property.name(), arguments, to);
-        return to.template as<T>();
+        return get_property(deviceName, property.name(), arguments).template as<T>();
     }

     /**
@@ -574,9 +568,7 @@ public:
     util::EnableIfAllStringAny<T, Args...> get_property(const std::string& deviceName,
                                                         const ov::Property<T, M>& property,
                                                         Args&&... args) const {
-        auto to = Any::make<T>();
-        get_property(deviceName, property.name(), AnyMap{std::forward<Args>(args)...}, to);
-        return to.template as<T>();
+        return get_property(deviceName, property.name(), AnyMap{std::forward<Args>(args)...}).template as<T>();
     }

     /**
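
Note: with the Any-based accessors above, a typed get_property is now a plain string-to-Any conversion at the call site. A minimal usage sketch (assuming a built OpenVINO with a CPU plugin; the device name and the stream operator for ov::Affinity are assumptions from the surrounding diff):

#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    // Values are stored as strings by the plugin; as<T>() converts them
    // to each property's declared type on the way out.
    auto threads = core.get_property("CPU", ov::inference_num_threads);  // int32_t
    auto affinity = core.get_property("CPU", ov::affinity);              // ov::Affinity
    std::cout << "threads: " << threads << ", affinity: " << affinity << "\n";
    return 0;
}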

View File

@@ -134,7 +134,45 @@ inline std::ostream& operator<<(std::ostream& os, const BaseProperty<T, M>& prop
  * @tparam T type of value used to set or get property
  */
 template <typename T, PropertyMutability mutability_ = PropertyMutability::RW>
-struct Property : public util::BaseProperty<T, mutability_> {
+class Property : public util::BaseProperty<T, mutability_> {
+    template <typename V>
+    struct Forward {
+        template <typename U,
+                  typename std::enable_if<std::is_same<U, const std::string&>::value &&
+                                              std::is_convertible<V, std::string>::value,
+                                          bool>::type = true>
+        explicit operator U() {
+            return value;
+        }
+
+        template <typename U,
+                  typename std::enable_if<std::is_same<U, const std::string&>::value &&
+                                              !std::is_convertible<V, std::string>::value,
+                                          bool>::type = true>
+        explicit operator U() {
+            return Any{value}.as<std::string>();
+        }
+
+        template <typename U,
+                  typename std::enable_if<!std::is_same<U, const std::string&>::value &&
+                                              std::is_convertible<V, std::string>::value,
+                                          bool>::type = true>
+        explicit operator U() {
+            return Any{value}.as<U>();
+        }
+
+        template <typename U,
+                  typename std::enable_if<!std::is_same<U, const std::string&>::value &&
+                                              !std::is_convertible<V, std::string>::value,
+                                          bool>::type = true>
+        explicit operator U() {
+            return value;
+        }
+
+        V&& value;
+    };
+
+public:
     using util::BaseProperty<T, mutability_>::BaseProperty;
     /**
      * @brief Constructs property
@@ -144,7 +182,7 @@ struct Property : public util::BaseProperty<T, mutability_> {
      */
     template <typename... Args>
     inline std::pair<std::string, Any> operator()(Args&&... args) const {
-        return {this->name(), Any::make<T>(std::forward<Args>(args)...)};
+        return {this->name(), Any::make<T>(Forward<Args>{std::forward<Args>(args)}...)};
     }
 };
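
Note: Forward decides, per argument, whether Any::make<T> receives the value as-is or through a string conversion, so string-typed properties can be built from non-string arguments and vice versa. A simplified standalone sketch of the idea (ForwardSketch and forward_sketch are illustrative, not the real class):

#include <iostream>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>

// Wraps an argument so that a string-typed consumer receives a
// stringified value, while any other consumer gets the value unchanged.
template <typename V>
struct ForwardSketch {
    V&& value;

    // Consumer wants a string but the value is not string-convertible:
    // stringify it on the way through.
    template <typename U = V,
              typename std::enable_if<!std::is_convertible<U, std::string>::value, bool>::type = true>
    operator std::string() const {
        std::ostringstream os;
        os << value;
        return os.str();
    }

    // Any other target type: hand the value through unchanged.
    operator typename std::decay<V>::type() const {
        return std::forward<V>(value);
    }
};

template <typename V>
ForwardSketch<V> forward_sketch(V&& v) {
    return ForwardSketch<V>{std::forward<V>(v)};
}

int main() {
    std::string s = forward_sketch(42);  // int becomes "42" for a string consumer
    int i = forward_sketch(42);          // int stays int otherwise
    std::cout << s << " " << i << "\n";
    return 0;
}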
@@ -590,23 +628,69 @@ constexpr static const auto EXPORT_IMPORT = "EXPORT_IMPORT"; //!< Device suppor
 }  // namespace capability
 }  // namespace device

-namespace streams {
 /**
- * @brief Special value for ov::execution::streams::num property.
- * Creates bare minimum of streams to improve the performance
+ * @brief Class to represent number of streams in streams executor
  */
-static constexpr const int32_t AUTO = -1;
-/**
- * @brief Special value for ov::execution::streams::num property.
- * Creates as many streams as needed to accommodate NUMA and avoid associated penalties
- */
-static constexpr const int32_t NUMA = -2;
+struct NumStreams {
+    using Base = std::tuple<int32_t>;  //!< NumStreams is representable as int32_t
+
+    /**
+     * @brief Special value for ov::execution::num_streams property.
+     */
+    enum Special {
+        AUTO = -1,  //!< Creates bare minimum of streams to improve the performance
+        NUMA = -2,  //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties
+    };
+
+    NumStreams() : num{AUTO} {};
+
+    NumStreams(const int32_t num_) : num{num_} {}
+
+    operator int32_t() {
+        return num;
+    }
+
+    operator int32_t() const {
+        return num;
+    }
+
+    int32_t num = 0;
+};
+
+/** @cond INTERNAL */
+inline std::ostream& operator<<(std::ostream& os, const NumStreams& num_streams) {
+    switch (num_streams.num) {
+    case NumStreams::AUTO:
+        return os << "AUTO";
+    case NumStreams::NUMA:
+        return os << "NUMA";
+    default:
+        return os << num_streams.num;
+    }
+}
+
+inline std::istream& operator>>(std::istream& is, NumStreams& num_streams) {
+    std::string str;
+    is >> str;
+    if (str == "AUTO") {
+        num_streams = {NumStreams::AUTO};
+    } else if (str == "NUMA") {
+        num_streams = {NumStreams::NUMA};
+    } else {
+        try {
+            num_streams = {std::stoi(str)};
+        } catch (const std::exception& e) {
+            throw ov::Exception{std::string{"Could not read number of streams from str: "} + str + "; " + e.what()};
+        }
+    }
+    return is;
+}
+/** @endcond */

 /**
  * @brief The number of executor logical partitions
  */
-static constexpr Property<int32_t, PropertyMutability::RW> num{"NUM_STREAMS"};
-}  // namespace streams
+static constexpr Property<NumStreams, PropertyMutability::RW> num_streams{"NUM_STREAMS"};

 /**
  * @brief Maximum number of threads that can be used for inference tasks
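
Note: ov::num_streams now carries a NumStreams value, so the special stream counts travel as a typed object rather than bare int32_t constants. A usage sketch (assuming Core::set_property as in the 2.0 API; the device name is illustrative):

#include <iostream>
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    core.set_property("CPU", ov::num_streams(4));                     // explicit count
    core.set_property("CPU", ov::num_streams(ov::NumStreams::AUTO));  // special value, serialized as "AUTO"
    ov::NumStreams n = core.get_property("CPU", ov::num_streams);
    std::cout << "streams: " << n << "\n";  // operator<< prints AUTO/NUMA or the number
    return 0;
}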

View File

@@ -11,50 +11,10 @@
 namespace ov {
 std::map<std::string, std::string> any_copy(const ov::AnyMap& params) {
-    std::function<std::string(const Any&)> to_config_string = [&](const Any& any) -> std::string {
-        if (any.is<bool>()) {
-            return any.as<bool>() ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
-        } else if (any.is<AnyMap>()) {
-            std::stringstream strm;
-            for (auto&& val : any.as<AnyMap>()) {
-                strm << val.first << " " << to_config_string(val.second) << " ";
-            }
-            return strm.str();
-        } else {
-            std::stringstream strm;
-            any.print(strm);
-            return strm.str();
-        }
-    };
     std::map<std::string, std::string> result;
     for (auto&& value : params) {
-        result.emplace(value.first, to_config_string(value.second));
+        result.emplace(value.first, value.second.as<std::string>());
     }
     return result;
 }
-
-void any_lexical_cast(const ov::Any& from, ov::Any& to) {
-    if (!from.is<std::string>()) {
-        to = from;
-    } else {
-        auto str = from.as<std::string>();
-        if (to.is<std::string>()) {
-            to = from;
-        } else if (to.is<bool>()) {
-            if (str == CONFIG_VALUE(YES)) {
-                to = true;
-            } else if (str == CONFIG_VALUE(NO)) {
-                to = false;
-            } else {
-                OPENVINO_UNREACHABLE("Unsupported lexical cast to bool from: ", str);
-            }
-        } else {
-            std::stringstream strm(str);
-            to.read(strm);
-            if (strm.fail()) {
-                OPENVINO_UNREACHABLE("Unsupported lexical cast to ", to.type_info().name(), " from: ", str);
-            }
-        }
-    }
-}
 }  // namespace ov
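
Note: any_copy now leans entirely on Any::as<std::string>(), which is expected to reproduce the old special cases (bool as YES/NO, nested maps as "key value" pairs). A standalone sketch of the resulting shape of the function (plain STL stand-ins, not the OpenVINO types):

#include <map>
#include <sstream>
#include <string>

// Stand-in for Any::as<std::string>(): stream the value out in its text form.
template <typename T>
std::string stringify(const T& value) {
    std::ostringstream os;
    os << value;
    return os.str();
}

// Shape of the simplified any_copy: one string conversion per entry.
std::map<std::string, std::string> copy_as_strings(const std::map<std::string, double>& params) {
    std::map<std::string, std::string> result;
    for (auto&& value : params) {
        result.emplace(value.first, stringify(value.second));
    }
    return result;
}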

View File

@@ -15,7 +15,4 @@
 namespace ov {
 std::map<std::string, std::string> any_copy(const ov::AnyMap& config_map);
-
-void any_lexical_cast(const Any& any, ov::Any& to);
 }  // namespace ov

View File

@@ -250,10 +250,6 @@ Any CompiledModel::get_property(const std::string& name) const {
     });
 }

-void CompiledModel::get_property(const std::string& name, Any& to) const {
-    any_lexical_cast(get_property(name), to);
-}
-
 RemoteContext CompiledModel::get_context() const {
     OV_EXEC_NET_CALL_STATEMENT(return {_impl->GetContext(), _so});
 }

View File

@@ -272,21 +272,12 @@ public:
     template <typename T, PropertyMutability M>
     T get_property(const ov::Property<T, M>& property) const {
-        auto to = Any::make<T>();
-        get_property(property.name(), {}, to);
-        return to.template as<T>();
+        return get_property(property.name(), {}).template as<T>();
     }

     template <typename T, PropertyMutability M>
     T get_property(const ov::Property<T, M>& property, const AnyMap& arguments) const {
-        auto to = Any::make<T>();
-        get_property(property.name(), arguments, to);
-        return to.template as<T>();
-    }
-
-private:
-    void get_property(const std::string& name, const AnyMap& arguments, Any& to) const {
-        any_lexical_cast(get_property(name, arguments), to);
+        return get_property(property.name(), arguments).template as<T>();
     }
 };

View File

@@ -1094,12 +1094,13 @@ public:
         for (auto&& config : configs) {
             auto parsed = parseDeviceNameIntoConfig(config.first);
             if (deviceName.find(parsed._deviceName) != std::string::npos) {
-                std::string key, value;
                 std::stringstream strm(config.second);
-                while (strm >> key >> value) {
+                std::map<std::string, std::string> device_configs;
+                util::Read<std::map<std::string, std::string>>{}(strm, device_configs);
+                for (auto&& device_config : device_configs) {
                     if (supportedConfigKeys.end() !=
-                        std::find(supportedConfigKeys.begin(), supportedConfigKeys.end(), key)) {
-                        supportedConfig[key] = value;
+                        std::find(supportedConfigKeys.begin(), supportedConfigKeys.end(), device_config.first)) {
+                        supportedConfig[device_config.first] = device_config.second;
                     }
                 }
                 for (auto&& config : parsed._config) {
@@ -1757,13 +1758,6 @@ Any Core::get_property(const std::string& deviceName, const std::string& name, c
     });
 }

-void Core::get_property(const std::string& deviceName,
-                        const std::string& name,
-                        const AnyMap& arguments,
-                        ov::Any& to) const {
-    any_lexical_cast(get_property(deviceName, name, arguments), to);
-}
-
 std::vector<std::string> Core::get_available_devices() const {
     OV_CORE_CALL_STATEMENT(return _impl->GetAvailableDevices(););
 }
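
Note: the device-config parsing above now reads the whole "key value key value ..." string into a map (via the internal util::Read helper) before filtering by supported keys, instead of a hand-rolled extraction loop. A standalone sketch of that wire format (plain STL; key names are illustrative):

#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Parses "k1 v1 k2 v2 ..." into a map, mirroring the whitespace-separated
// format used for per-device config strings.
std::map<std::string, std::string> read_config(const std::string& text) {
    std::map<std::string, std::string> result;
    std::stringstream strm(text);
    std::string key, value;
    while (strm >> key >> value) {
        result.emplace(key, value);
    }
    return result;
}

int main() {
    auto cfg = read_config("NUM_STREAMS 4 PERF_COUNT YES");
    std::cout << cfg["NUM_STREAMS"] << "\n";  // prints "4"
    return 0;
}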

View File

@@ -26,7 +26,7 @@ std::vector<std::string> IStreamsExecutor::Config::SupportedKeys() const {
         CONFIG_KEY(CPU_BIND_THREAD),
         CONFIG_KEY(CPU_THREADS_NUM),
         CONFIG_KEY_INTERNAL(CPU_THREADS_PER_STREAM),
-        ov::streams::num.name(),
+        ov::num_streams.name(),
         ov::inference_num_threads.name(),
         ov::affinity.name(),
     };
@@ -107,20 +107,22 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
             }
             _streams = val_i;
         }
-    } else if (key == ov::streams::num) {
-        int32_t streams = std::stoi(value);
-        if (streams == ov::streams::NUMA) {
+    } else if (key == ov::num_streams) {
+        ov::NumStreams streams;
+        std::stringstream strm{value};
+        strm >> streams;
+        if (streams.num == ov::NumStreams::NUMA) {
             _streams = static_cast<int32_t>(getAvailableNUMANodes().size());
-        } else if (streams == ov::streams::AUTO) {
+        } else if (streams.num == ov::NumStreams::AUTO) {
             // bare minimum of streams (that evenly divides available number of cores)
             _streams = GetDefaultNumStreams();
-        } else if (streams >= 0) {
-            _streams = streams;
+        } else if (streams.num >= 0) {
+            _streams = streams.num;
         } else {
             OPENVINO_UNREACHABLE("Wrong value for property key ",
-                                 ov::streams::num.name(),
+                                 ov::num_streams.name(),
                                  ". Expected non negative numbers (#streams) or ",
-                                 "ov::streams::NUMA|ov::streams::AUTO, Got: ",
+                                 "ov::NumStreams::NUMA|ov::NumStreams::AUTO, Got: ",
                                  streams);
         }
     } else if (key == CONFIG_KEY(CPU_THREADS_NUM) || key == ov::inference_num_threads) {
@@ -179,8 +181,8 @@ Parameter IStreamsExecutor::Config::GetConfig(const std::string& key) const {
         }
     } else if (key == CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) {
         return {std::to_string(_streams)};
-    } else if (key == ov::streams::num) {
-        return decltype(ov::streams::num)::value_type{_streams};
+    } else if (key == ov::num_streams) {
+        return decltype(ov::num_streams)::value_type{_streams};
     } else if (key == CONFIG_KEY(CPU_THREADS_NUM)) {
         return {std::to_string(_threads)};
     } else if (key == ov::inference_num_threads) {
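
Note: SetConfig now parses the stream count through NumStreams' extraction operator, giving one path for "AUTO", "NUMA", and plain integers. A standalone sketch of that dispatch (the enum values mirror ov::NumStreams; names are illustrative):

#include <cstdint>
#include <string>

enum SpecialStreams : int32_t { STREAMS_AUTO = -1, STREAMS_NUMA = -2 };

// Parses a stream-count string the way the executor config does:
// named special values first, then a plain integer.
int32_t parse_streams(const std::string& value) {
    if (value == "AUTO")
        return STREAMS_AUTO;
    if (value == "NUMA")
        return STREAMS_NUMA;
    return std::stoi(value);  // throws std::invalid_argument on junk input
}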

View File

@@ -288,7 +288,7 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
             RO_property(ov::supported_properties.name()),
             RO_property(ov::model_name.name()),
             RO_property(ov::optimal_number_of_infer_requests.name()),
-            RO_property(ov::streams::num.name()),
+            RO_property(ov::num_streams.name()),
             RO_property(ov::affinity.name()),
             RO_property(ov::inference_num_threads.name()),
             RO_property(ov::enable_profiling.name()),
@@ -304,9 +304,9 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
     } else if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = config.streamExecutorConfig._streams;
         return static_cast<uint32_t>(streams); // ov::optimal_number_of_infer_requests has no negative values
-    } else if (name == ov::streams::num) {
+    } else if (name == ov::num_streams) {
         const auto streams = config.streamExecutorConfig._streams;
-        return static_cast<int32_t>(streams); // ov::streams::num has special negative values (AUTO = -1, NUMA = -2)
+        return static_cast<int32_t>(streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
     } else if (name == ov::affinity) {
         const auto affinity = config.streamExecutorConfig._threadBindingType;
         switch (affinity) {

View File

@@ -717,10 +717,10 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string,
     if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = engConfig.streamExecutorConfig._streams;
-        return static_cast<uint32_t>(streams); // ov::optimal_number_of_infer_requests has no negative values
+        return decltype(ov::optimal_number_of_infer_requests)::value_type(streams); // ov::optimal_number_of_infer_requests has no negative values
-    } else if (name == ov::streams::num) {
+    } else if (name == ov::num_streams) {
         const auto streams = engConfig.streamExecutorConfig._streams;
-        return static_cast<int32_t>(streams); // ov::streams::num has special negative values (AUTO = -1, NUMA = -2)
+        return decltype(ov::num_streams)::value_type(streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
     } else if (name == ov::affinity) {
         const auto affinity = engConfig.streamExecutorConfig._threadBindingType;
         switch (affinity) {
@@ -736,19 +736,20 @@ Parameter Engine::GetConfig(const std::string& name, const std::map<std::string,
         return ov::Affinity::NONE;
     } else if (name == ov::inference_num_threads) {
         const auto num_threads = engConfig.streamExecutorConfig._threads;
-        return num_threads;
+        return decltype(ov::inference_num_threads)::value_type(num_threads);
     } else if (name == ov::enable_profiling.name()) {
         const bool perfCount = engConfig.collectPerfCounters;
-        return perfCount ? "YES" : "NO";
+        return decltype(ov::enable_profiling)::value_type(perfCount);
     } else if (name == ov::hint::inference_precision) {
         const auto enforceBF16 = engConfig.enforceBF16;
-        return enforceBF16 ? ov::element::bf16 : ov::element::f32;
+        return decltype(ov::hint::inference_precision)::value_type(
+            enforceBF16 ? ov::element::bf16 : ov::element::f32);
     } else if (name == ov::hint::performance_mode) {
         const auto perfHint = engConfig.perfHintsConfig.ovPerfHint;
-        return perfHint;
+        return ov::Any{perfHint}.as<decltype(ov::hint::performance_mode)::value_type>();
     } else if (name == ov::hint::num_requests) {
         const auto perfHintNumRequests = engConfig.perfHintsConfig.ovPerfHintNumRequests;
-        return perfHintNumRequests;
+        return decltype(ov::hint::num_requests)::value_type(perfHintNumRequests);
     }
     /* Internally legacy parameters are used with new API as part of migration procedure.
      * This fallback can be removed as soon as migration completed */
@@ -836,7 +837,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map<std::string,
                 RO_property(ov::device::capabilities.name())
     };
     // the whole config is RW before network is loaded.
-    std::vector<ov::PropertyName> rwProperties {RW_property(ov::streams::num.name()),
+    std::vector<ov::PropertyName> rwProperties {RW_property(ov::num_streams.name()),
                 RW_property(ov::affinity.name()),
                 RW_property(ov::inference_num_threads.name()),
                 RW_property(ov::enable_profiling.name()),
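
Note: returning decltype(property)::value_type instead of a raw cast keeps the Parameter's runtime type tied to the property's declared type, so a later as<T>() on the caller side cannot drift. A minimal sketch of the pattern (Prop is a hypothetical stand-in for ov::Property):

#include <cstdint>

// Hypothetical stand-in for ov::Property<T, M>: carries the value type
// at compile time so call sites can name it via decltype.
template <typename T>
struct Prop {
    using value_type = T;
    const char* name;
};

constexpr Prop<uint32_t> optimal_requests{"OPTIMAL_NUMBER_OF_INFER_REQUESTS"};

decltype(optimal_requests)::value_type report_requests(int streams) {
    // The return expression is typed by the property, not by the caller.
    return static_cast<decltype(optimal_requests)::value_type>(streams);
}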

View File

@@ -12,6 +12,7 @@
 #include "ie_common.h"
 #include <caseless.hpp>
 #include <unordered_map>
+#include <openvino/util/common_util.hpp>

 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
@@ -43,19 +44,6 @@ static const std::set<std::string> supportedTargets = {
     ""
 };

-inline std::istream& operator>>(std::istream& is, ov::element::Type& p) {
-    std::string str;
-    is >> str;
-    if ((str == "i8") || (str == "I8")) {
-        p = ov::element::i8;
-    } else if ((str == "i16") || (str == "I16")) {
-        p = ov::element::i16;
-    } else {
-        throw ov::Exception{"Unsupported precision: " + str};
-    }
-    return is;
-}
-
 void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
     for (auto&& item : config) {
         auto key = item.first;
@@ -109,7 +97,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& config) {
         };

         if (key == ov::intel_gna::scale_factors_per_input) {
-            inputScaleFactorsPerInput = InferenceEngine::util::string_to_property(value, ov::intel_gna::scale_factors_per_input);
+            inputScaleFactorsPerInput = ov::util::from_string(value, ov::intel_gna::scale_factors_per_input);
             for (auto&& sf : inputScaleFactorsPerInput) {
                 check_scale_factor(sf.second);
             }
@@ -162,7 +150,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START
             }
 OPENVINO_SUPPRESS_DEPRECATED_END
         } else if (key == ov::intel_gna::execution_target || key == ov::intel_gna::compile_target) {
-            auto target = InferenceEngine::util::string_to_property(value, ov::intel_gna::execution_target);
+            auto target = ov::util::from_string(value, ov::intel_gna::execution_target);
             std::string target_str = "";
             if (ov::intel_gna::HWGeneration::GNA_2_0 == target) {
                 target_str = GNAConfigParams::GNA_TARGET_2_0;
@@ -195,7 +183,7 @@ OPENVINO_SUPPRESS_DEPRECATED_END
                 THROW_GNA_EXCEPTION << "EXCLUSIVE_ASYNC_REQUESTS should be YES/NO, but not" << value;
             }
         } else if (key == ov::hint::performance_mode) {
-            performance_mode = InferenceEngine::util::string_to_property(value, ov::hint::performance_mode);
+            performance_mode = ov::util::from_string(value, ov::hint::performance_mode);
         } else if (key == ov::hint::inference_precision) {
             std::stringstream ss(value);
             ss >> inference_precision;
@@ -214,7 +202,7 @@ OPENVINO_SUPPRESS_DEPRECATED_END
             }
             gnaPrecision = precision;
         } else if (key == ov::intel_gna::pwl_design_algorithm) {
-            gnaFlags.pwl_design_algorithm = InferenceEngine::util::string_to_property(value, ov::intel_gna::pwl_design_algorithm);
+            gnaFlags.pwl_design_algorithm = ov::util::from_string(value, ov::intel_gna::pwl_design_algorithm);
             gnaFlags.uniformPwlDesign = (gnaFlags.pwl_design_algorithm == ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION) ? true : false;
 OPENVINO_SUPPRESS_DEPRECATED_START
         } else if (key == GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)) {
@@ -289,7 +277,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START
 OPENVINO_SUPPRESS_DEPRECATED_END
         } else if (key == CONFIG_KEY(LOG_LEVEL) || key == ov::log::level) {
             if (value == PluginConfigParams::LOG_WARNING || value == PluginConfigParams::LOG_NONE) {
-                gnaFlags.log_level = InferenceEngine::util::string_to_property(value, ov::log::level);
+                gnaFlags.log_level = ov::util::from_string(value, ov::log::level);
             } else {
                 log << "Currently only LOG_LEVEL = LOG_WARNING and LOG_NONE are supported, not " << value;
                 THROW_GNA_EXCEPTION << "Currently only LOG_LEVEL = LOG_WARNING and LOG_NONE are supported, not " << value;
@@ -318,7 +306,7 @@ void Config::AdjustKeyMapValues() {
     if (!inputScaleFactorsPerInput.empty()) {
         keyConfigMap[ov::intel_gna::scale_factors_per_input.name()] =
-            InferenceEngine::util::property_to_string(inputScaleFactorsPerInput);
+            ov::util::to_string(inputScaleFactorsPerInput);
     } else {
         if (inputScaleFactors.empty()) {
             inputScaleFactors.push_back(1.0);
@@ -335,12 +323,12 @@ void Config::AdjustKeyMapValues() {
 IE_SUPPRESS_DEPRECATED_END
     std::string device_mode;
     if (gnaFlags.sw_fp32) {
-        device_mode = InferenceEngine::util::property_to_string(ov::intel_gna::ExecutionMode::SW_FP32);
+        device_mode = ov::util::to_string(ov::intel_gna::ExecutionMode::SW_FP32);
     } else {
         for (auto&& value : supported_values) {
             if (value.second.first == pluginGna2AccMode &&
                 value.second.second == swExactMode) {
-                device_mode = InferenceEngine::util::property_to_string(value.first);
+                device_mode = ov::util::to_string(value.first);
                 break;
             }
         }
@@ -353,16 +341,16 @@ void Config::AdjustKeyMapValues() {
         gnaFlags.compact_mode ? PluginConfigParams::YES : PluginConfigParams::NO;
     keyConfigMap[CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)] =
         gnaFlags.exclusive_async_requests ? PluginConfigParams::YES: PluginConfigParams::NO;
-    keyConfigMap[ov::hint::performance_mode.name()] = InferenceEngine::util::property_to_string(performance_mode);
+    keyConfigMap[ov::hint::performance_mode.name()] = ov::util::to_string(performance_mode);
     if (inference_precision != ov::element::undefined) {
-        keyConfigMap[ov::hint::inference_precision.name()] = InferenceEngine::util::property_to_string(inference_precision);
+        keyConfigMap[ov::hint::inference_precision.name()] = ov::util::to_string(inference_precision);
     } else {
         keyConfigMap[GNA_CONFIG_KEY(PRECISION)] = gnaPrecision.name();
     }
 OPENVINO_SUPPRESS_DEPRECATED_START
     if (gnaFlags.pwl_design_algorithm != ov::intel_gna::PWLDesignAlgorithm::UNDEFINED) {
         keyConfigMap[ov::intel_gna::pwl_design_algorithm.name()] =
-            InferenceEngine::util::property_to_string(gnaFlags.pwl_design_algorithm);
+            ov::util::to_string(gnaFlags.pwl_design_algorithm);
     } else {
         keyConfigMap[GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)] =
             gnaFlags.uniformPwlDesign ? PluginConfigParams::YES: PluginConfigParams::NO;
@@ -375,7 +363,7 @@ OPENVINO_SUPPRESS_DEPRECATED_START
 OPENVINO_SUPPRESS_DEPRECATED_END
     keyConfigMap[ov::enable_profiling.name()] =
         gnaFlags.performance_counting ? PluginConfigParams::YES: PluginConfigParams::NO;
-    keyConfigMap[ov::log::level.name()] = InferenceEngine::util::property_to_string(gnaFlags.log_level);
+    keyConfigMap[ov::log::level.name()] = ov::util::to_string(gnaFlags.log_level);
 }

 Parameter Config::GetParameter(const std::string& name) const {
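
Note: the GNA config now funnels every enum-valued property through the shared ov::util::to_string / ov::util::from_string helpers instead of the old InferenceEngine ones. A round-trip sketch (assuming these overloads from openvino/util/common_util.hpp behave as they are used above):

#include <string>
#include <openvino/runtime/properties.hpp>
#include <openvino/util/common_util.hpp>

ov::hint::Priority round_trip() {
    // Serialize the way AdjustKeyMapValues() stores values...
    std::string stored = ov::util::to_string(ov::hint::Priority::MEDIUM);
    // ...and parse it back the way UpdateFromMap() reads them.
    return ov::util::from_string(stored, ov::hint::model_priority);
}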

View File

@@ -24,6 +24,8 @@
 #include <cmath>
 #include <algorithm>

+#include <openvino/util/common_util.hpp>
+
 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
@@ -146,27 +148,27 @@ InferenceEngine::Parameter CompiledModel::GetConfig(const std::string &name) con
         if (name == ov::enable_profiling) {
             return val == PluginConfigParams::YES ? true : false;
         } else if (name == ov::hint::model_priority) {
-            return InferenceEngine::util::string_to_property(val, ov::hint::model_priority);
+            return ov::util::from_string(val, ov::hint::model_priority);
         } else if (name == ov::intel_gpu::hint::host_task_priority) {
-            return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::host_task_priority);
+            return ov::util::from_string(val, ov::intel_gpu::hint::host_task_priority);
         } else if (name == ov::intel_gpu::hint::queue_priority) {
-            return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::queue_priority);
+            return ov::util::from_string(val, ov::intel_gpu::hint::queue_priority);
         } else if (name == ov::intel_gpu::hint::queue_throttle) {
-            return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::queue_throttle);
+            return ov::util::from_string(val, ov::intel_gpu::hint::queue_throttle);
         } else if (name == ov::intel_gpu::enable_loop_unrolling) {
             return val == PluginConfigParams::YES ? true : false;
         } else if (name == ov::cache_dir) {
-            return InferenceEngine::util::string_to_property(val, ov::cache_dir);
+            return ov::util::from_string(val, ov::cache_dir);
         } else if (name == ov::hint::performance_mode) {
-            return InferenceEngine::util::string_to_property(val, ov::hint::performance_mode);
+            return ov::util::from_string(val, ov::hint::performance_mode);
         } else if (name == ov::compilation_num_threads) {
-            return InferenceEngine::util::string_to_property(val, ov::compilation_num_threads);
+            return ov::util::from_string(val, ov::compilation_num_threads);
-        } else if (name == ov::streams::num) {
-            return InferenceEngine::util::string_to_property(val, ov::streams::num);
+        } else if (name == ov::num_streams) {
+            return ov::util::from_string(val, ov::num_streams);
         } else if (name == ov::hint::num_requests) {
-            return InferenceEngine::util::string_to_property(val, ov::hint::num_requests);
+            return ov::util::from_string(val, ov::hint::num_requests);
         } else if (name == ov::device::id) {
-            return InferenceEngine::util::string_to_property(val, ov::device::id);
+            return ov::util::from_string(val, ov::device::id);
         } else {
             return val;
         }
@@ -200,7 +202,7 @@ InferenceEngine::Parameter CompiledModel::GetMetric(const std::string &name) con
         ov::PropertyName{ov::cache_dir.name(), PropertyMutability::RO},
         ov::PropertyName{ov::hint::performance_mode.name(), PropertyMutability::RO},
         ov::PropertyName{ov::compilation_num_threads.name(), PropertyMutability::RO},
-        ov::PropertyName{ov::streams::num.name(), PropertyMutability::RO},
+        ov::PropertyName{ov::num_streams.name(), PropertyMutability::RO},
         ov::PropertyName{ov::hint::num_requests.name(), PropertyMutability::RO},
         ov::PropertyName{ov::device::id.name(), PropertyMutability::RO}
     };

View File

@@ -2,27 +2,30 @@
 // SPDX-License-Identifier: Apache-2.0
 //

-#include "intel_gpu/plugin/device_config.hpp"
-#include <ie_system_conf.h>
 #include <sys/stat.h>

 #include <cldnn/cldnn_config.hpp>
 #include <gpu/gpu_config.hpp>
-#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
-#include "ie_api.h"
-#include "file_utils.h"
-#include "intel_gpu/plugin/itt.hpp"
-#include "openvino/runtime/intel_gpu/properties.hpp"
+#include "intel_gpu/plugin/device_config.hpp"
+#include <ie_system_conf.h>
 #include <thread>

+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp"
+#include "file_utils.h"
+#include "ie_api.h"
+#include "intel_gpu/plugin/itt.hpp"
+#include "openvino/runtime/intel_gpu/properties.hpp"
+#include <openvino/util/common_util.hpp>
+
 #ifdef _WIN32
 # include <direct.h>
-#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
-# define mkdir(dir, mode) _wmkdir(dir)
-#else
-# define mkdir(dir, mode) _mkdir(dir)
-#endif  // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
+# ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
+#  define mkdir(dir, mode) _wmkdir(dir)
+# else
+#  define mkdir(dir, mode) _mkdir(dir)
+# endif  // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT
 #endif  // _WIN32

 using namespace InferenceEngine;
@@ -67,8 +70,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
         const auto hints = perfHintsConfig.SupportedKeys();
         if (hints.end() != std::find(hints.begin(), hints.end(), key)) {
             perfHintsConfig.SetConfig(key, val);
-        } else if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0 ||
-                   key == ov::enable_profiling) {
+        } else if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0 || key == ov::enable_profiling) {
             if (val.compare(PluginConfigParams::YES) == 0) {
                 useProfiling = true;
             } else if (val.compare(PluginConfigParams::NO) == 0) {
@@ -101,18 +103,18 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
                 IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
             }
             switch (uVal) {
             case 0:
             case 2:
                 queuePriority = cldnn::priority_mode_types::med;
                 break;
             case 1:
                 queuePriority = cldnn::priority_mode_types::low;
                 break;
             case 3:
                 queuePriority = cldnn::priority_mode_types::high;
                 break;
             default:
                 IE_THROW(ParameterMismatch) << "Unsupported queue priority value: " << uVal;
             }
         } else if (key == ov::intel_gpu::hint::queue_priority) {
             std::stringstream ss(val);
@@ -124,33 +126,33 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
                 queuePriority = cldnn::priority_mode_types::med;
             else
                 queuePriority = cldnn::priority_mode_types::low;
-        } else if (key.compare(PluginConfigParams::KEY_MODEL_PRIORITY) == 0 ||
-                   key == ov::hint::model_priority) {
+        } else if (key.compare(PluginConfigParams::KEY_MODEL_PRIORITY) == 0 || key == ov::hint::model_priority) {
             if (val.compare(PluginConfigParams::MODEL_PRIORITY_HIGH) == 0 ||
-                val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH)) == 0) {
+                val.compare(ov::util::to_string(ov::hint::Priority::HIGH)) == 0) {
                 queuePriority = cldnn::priority_mode_types::high;
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::BIG;
             } else if (val.compare(PluginConfigParams::MODEL_PRIORITY_MED) == 0 ||
-                       val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM)) == 0) {
+                       val.compare(ov::util::to_string(ov::hint::Priority::MEDIUM)) == 0) {
                 queuePriority = cldnn::priority_mode_types::med;
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::ANY;
             } else if (val.compare(PluginConfigParams::MODEL_PRIORITY_LOW) == 0 ||
-                       val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::LOW)) == 0) {
+                       val.compare(ov::util::to_string(ov::hint::Priority::LOW)) == 0) {
                 queuePriority = cldnn::priority_mode_types::low;
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::LITTLE;
             } else {
-                IE_THROW() << "Not found appropriate value for config key " << PluginConfigParams::KEY_MODEL_PRIORITY << ".\n";
+                IE_THROW() << "Not found appropriate value for config key " << PluginConfigParams::KEY_MODEL_PRIORITY
+                           << ".\n";
             }
             if (getAvailableCoresTypes().size() > 1) {
-                if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG
-                    || task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE) {
+                if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG ||
+                    task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE) {
                     task_exec_config._streams = std::min(task_exec_config._streams,
                                                          getNumberOfCores(task_exec_config._threadPreferredCoreType));
                 }
             } else {
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::ANY;
-                task_exec_config._streams = std::min(task_exec_config._streams,
-                                                     static_cast<int>(std::thread::hardware_concurrency()));
+                task_exec_config._streams =
+                    std::min(task_exec_config._streams, static_cast<int>(std::thread::hardware_concurrency()));
             }
         } else if (key.compare(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) == 0 ||
                    key.compare(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) == 0) {
@@ -161,18 +163,18 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
                 IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
             }
             switch (uVal) {
             case 0:
             case 2:
                 queueThrottle = cldnn::throttle_mode_types::med;
                 break;
             case 1:
                 queueThrottle = cldnn::throttle_mode_types::low;
                 break;
             case 3:
                 queueThrottle = cldnn::throttle_mode_types::high;
                 break;
             default:
                 IE_THROW(ParameterMismatch) << "Unsupported queue throttle value: " << uVal;
             }
         } else if (key == ov::intel_gpu::hint::queue_throttle) {
             std::stringstream ss(val);
@@ -221,8 +223,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
                 graph_dumps_dir = val;
                 createDirectory(graph_dumps_dir);
             }
-        } else if (key.compare(PluginConfigParams::KEY_CACHE_DIR) == 0 ||
-                   key == ov::cache_dir) {
+        } else if (key.compare(PluginConfigParams::KEY_CACHE_DIR) == 0 || key == ov::cache_dir) {
             if (!val.empty()) {
                 kernels_cache_dir = val;
                 createDirectory(kernels_cache_dir);
@@ -240,10 +241,9 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
             } else {
                 IE_THROW(NotFound) << "Unsupported property value by plugin: " << val;
             }
-        } else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0 ||
-                   key == ov::streams::num) {
+        } else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0 || key == ov::num_streams) {
             if (val.compare(PluginConfigParams::GPU_THROUGHPUT_AUTO) == 0 ||
-                val.compare(std::to_string(ov::streams::AUTO)) == 0) {
+                val.compare(ov::num_streams(ov::NumStreams::AUTO).second.as<std::string>()) == 0) {
                 throughput_streams = GetDefaultNStreamsForThroughputMode();
             } else {
                 int val_i;
@@ -251,21 +251,20 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
                     val_i = std::stoi(val);
                 } catch (const std::exception&) {
                     IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS
                                << ". Expected only positive numbers (#streams) or "
                                << "PluginConfigParams::GPU_THROUGHPUT_AUTO";
                 }
                 if (val_i > 0)
                     throughput_streams = static_cast<uint16_t>(val_i);
             }
-        } else if (key.compare(PluginConfigParams::KEY_DEVICE_ID) == 0 ||
-                   key == ov::device::id) {
+        } else if (key.compare(PluginConfigParams::KEY_DEVICE_ID) == 0 || key == ov::device::id) {
             // Validate if passed value is postivie number.
             try {
                 int val_i = std::stoi(val);
                 (void)val_i;
             } catch (const std::exception&) {
                 IE_THROW() << "Wrong value for property key " << ov::device::id.name()
                            << ". DeviceIDs are only represented by positive numbers";
             }
             // Set this value.
             device_id = val;
@@ -294,8 +293,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
             } else {
                 IE_THROW(NotFound) << "Unsupported KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS flag value: " << val;
             }
-        } else if (key.compare(GPUConfigParams::KEY_GPU_MAX_NUM_THREADS) == 0 ||
-                   key == ov::compilation_num_threads) {
+        } else if (key.compare(GPUConfigParams::KEY_GPU_MAX_NUM_THREADS) == 0 || key == ov::compilation_num_threads) {
             int max_threads = std::max(1, static_cast<int>(std::thread::hardware_concurrency()));
             try {
                 int val_i = std::stoi(val);
@@ -305,8 +303,8 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
                 task_exec_config._streams = std::min(task_exec_config._streams, val_i);
             } catch (const std::exception&) {
                 IE_THROW() << "Wrong value for property key " << GPUConfigParams::KEY_GPU_MAX_NUM_THREADS << ": " << val
                            << "\nSpecify the number of threads use for build as an integer."
                            << "\nOut of range value will be set as a default value, maximum concurrent threads.";
             }
         } else if (key.compare(GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING) == 0 ||
                    key == ov::intel_gpu::enable_loop_unrolling) {
@@ -320,13 +318,13 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap)
         } else if (key.compare(GPUConfigParams::KEY_GPU_HOST_TASK_PRIORITY) == 0 ||
                    key == ov::intel_gpu::hint::host_task_priority) {
             if (val.compare(GPUConfigParams::GPU_HOST_TASK_PRIORITY_HIGH) == 0 ||
-                val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH)) == 0) {
+                val.compare(ov::util::to_string(ov::hint::Priority::HIGH)) == 0) {
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::BIG;
             } else if (val.compare(GPUConfigParams::GPU_HOST_TASK_PRIORITY_MEDIUM) == 0 ||
-                       val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM)) == 0) {
+                       val.compare(ov::util::to_string(ov::hint::Priority::MEDIUM)) == 0) {
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::ANY;
             } else if (val.compare(GPUConfigParams::GPU_HOST_TASK_PRIORITY_LOW) == 0 ||
-                       val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::LOW)) == 0) {
+                       val.compare(ov::util::to_string(ov::hint::Priority::LOW)) == 0) {
                 task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::LITTLE;
             } else {
                 IE_THROW(NotFound) << "Unsupported host task priority by plugin: " << val;
@@ -384,22 +382,35 @@ void Config::adjustKeyMapValues() {
     {
         if (queuePriority == cldnn::priority_mode_types::high &&
-            (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG || getAvailableCoresTypes().size() == 1)) {
-            key_config_map[ov::hint::model_priority.name()] = InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH);
+            (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG ||
+             getAvailableCoresTypes().size() == 1)) {
+            key_config_map[ov::hint::model_priority.name()] =
+                ov::util::to_string(ov::hint::Priority::HIGH);
         } else if (queuePriority == cldnn::priority_mode_types::low &&
-                   (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE || getAvailableCoresTypes().size() == 1)) {
-            key_config_map[ov::hint::model_priority.name()] = InferenceEngine::util::property_to_string(ov::hint::Priority::LOW);
-        } else if (queuePriority == cldnn::priority_mode_types::med && task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::ANY) {
-            key_config_map[ov::hint::model_priority.name()] = InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM);
+                   (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE ||
+                    getAvailableCoresTypes().size() == 1)) {
+            key_config_map[ov::hint::model_priority.name()] =
+                ov::util::to_string(ov::hint::Priority::LOW);
+        } else if (queuePriority == cldnn::priority_mode_types::med &&
+                   task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::ANY) {
+            key_config_map[ov::hint::model_priority.name()] =
+                ov::util::to_string(ov::hint::Priority::MEDIUM);
         }
     }
     {
         std::string qp = "0";
         switch (queuePriority) {
-        case cldnn::priority_mode_types::low: qp = "1"; break;
-        case cldnn::priority_mode_types::med: qp = "2"; break;
-        case cldnn::priority_mode_types::high: qp = "3"; break;
-        default: break;
+        case cldnn::priority_mode_types::low:
+            qp = "1";
+            break;
+        case cldnn::priority_mode_types::med:
+            qp = "2";
+            break;
+        case cldnn::priority_mode_types::high:
+            qp = "3";
+            break;
+        default:
+            break;
         }
         key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY] = qp;
         key_config_map[GPUConfigParams::KEY_GPU_PLUGIN_PRIORITY] = qp;
@@ -407,20 +418,27 @@ void Config::adjustKeyMapValues() {
     {
         std::string priority;
         if (queuePriority == cldnn::priority_mode_types::high)
-            priority = InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH);
+            priority = ov::util::to_string(ov::hint::Priority::HIGH);
         else if (queuePriority == cldnn::priority_mode_types::low)
-            priority = InferenceEngine::util::property_to_string(ov::hint::Priority::LOW);
+            priority = ov::util::to_string(ov::hint::Priority::LOW);
         else
-            priority = InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM);
+            priority = ov::util::to_string(ov::hint::Priority::MEDIUM);
         key_config_map[ov::intel_gpu::hint::queue_priority.name()] = priority;
     }
     {
         std::string qt = "0";
         switch (queueThrottle) {
-        case cldnn::throttle_mode_types::low: qt = "1"; break;
-        case cldnn::throttle_mode_types::med: qt = "2"; break;
-        case cldnn::throttle_mode_types::high: qt = "3"; break;
-        default: break;
+        case cldnn::throttle_mode_types::low:
+            qt = "1";
+            break;
+        case cldnn::throttle_mode_types::med:
+            qt = "2";
+            break;
+        case cldnn::throttle_mode_types::high:
+            qt = "3";
+            break;
+        default:
+            break;
         }
         key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE] = qt;
         key_config_map[GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE] = qt;
@@ -428,31 +446,40 @@ void Config::adjustKeyMapValues() {
     {
         std::string throttleLevel;
         if (queueThrottle == cldnn::throttle_mode_types::high)
-            throttleLevel = InferenceEngine::util::property_to_string(ov::intel_gpu::hint::ThrottleLevel::HIGH);
+            throttleLevel = ov::util::to_string(ov::intel_gpu::hint::ThrottleLevel::HIGH);
         else if (queueThrottle == cldnn::throttle_mode_types::low)
-            throttleLevel = InferenceEngine::util::property_to_string(ov::intel_gpu::hint::ThrottleLevel::LOW);
+            throttleLevel = ov::util::to_string(ov::intel_gpu::hint::ThrottleLevel::LOW);
         else
-            throttleLevel = InferenceEngine::util::property_to_string(ov::intel_gpu::hint::ThrottleLevel::MEDIUM);
+            throttleLevel = ov::util::to_string(ov::intel_gpu::hint::ThrottleLevel::MEDIUM);
         key_config_map[ov::intel_gpu::hint::queue_throttle.name()] = throttleLevel;
     }
     {
         std::string hostTaskPriority;
         if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE)
-            hostTaskPriority = InferenceEngine::util::property_to_string(ov::hint::Priority::LOW);
+            hostTaskPriority = ov::util::to_string(ov::hint::Priority::LOW);
         else if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG)
-            hostTaskPriority = InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH);
+            hostTaskPriority = ov::util::to_string(ov::hint::Priority::HIGH);
         else
-            hostTaskPriority = InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM);
+            hostTaskPriority = ov::util::to_string(ov::hint::Priority::MEDIUM);
         key_config_map[ov::intel_gpu::hint::host_task_priority.name()] = hostTaskPriority;
     }
     {
         std::string tm = PluginConfigParams::TUNING_DISABLED;
         switch (tuningConfig.mode) {
-        case cldnn::tuning_mode::tuning_tune_and_cache: tm = PluginConfigParams::TUNING_CREATE; break;
-        case cldnn::tuning_mode::tuning_use_cache: tm = PluginConfigParams::TUNING_USE_EXISTING; break;
-        case cldnn::tuning_mode::tuning_use_and_update: tm = PluginConfigParams::TUNING_UPDATE; break;
-        case cldnn::tuning_mode::tuning_retune_and_cache: tm = PluginConfigParams::TUNING_RETUNE; break;
-        default: break;
+        case cldnn::tuning_mode::tuning_tune_and_cache:
+            tm = PluginConfigParams::TUNING_CREATE;
+            break;
+        case cldnn::tuning_mode::tuning_use_cache:
+            tm = PluginConfigParams::TUNING_USE_EXISTING;
+            break;
+        case cldnn::tuning_mode::tuning_use_and_update:
+            tm = PluginConfigParams::TUNING_UPDATE;
+            break;
+        case cldnn::tuning_mode::tuning_retune_and_cache:
+            tm = PluginConfigParams::TUNING_RETUNE;
+            break;
+        default:
+            break;
         }
         key_config_map[PluginConfigParams::KEY_TUNING_MODE] = tm;
         key_config_map[PluginConfigParams::KEY_TUNING_FILE] = tuningConfig.cache_file_path;
@@ -464,7 +491,7 @@ void Config::adjustKeyMapValues() {
     key_config_map[ov::cache_dir.name()] = kernels_cache_dir;
     key_config_map[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(throughput_streams);
-    key_config_map[ov::streams::num.name()] = std::to_string(throughput_streams);
+    key_config_map[ov::num_streams.name()] = std::to_string(throughput_streams);
     key_config_map[PluginConfigParams::KEY_DEVICE_ID] = device_id;
     key_config_map[ov::device::id.name()] = device_id;
@@ -490,18 +517,18 @@ void Config::adjustKeyMapValues() {
 }

 bool Config::isNewApiProperty(std::string property) {
-    static const std::set<std::string> new_api_keys {
+    static const std::set<std::string> new_api_keys{
         ov::intel_gpu::hint::queue_priority.name(),
         ov::intel_gpu::hint::queue_throttle.name(),
         ov::compilation_num_threads.name(),
-        ov::streams::num.name(),
+        ov::num_streams.name(),
     };
     return new_api_keys.find(property) != new_api_keys.end();
 }

 std::string Config::ConvertPropertyToLegacy(const std::string& key, const std::string& value) {
     if (key == PluginConfigParams::KEY_MODEL_PRIORITY) {
-        auto priority = InferenceEngine::util::string_to_property(value, ov::hint::model_priority);
+        auto priority = ov::util::from_string(value, ov::hint::model_priority);
         if (priority == ov::hint::Priority::HIGH)
             return PluginConfigParams::MODEL_PRIORITY_HIGH;
         else if (priority == ov::hint::Priority::MEDIUM)
@@ -509,7 +536,7 @@ std::string Config::ConvertPropertyToLegacy(const std::string& key, const std::s
         else if (priority == ov::hint::Priority::LOW)
             return PluginConfigParams::MODEL_PRIORITY_LOW;
     } else if (key == GPUConfigParams::KEY_GPU_HOST_TASK_PRIORITY) {
-        auto priority = InferenceEngine::util::string_to_property(value, ov::intel_gpu::hint::host_task_priority);
+        auto priority = ov::util::from_string(value, ov::intel_gpu::hint::host_task_priority);
         if (priority == ov::hint::Priority::HIGH)
             return GPUConfigParams::GPU_HOST_TASK_PRIORITY_HIGH;
         else if (priority == ov::hint::Priority::MEDIUM)

View File

@@ -29,6 +29,7 @@
 #include <transformations/rt_info/fused_names_attribute.hpp>
 #include "openvino/pass/serialize.hpp"
+#include <openvino/util/common_util.hpp>

 #include "intel_gpu/runtime/device_query.hpp"
 #include "intel_gpu/runtime/debug_configuration.hpp"
@@ -214,14 +215,14 @@ std::map<std::string, std::string> Plugin::ConvertPerfHintsToConfig(
                                : plugin_config.perfHintsConfig.ovPerfHint;
         //checking streams (to avoid overriding what user might explicitly set in the incoming config or previously via SetConfig)
         const auto streams = config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == config.end() &&
-                             config.find(ov::streams::num.name()) == config.end();
+                             config.find(ov::num_streams.name()) == config.end();
         if (streams && !streamsSet) {
             if (mode_name == CONFIG_VALUE(LATENCY)) {
                 config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(1);
-                config[ov::streams::num.name()] = std::to_string(1);
+                config[ov::num_streams.name()] = std::to_string(1);
             } else if (mode_name == CONFIG_VALUE(THROUGHPUT)) {
                 config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = CONFIG_VALUE(GPU_THROUGHPUT_AUTO);
-                config[ov::streams::num.name()] = std::to_string(ov::streams::AUTO);
+                config[ov::num_streams.name()] = ov::Any::make<ov::NumStreams>(ov::NumStreams::AUTO).as<std::string>();
                 //disabling the throttling temporarily to set the validation (that is switching to the hints) perf baseline
                 //checking throttling (to avoid overriding what user might explicitly set in the incoming config or previously via SetConfig)
                 // const auto bInConfig = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() ||
@ -337,7 +338,7 @@ InferenceEngine::RemoteContext::Ptr Plugin::GetDefaultContext(const AnyMap& para
void Plugin::SetConfig(const std::map<std::string, std::string> &config) { void Plugin::SetConfig(const std::map<std::string, std::string> &config) {
streamsSet = config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) != config.end() || streamsSet = config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) != config.end() ||
config.find(ov::streams::num.name()) != config.end(); config.find(ov::num_streams.name()) != config.end();
throttlingSet = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() || throttlingSet = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() ||
config.find(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) != config.end() || config.find(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) != config.end() ||
config.find(ov::intel_gpu::hint::queue_throttle.name()) != config.end(); config.find(ov::intel_gpu::hint::queue_throttle.name()) != config.end();
@ -543,28 +544,28 @@ Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string,
if (name == ov::enable_profiling) { if (name == ov::enable_profiling) {
return val == PluginConfigParams::YES ? true : false; return val == PluginConfigParams::YES ? true : false;
} else if (name == ov::hint::model_priority) { } else if (name == ov::hint::model_priority) {
return InferenceEngine::util::string_to_property(val, ov::hint::model_priority); return ov::util::from_string(val, ov::hint::model_priority);
} else if (name == ov::intel_gpu::hint::host_task_priority) { } else if (name == ov::intel_gpu::hint::host_task_priority) {
return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::host_task_priority); return ov::util::from_string(val, ov::intel_gpu::hint::host_task_priority);
} else if (name == ov::intel_gpu::hint::queue_priority) { } else if (name == ov::intel_gpu::hint::queue_priority) {
return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::queue_priority); return ov::util::from_string(val, ov::intel_gpu::hint::queue_priority);
} else if (name == ov::intel_gpu::hint::queue_throttle) { } else if (name == ov::intel_gpu::hint::queue_throttle) {
return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::queue_throttle); return ov::util::from_string(val, ov::intel_gpu::hint::queue_throttle);
} else if (name == ov::intel_gpu::enable_loop_unrolling) { } else if (name == ov::intel_gpu::enable_loop_unrolling) {
return val == PluginConfigParams::YES ? true : false; return val == PluginConfigParams::YES ? true : false;
} else if (name == ov::cache_dir) { } else if (name == ov::cache_dir) {
return InferenceEngine::util::string_to_property(val, ov::cache_dir); return ov::util::from_string(val, ov::cache_dir);
} else if (name == ov::hint::performance_mode) { } else if (name == ov::hint::performance_mode) {
return InferenceEngine::util::string_to_property(val, ov::hint::performance_mode); return ov::util::from_string(val, ov::hint::performance_mode);
} else if (name == ov::compilation_num_threads) { } else if (name == ov::compilation_num_threads) {
return InferenceEngine::util::string_to_property(val, ov::compilation_num_threads); return ov::util::from_string(val, ov::compilation_num_threads);
} else if (name == ov::streams::num) { } else if (name == ov::num_streams) {
return InferenceEngine::util::string_to_property(val, ov::streams::num); return ov::util::from_string(val, ov::num_streams);
} else if (name == ov::hint::num_requests) { } else if (name == ov::hint::num_requests) {
auto temp = InferenceEngine::util::string_to_property(val, ov::hint::num_requests);; auto temp = ov::util::from_string(val, ov::hint::num_requests);;
return temp; return temp;
} else if (name == ov::device::id) { } else if (name == ov::device::id) {
return InferenceEngine::util::string_to_property(val, ov::device::id); return ov::util::from_string(val, ov::device::id);
} else { } else {
return val; return val;
} }
@ -687,7 +688,7 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
ov::PropertyName{ov::cache_dir.name(), PropertyMutability::RW}, ov::PropertyName{ov::cache_dir.name(), PropertyMutability::RW},
ov::PropertyName{ov::hint::performance_mode.name(), PropertyMutability::RW}, ov::PropertyName{ov::hint::performance_mode.name(), PropertyMutability::RW},
ov::PropertyName{ov::compilation_num_threads.name(), PropertyMutability::RW}, ov::PropertyName{ov::compilation_num_threads.name(), PropertyMutability::RW},
ov::PropertyName{ov::streams::num.name(), PropertyMutability::RW}, ov::PropertyName{ov::num_streams.name(), PropertyMutability::RW},
ov::PropertyName{ov::hint::num_requests.name(), PropertyMutability::RW}, ov::PropertyName{ov::hint::num_requests.name(), PropertyMutability::RW},
ov::PropertyName{ov::device::id.name(), PropertyMutability::RW}, ov::PropertyName{ov::device::id.name(), PropertyMutability::RW},
}; };
@ -899,18 +900,17 @@ Parameter Plugin::GetMetric(const std::string& name, const std::map<std::string,
} }
auto it_streams = options.find("GPU_THROUGHPUT_STREAMS") != options.end() ? options.find("GPU_THROUGHPUT_STREAMS") : auto it_streams = options.find("GPU_THROUGHPUT_STREAMS") != options.end() ? options.find("GPU_THROUGHPUT_STREAMS") :
options.find(ov::streams::num.name()) != options.end() ? options.find(ov::streams::num.name()) : options.find(ov::num_streams.name()) != options.end() ? options.find(ov::num_streams.name()) :
options.end(); options.end();
if (it_streams != options.end()) { if (it_streams != options.end()) {
if (it_streams->second.is<int32_t>()) { if (it_streams->second.is<int32_t>()) {
n_streams = it_streams->second.as<int32_t>(); n_streams = it_streams->second.as<int32_t>();
if (n_streams == ov::streams::AUTO)
n_streams = config.GetDefaultNStreamsForThroughputMode();
} else if (it_streams->second.is<uint32_t>()) { } else if (it_streams->second.is<uint32_t>()) {
n_streams = it_streams->second.as<uint32_t>(); n_streams = it_streams->second.as<uint32_t>();
} else if (it_streams->second.is<std::string>()) { } else if (it_streams->second.is<std::string>()) {
std::string n_streams_str = it_streams->second.as<std::string>(); std::string n_streams_str = it_streams->second.as<std::string>();
if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO)) { if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO) &&
n_streams_str != util::to_string(ov::NumStreams(ov::NumStreams::AUTO))) {
IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\""; IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\"";
} }
n_streams = config.GetDefaultNStreamsForThroughputMode(); n_streams = config.GetDefaultNStreamsForThroughputMode();
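The ConvertPerfHintsToConfig hunk above preserves an important precedence rule: the performance hint only fills in the stream count when the user has not set streams explicitly, either in the incoming config or earlier through SetConfig. A rough sketch of the two call shapes, assuming a GPU device and a model file that exist on the host:

// Sketch: the THROUGHPUT hint resolves streams to AUTO only when unset.
#include <openvino/runtime/core.hpp>

int main() {
    ov::Core core;
    auto model = core.read_model("model.xml");  // hypothetical model path

    // No streams given: ConvertPerfHintsToConfig() fills num_streams with AUTO.
    auto implicit = core.compile_model(model, "GPU",
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));

    // Streams given explicitly: the hint no longer overrides them.
    auto explicit_streams = core.compile_model(model, "GPU",
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
        ov::num_streams(4));
    return 0;
}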
@@ -33,7 +33,7 @@ void OvThroughputStreamsOption::validate(const PluginConfiguration& configuratio
 }
 std::string OvThroughputStreamsOption::key() {
-    return ov::streams::num.name();
+    return ov::num_streams.name();
 }
 details::Access OvThroughputStreamsOption::access() {
@@ -46,7 +46,7 @@ details::Category OvThroughputStreamsOption::category() {
 std::string OvThroughputStreamsOption::defaultValue() {
     std::stringstream ss;
-    ss << ov::streams::AUTO;
+    ss << ov::NumStreams::AUTO;
     return ss.str();
 }
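defaultValue() above serializes ov::NumStreams::AUTO through a stringstream, which only works because the type provides an operator<< for its sentinel values. A standalone analogue of that pattern with invented names (NumStreamsLike is not the real class, and the sentinel constants are assumptions):

// Standalone analogue of the NumStreams pattern: special sentinel values plus
// an operator<< so the option layer can serialize defaults as strings.
#include <iostream>
#include <sstream>

struct NumStreamsLike {
    static const int AUTO = -1;  // sentinel: let the plugin pick (assumed value)
    static const int NUMA = -2;  // sentinel: one stream per NUMA node (assumed)
    int num;
    NumStreamsLike(int n) : num(n) {}
};

std::ostream& operator<<(std::ostream& os, const NumStreamsLike& s) {
    if (s.num == NumStreamsLike::AUTO) return os << "AUTO";
    if (s.num == NumStreamsLike::NUMA) return os << "NUMA";
    return os << s.num;
}

int main() {
    std::stringstream ss;
    ss << NumStreamsLike(NumStreamsLike::AUTO);  // same shape as defaultValue()
    std::cout << ss.str() << '\n';               // prints "AUTO"
}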
@@ -116,7 +116,7 @@ RangeType MyriadMetrics::RangeForAsyncInferRequests(
     auto throughput_streams_str = config.find(InferenceEngine::MYRIAD_THROUGHPUT_STREAMS);
     if (throughput_streams_str == config.end()) {
-        throughput_streams_str = config.find(ov::streams::num.name());
+        throughput_streams_str = config.find(ov::num_streams.name());
     }
     if (throughput_streams_str != config.end()) {
         try {
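The Myriad metric keeps the same fallback shape as the GPU code: look up the legacy key first and only then the new-API key. The same pattern in isolation, with the key strings assumed rather than quoted from the headers:

// Sketch of the two-key lookup: legacy name first, new-API name second.
#include <map>
#include <string>

const std::string* find_streams_value(const std::map<std::string, std::string>& config) {
    auto it = config.find("MYRIAD_THROUGHPUT_STREAMS");  // legacy key
    if (it == config.end())
        it = config.find("NUM_STREAMS");  // assumed string name of ov::num_streams
    return it == config.end() ? nullptr : &it->second;
}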
@@ -87,14 +87,14 @@ TEST(OVClassBasicTest, smoke_SetConfigInferenceNumThreads) {
     int32_t value = 0;
     int32_t num_threads = 1;
-    ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads)));
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads));
+    OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads)));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads));
     ASSERT_EQ(num_threads, value);
     num_threads = 4;
-    ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads)));
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads));
+    OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads)));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads));
     ASSERT_EQ(num_threads, value);
 }
@@ -104,19 +104,19 @@ TEST(OVClassBasicTest, smoke_SetConfigStreamsNum) {
     int32_t num_streams = 1;
     auto setGetProperty = [&ie](int32_t& getProperty, int32_t setProperty){
-        ASSERT_NO_THROW(ie.set_property("CPU", ov::streams::num(setProperty)));
-        ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::streams::num));
+        OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::num_streams(setProperty)));
+        OV_ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::num_streams));
     };
     setGetProperty(value, num_streams);
     ASSERT_EQ(num_streams, value);
-    num_streams = ov::streams::NUMA;
+    num_streams = ov::NumStreams::NUMA;
     setGetProperty(value, num_streams);
     ASSERT_GT(value, 0); // value has been configured automatically
-    num_streams = ov::streams::AUTO;
+    num_streams = ov::NumStreams::AUTO;
     setGetProperty(value, num_streams);
     ASSERT_GT(value, 0); // value has been configured automatically
@@ -132,12 +132,12 @@ TEST(OVClassBasicTest, smoke_SetConfigAffinity) {
 #else
     auto defaultBindThreadParameter = ov::Affinity::CORE;
 #endif
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity));
     ASSERT_EQ(defaultBindThreadParameter, value);
     const ov::Affinity affinity = ov::Affinity::HYBRID_AWARE;
-    ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity)));
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity));
+    OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity)));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity));
     ASSERT_EQ(affinity, value);
 }
@@ -146,13 +146,13 @@ TEST(OVClassBasicTest, smoke_SetConfigHintInferencePrecision) {
     auto value = ov::element::f32;
     const auto precision = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32;
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
     ASSERT_EQ(precision, value);
     const auto forcedPrecision = ov::element::f32;
-    ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision)));
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision)));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision));
     ASSERT_EQ(precision, forcedPrecision);
 }
@@ -161,13 +161,13 @@ TEST(OVClassBasicTest, smoke_SetConfigEnableProfiling) {
     bool value;
     const bool enableProfilingDefault = false;
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling));
     ASSERT_EQ(enableProfilingDefault, value);
     const bool enableProfiling = true;
-    ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling)));
-    ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling));
+    OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling)));
+    OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling));
     ASSERT_EQ(enableProfiling, value);
 }
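Every ASSERT_NO_THROW around a set_property/get_property pair becomes OV_ASSERT_NO_THROW in these tests. The real macro lives in the OpenVINO test utilities; one plausible definition, shown here only to illustrate the intent of surfacing the exception message instead of a bare GTest failure:

// Illustrative stand-in, not the actual OV_ASSERT_NO_THROW definition.
#include <gtest/gtest.h>

#define MY_ASSERT_NO_THROW(statement)                          \
    try {                                                      \
        statement;                                             \
    } catch (const std::exception& e) {                        \
        FAIL() << "Unexpected exception: " << e.what();        \
    } catch (...) {                                            \
        FAIL() << "Unexpected non-standard exception";         \
    }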
@@ -70,9 +70,9 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVClassGetConfigTest, OVClassGetConfigTest, ::t
 TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedScaleFactors) {
     ov::Core core;
     float sf1, sf2;
-    ASSERT_NO_THROW(core.set_property({{"GNA_SCALE_FACTOR_0", "1634.0"}, {"GNA_SCALE_FACTOR_1", "2000.0"}}));
-    ASSERT_NO_THROW(sf1 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_0").as<std::string>()));
-    ASSERT_NO_THROW(sf2 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_1").as<std::string>()));
+    OV_ASSERT_NO_THROW(core.set_property({{"GNA_SCALE_FACTOR_0", "1634.0"}, {"GNA_SCALE_FACTOR_1", "2000.0"}}));
+    OV_ASSERT_NO_THROW(sf1 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_0").as<std::string>()));
+    OV_ASSERT_NO_THROW(sf2 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_1").as<std::string>()));
     ASSERT_FLOAT_EQ(1634.0, sf1);
     ASSERT_FLOAT_EQ(2000.0, sf2);
@@ -84,16 +84,16 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedScaleFactorsPerInput) {
     ov::Core core;
     std::map<std::string, float> scale_factors_per_input;
-    ASSERT_NO_THROW(core.set_property("GNA",
+    OV_ASSERT_NO_THROW(core.set_property("GNA",
         ov::intel_gna::scale_factors_per_input(std::map<std::string, float>{{"input_0", 1634.0f}, {"input_1", 2000.0f}})));
-    ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input));
+    OV_ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input));
     ASSERT_EQ(2, scale_factors_per_input.size());
     ASSERT_FLOAT_EQ(1634.0f, scale_factors_per_input["input_0"]);
     ASSERT_FLOAT_EQ(2000.0f, scale_factors_per_input["input_1"]);
-    ASSERT_NO_THROW(core.set_property("GNA",
+    OV_ASSERT_NO_THROW(core.set_property("GNA",
         ov::intel_gna::scale_factors_per_input(std::map<std::string, float>{{"0", 1.0f}})));
-    ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input));
+    OV_ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input));
     ASSERT_EQ(1, scale_factors_per_input.size());
     ASSERT_FLOAT_EQ(1.0f, scale_factors_per_input["0"]);
 }
@@ -102,23 +102,23 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedPrecisionHint) {
     ov::Core core;
     ov::element::Type precision;
-    ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
     ASSERT_EQ(ov::element::undefined, precision);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i8)));
-    ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i8)));
+    OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
     ASSERT_EQ(ov::element::i8, precision);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i16)));
-    ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i16)));
+    OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
     ASSERT_EQ(ov::element::i16, precision);
-    ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I8"}}));
-    ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I8"}}));
+    OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
     ASSERT_EQ(ov::element::i8, precision);
-    ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I16"}}));
-    ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I16"}}));
+    OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision));
     ASSERT_EQ(ov::element::i16, precision);
     ASSERT_THROW(core.set_property("GNA", { ov::hint::inference_precision(ov::element::i8),
@@ -132,15 +132,15 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedPerformanceHint) {
     ov::Core core;
     ov::hint::PerformanceMode mode;
-    ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode));
+    OV_ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode));
     ASSERT_EQ(ov::hint::PerformanceMode::UNDEFINED, mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)));
-    ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)));
+    OV_ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode));
     ASSERT_EQ(ov::hint::PerformanceMode::LATENCY, mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)));
-    ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)));
+    OV_ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode));
     ASSERT_EQ(ov::hint::PerformanceMode::THROUGHPUT, mode);
     ASSERT_THROW(core.set_property("GNA", {{ov::hint::performance_mode.name(), "ABC"}}), ov::Exception);
@@ -150,24 +150,24 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedNumRequests) {
     ov::Core core;
     uint32_t num_requests;
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(8)));
-    ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(8)));
+    OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
     ASSERT_EQ(8, num_requests);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1)));
-    ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1)));
+    OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
     ASSERT_EQ(1, num_requests);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1000)));
-    ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1000)));
+    OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
     ASSERT_EQ(127, num_requests); // maximum value
-    ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(0)));
-    ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(0)));
+    OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests));
     ASSERT_EQ(1, num_requests); // minimum value
     OPENVINO_SUPPRESS_DEPRECATED_START
-    ASSERT_NO_THROW(core.set_property("GNA", {ov::hint::num_requests(8), {GNA_CONFIG_KEY(LIB_N_THREADS), "8"}}));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", {ov::hint::num_requests(8), {GNA_CONFIG_KEY(LIB_N_THREADS), "8"}}));
     ASSERT_THROW(core.set_property("GNA", {ov::hint::num_requests(4), {GNA_CONFIG_KEY(LIB_N_THREADS), "8"}}), ov::Exception);
     OPENVINO_SUPPRESS_DEPRECATED_END
     ASSERT_THROW(core.set_property("GNA", {{ov::hint::num_requests.name(), "ABC"}}), ov::Exception);
@@ -177,31 +177,31 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedExecutionMode) {
     ov::Core core;
     auto execution_mode = ov::intel_gna::ExecutionMode::AUTO;
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::SW_EXACT, execution_mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_FP32)));
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_FP32)));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::SW_FP32, execution_mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT)));
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT)));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::SW_EXACT, execution_mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK)));
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK)));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK, execution_mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW)));
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW)));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::HW, execution_mode);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::AUTO)));
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::AUTO)));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::AUTO, execution_mode);
     ASSERT_THROW(core.set_property("GNA", {{ov::intel_gna::execution_mode.name(), "ABC"}}), ov::Exception);
-    ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
+    OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode));
     ASSERT_EQ(ov::intel_gna::ExecutionMode::AUTO, execution_mode);
 }
@@ -210,30 +210,30 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedTargetDevice) {
     auto execution_target = ov::intel_gna::HWGeneration::UNDEFINED;
     auto compile_target = ov::intel_gna::HWGeneration::UNDEFINED;
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0)));
-    ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0)));
+    OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_2_0, execution_target);
-    ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target));
+    OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_2_0, compile_target);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0)));
-    ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0)));
+    OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_3_0, execution_target);
-    ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target));
+    OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_2_0, compile_target);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_0)));
-    ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_0)));
+    OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_3_0, execution_target);
-    ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target));
+    OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_3_0, compile_target);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::UNDEFINED)));
-    ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::UNDEFINED)));
+    OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::UNDEFINED, execution_target);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::UNDEFINED)));
-    ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::execution_target));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::UNDEFINED)));
+    OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::execution_target));
     ASSERT_EQ(ov::intel_gna::HWGeneration::UNDEFINED, compile_target);
     ASSERT_THROW(core.set_property("GNA", {ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0),
@@ -249,22 +249,22 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedPwlAlgorithm) {
     auto pwl_algo = ov::intel_gna::PWLDesignAlgorithm::UNDEFINED;
     float pwl_max_error = 0.0f;
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT)));
-    ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT)));
+    OV_ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm));
     ASSERT_EQ(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT, pwl_algo);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION)));
-    ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION)));
+    OV_ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm));
     ASSERT_EQ(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION, pwl_algo);
     ASSERT_THROW(core.set_property("GNA", {{ov::intel_gna::pwl_design_algorithm.name(), "ABC"}}), ov::Exception);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(0.05)));
-    ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(0.05)));
+    OV_ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent));
     ASSERT_FLOAT_EQ(0.05, pwl_max_error);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(100.0f)));
-    ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(100.0f)));
+    OV_ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent));
     ASSERT_FLOAT_EQ(100.0f, pwl_max_error);
     OPENVINO_SUPPRESS_DEPRECATED_START
@@ -279,12 +279,12 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedLogLevel) {
     ov::Core core;
     auto level = ov::log::Level::NO;
-    ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::WARNING)));
-    ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::WARNING)));
+    OV_ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level));
     ASSERT_EQ(ov::log::Level::WARNING, level);
-    ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::NO)));
-    ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::NO)));
+    OV_ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level));
     ASSERT_EQ(ov::log::Level::NO, level);
     ASSERT_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::ERR)), ov::Exception);
@@ -298,8 +298,8 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedFwModelPath) {
     ov::Core core;
     std::string path = "";
-    ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::firmware_model_image_path("model.bin")));
-    ASSERT_NO_THROW(path = core.get_property("GNA", ov::intel_gna::firmware_model_image_path));
+    OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::firmware_model_image_path("model.bin")));
+    OV_ASSERT_NO_THROW(path = core.get_property("GNA", ov::intel_gna::firmware_model_image_path));
     ASSERT_EQ("model.bin", path);
 }
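The num_requests expectations above encode a clamp: 1000 is capped to 127 and 0 is raised to 1. A standalone sketch of that clamp, with the bounds inferred from the assertions rather than quoted from the GNA plugin source:

// Sketch of the clamp the test observes: requests limited to [1, 127].
#include <algorithm>
#include <cstdint>

uint32_t clamp_num_requests(uint32_t requested) {
    const uint32_t kMinRequests = 1;    // assumed lower bound, from ASSERT_EQ(1, ...)
    const uint32_t kMaxRequests = 127;  // assumed upper bound, from ASSERT_EQ(127, ...)
    return std::min(std::max(requested, kMinRequests), kMaxRequests);
}
// clamp_num_requests(1000) == 127; clamp_num_requests(0) == 1.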
@@ -447,7 +447,7 @@ TEST_P(OVClassGetMetricTest_GPU_MAX_BATCH_SIZE_STREAM_DEVICE_MEM, GetMetricAndPr
     uint32_t n_streams = 2;
     int64_t available_device_mem_size = 1073741824;
     ov::AnyMap _options = {ov::hint::model(simpleNetwork),
-                           ov::streams::num(n_streams),
+                           ov::num_streams(n_streams),
                            ov::intel_gpu::hint::available_device_mem(available_device_mem_size)};
     ASSERT_NO_THROW(p = ie.get_property(deviceName, ov::max_batch_size.name(), _options));