diff --git a/docs/snippets/GPU_Metric1.cpp b/docs/snippets/GPU_Metric1.cpp
index c4995e3cc99..64e959233e5 100644
--- a/docs/snippets/GPU_Metric1.cpp
+++ b/docs/snippets/GPU_Metric1.cpp
@@ -9,7 +9,7 @@
 uint32_t n_streams = 2;
 int64_t available_device_mem_size = 3221225472;
 ov::AnyMap options = {
     ov::hint::model(model), // Required. Set the address of the target network. If this is not set, the MAX_BATCH_SIZE returns 1.
-    ov::streams::num(n_streams), // Optional. Set only when you want to estimate max batch size for a specific throughtput streams. Default is 1 or throughtput streams set by SetConfig.
+    ov::num_streams(n_streams), // Optional. Set only when you want to estimate the max batch size for a specific number of throughput streams. Default is 1, or the number of throughput streams set by SetConfig.
     ov::intel_gpu::hint::available_device_mem(available_device_mem_size) // Optional. Set only when you want to limit the available device mem size.
 };
diff --git a/samples/cpp/benchmark_app/README.md b/samples/cpp/benchmark_app/README.md
index 436c3dc0434..81345b1b2a0 100644
--- a/samples/cpp/benchmark_app/README.md
+++ b/samples/cpp/benchmark_app/README.md
@@ -124,12 +124,12 @@ Options:
                       Also, using nstreams>1 is inherently throughput-oriented option, while for the best-latency estimations the number of streams should be set to 1.
     -nthreads ""      Optional. Number of threads to use for inference on the CPU (including HETERO and MULTI cases).
-    -enforcebf16=""   Optional. By default floating point operations execution in bfloat16 precision are enforced if supported by platform.
-    -pin "YES"/"HYBRID_AWARE"/"NUMA"/"NO"
+    -pin ("YES"|"CORE")/"HYBRID_AWARE"/"NUMA"/("NO"|"NONE")
                       Optional. Explicit inference threads binding options (leave empty to let the OpenVINO to make a choice): enabling threads->cores pinning ("YES", which is already default for a conventional CPU), letting the runtime to decide on the threads->different core types ("HYBRID_AWARE", which is default on the hybrid CPUs) threads->(NUMA)nodes ("NUMA") or completely disable ("NO") CPU inference threads pinning.
+    -infer_precision device_name:infer_precision1,device_name:infer_precision2
+                      Optional. Hint that specifies the inference precision.
     -ip "U8"/"FP16"/"FP32"    Optional. Specifies precision for all input layers of the network.
     -op "U8"/"FP16"/"FP32"    Optional. Specifies precision for all output layers of the network.
     -iop              Optional. Specifies precision for input and output layers by name. Example: -iop "input:FP16, output:FP16". Notice that quotes are required.
diff --git a/samples/cpp/benchmark_app/benchmark_app.hpp b/samples/cpp/benchmark_app/benchmark_app.hpp
index ae3b785f767..6dad11a637f 100644
--- a/samples/cpp/benchmark_app/benchmark_app.hpp
+++ b/samples/cpp/benchmark_app/benchmark_app.hpp
@@ -191,8 +191,8 @@ static const char cache_dir_message[] = "Optional. Enables caching of loaded mod
 static const char load_from_file_message[] = "Optional. Loads model from file directly without ReadNetwork."
                                              " All CNNNetwork options (like re-shape) will be ignored";
-// @brief message for quantization bits
-static const char gna_qb_message[] = "Optional. Weight bits for quantization: 8 or 16 (default)";
+// @brief message for inference_precision
+static const char inference_precision_message[] = "Optional. Inference precision";

 static constexpr char inputs_precision_message[] = "Optional. Specifies precision for all input layers of the network.";
@@ -275,9 +275,6 @@
 DEFINE_string(nstreams, "", infer_num_streams_message);

 /// @brief The percentile which will be reported in latency metric
 DEFINE_uint32(latency_percentile, 50, infer_latency_percentile_message);

-/// @brief Enforces bf16 execution with bfloat16 precision on systems having this capability
-DEFINE_bool(enforcebf16, false, enforce_bf16_message);
-
 /// @brief Define parameter for batch size
 /// Default is 0 (that means don't specify)
 DEFINE_uint32(b, 0, batch_size_message);
@@ -329,8 +326,8 @@
 DEFINE_string(data_shape, "", data_shape_message);

 /// @brief Define flag for layout shape
 DEFINE_string(layout, "", layout_message);

-/// @brief Define flag for quantization bits (default 16)
-DEFINE_int32(qb, 16, gna_qb_message);
+/// @brief Define flag for inference precision
+DEFINE_string(infer_precision, "f32", inference_precision_message);

 /// @brief Specify precision for all input layers of the network
 DEFINE_string(ip, "", inputs_precision_message);
@@ -391,7 +388,6 @@ static void show_usage() {
     std::cout << std::endl << " device-specific performance options:" << std::endl;
     std::cout << " -nstreams \"\" " << infer_num_streams_message << std::endl;
     std::cout << " -nthreads \"\" " << infer_num_threads_message << std::endl;
-    std::cout << " -enforcebf16= " << enforce_bf16_message << std::endl;
     std::cout << " -pin \"YES\"/\"HYBRID_AWARE\"/\"NO\"/\"NUMA\" " << infer_threads_pinning_message << std::endl;
 #ifdef HAVE_DEVICE_MEM_SUPPORT
     std::cout << " -use_device_mem " << use_device_mem_message << std::endl;
@@ -405,7 +401,7 @@ static void show_usage() {
     std::cout << " -pcseq " << pcseq_message << std::endl;
     std::cout << " -dump_config " << dump_config_message << std::endl;
     std::cout << " -load_config " << load_config_message << std::endl;
-    std::cout << " -qb " << gna_qb_message << std::endl;
+    std::cout << " -infer_precision \"\" " << inference_precision_message << std::endl;
     std::cout << " -ip " << inputs_precision_message << std::endl;
     std::cout << " -op " << outputs_precision_message << std::endl;
     std::cout << " -iop \"\" " << iop_message << std::endl;
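For readers following along: the new `-infer_precision` flag ends up as an `ov::hint::inference_precision` entry in the per-device config map built in main.cpp below. A minimal sketch of the equivalent programmatic call (the model path is hypothetical):

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;
        // Equivalent of `benchmark_app -d CPU -infer_precision f32`: the flag value
        // becomes an ov::hint::inference_precision entry in the device config.
        core.set_property("CPU", ov::hint::inference_precision(ov::element::f32));
        auto compiled = core.compile_model("model.xml", "CPU");  // hypothetical path
        return 0;
    }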
diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp
index 5340d33db53..2554a3bdb58 100644
--- a/samples/cpp/benchmark_app/main.cpp
+++ b/samples/cpp/benchmark_app/main.cpp
@@ -157,7 +157,9 @@ int main(int argc, char* argv[]) {
     auto devices = parse_devices(device_name);

     // Parse nstreams per device
-    std::map<std::string, std::string> device_nstreams = parse_nstreams_value_per_device(devices, FLAGS_nstreams);
+    std::map<std::string, std::string> device_nstreams = parse_value_per_device(devices, FLAGS_nstreams);
+    std::map<std::string, std::string> device_infer_precision =
+        parse_value_per_device(devices, FLAGS_infer_precision);

     // Load device config file if specified
     std::map<std::string, ov::AnyMap> config;
@@ -243,9 +245,7 @@ int main(int argc, char* argv[]) {
     bool perf_counts = false;
     // Update config per device according to command line parameters
     for (auto& device : devices) {
-        if (!config.count(device))
-            config[device] = {};
-        auto& device_config = config.at(device);
+        auto& device_config = config[device];

         // high-level performance modes
         if (ov_perf_hint != ov::hint::PerformanceMode::UNDEFINED) {
@@ -276,24 +276,28 @@
         }
         perf_counts = (device_config.at(ov::enable_profiling.name()).as<bool>()) ? true : perf_counts;

+        auto supported_properties = core.get_property(device, ov::supported_properties);
+
+        auto supported = [&](const std::string& key) {
+            return std::find(std::begin(supported_properties), std::end(supported_properties), key) !=
+                   std::end(supported_properties);
+        };

         // the rest are individual per-device settings (overriding the values set with perf modes)
         auto setThroughputStreams = [&]() {
             std::string key = getDeviceTypeFromName(device) + "_THROUGHPUT_STREAMS";
-            if (device_nstreams.count(device)) {
+            auto it_device_nstreams = device_nstreams.find(device);
+            if (it_device_nstreams != device_nstreams.end()) {
                 // set to user defined value
                 auto supported_properties = core.get_property(device, ov::supported_properties);
-                if (std::find(supported_properties.begin(), supported_properties.end(), key) !=
-                    supported_properties.end()) {
-                    device_config[key] = device_nstreams.at(device);
-                } else if (std::find(supported_properties.begin(),
-                                     supported_properties.end(),
-                                     ov::streams::num.name()) != supported_properties.end()) {
+                if (supported(key)) {
+                    device_config[key] = it_device_nstreams->second;
+                } else if (supported(ov::num_streams.name())) {
                     // Use API 2.0 key for streams
-                    key = ov::streams::num.name();
-                    device_config[key] = device_nstreams.at(device);
+                    key = ov::num_streams.name();
+                    device_config[key] = it_device_nstreams->second;
                 } else {
                     throw std::logic_error("Device " + device + " doesn't support config key '" + key + "' " +
-                                           "and '" + ov::streams::num.name() + "'!" +
+                                           "and '" + ov::num_streams.name() + "'! " +
                                            "Please specify -nstreams for correct devices in format "
                                            "<dev1>:<nstreams1>,<dev2>:<nstreams2>" +
                                            " or via configuration file.");
@@ -309,45 +313,66 @@
                            << slog::endl;
                 if (std::string::npos == device.find("MYRIAD")) {  // MYRIAD sets the default number of
                                                                    // streams implicitly (without _AUTO)
-                    auto supported_properties = core.get_property(device, ov::supported_properties);
-                    if (std::find(supported_properties.begin(), supported_properties.end(), key) !=
-                        supported_properties.end()) {
+                    if (supported(key)) {
                         device_config[key] = std::string(getDeviceTypeFromName(device) + "_THROUGHPUT_AUTO");
-                    } else if (std::find(supported_properties.begin(),
-                                         supported_properties.end(),
-                                         ov::streams::num.name()) != supported_properties.end()) {
+                    } else if (supported(ov::num_streams.name())) {
                         // Use API 2.0 key for streams
-                        key = ov::streams::num.name();
-                        device_config[key] = std::to_string(ov::streams::AUTO);
+                        key = ov::num_streams.name();
+                        device_config[key] = ov::NumStreams::AUTO;
                     }
                 }
             }
-            if (device_config.count(key))
-                device_nstreams[device] = device_config.at(key).as<std::string>();
+            auto it_streams = device_config.find(ov::num_streams.name());
+            if (it_streams != device_config.end())
+                device_nstreams[device] = it_streams->second.as<std::string>();
         };
+        auto set_infer_precision = [&] {
+            auto it_device_infer_precision = device_infer_precision.find(device);
+            if (it_device_infer_precision != device_infer_precision.end()) {
+                // set to user defined value
+                if (!supported(ov::hint::inference_precision.name())) {
+                    throw std::logic_error("Device " + device + " doesn't support config key '" +
+                                           ov::hint::inference_precision.name() + "'! " +
+                                           "Please specify -infer_precision for correct devices in format "
+                                           "<dev1>:<infer_precision1>,<dev2>:<infer_precision2>" +
+                                           " or via configuration file.");
+                }
+                device_config.emplace(ov::hint::inference_precision(it_device_infer_precision->second));
+            }
+        };
+
+        auto fix_pin_option = [](const std::string& str) -> std::string {
+            if (str == "NO")
+                return "NONE";
+            else if (str == "YES")
+                return "CORE";
+            else
+                return str;
+        };
+
+        if (supported(ov::inference_num_threads.name()) && isFlagSetInCommandLine("nthreads")) {
+            device_config.emplace(ov::inference_num_threads(FLAGS_nthreads));
+        }
+        if (supported(ov::affinity.name()) && isFlagSetInCommandLine("pin")) {
+            device_config.emplace(ov::affinity(fix_pin_option(FLAGS_pin)));
+        }
+
         if (device.find("CPU") != std::string::npos) {  // CPU supports few special performance-oriented keys
             // limit threading for CPU portion of inference
-            if (isFlagSetInCommandLine("nthreads"))
-                device_config[CONFIG_KEY(CPU_THREADS_NUM)] = std::to_string(FLAGS_nthreads);
-
-            if (isFlagSetInCommandLine("enforcebf16"))
-                device_config[CONFIG_KEY(ENFORCE_BF16)] = FLAGS_enforcebf16 ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
-
-            if (isFlagSetInCommandLine("pin")) {
-                // set to user defined value
-                device_config[CONFIG_KEY(CPU_BIND_THREAD)] = FLAGS_pin;
-            } else if (!device_config.count(CONFIG_KEY(CPU_BIND_THREAD))) {
-                if ((device_name.find("MULTI") != std::string::npos) &&
+            if (!isFlagSetInCommandLine("pin")) {
+                auto it_affinity = device_config.find(ov::affinity.name());
+                if (it_affinity != device_config.end() && (device_name.find("MULTI") != std::string::npos) &&
                     (device_name.find("GPU") != std::string::npos)) {
                     slog::warn << "Turn off threads pinning for " << device
                                << " device since multi-scenario with GPU device is used." << slog::endl;
-                    device_config[CONFIG_KEY(CPU_BIND_THREAD)] = CONFIG_VALUE(NO);
+                    it_affinity->second = ov::Affinity::NONE;
                 }
             }
             // for CPU execution, more throughput-oriented execution via streams
             setThroughputStreams();
+            set_infer_precision();
         } else if (device.find("GPU") != std::string::npos) {
             // for GPU execution, more throughput-oriented execution via streams
             setThroughputStreams();
@@ -365,25 +390,7 @@
             device_config.emplace(ov::log::level(ov::log::Level::WARNING));
             setThroughputStreams();
         } else if (device.find("GNA") != std::string::npos) {
-            if (FLAGS_qb == 8)
-                device_config[GNA_CONFIG_KEY(PRECISION)] = "I8";
-            else
-                device_config[GNA_CONFIG_KEY(PRECISION)] = "I16";
-        } else {
-            auto supported_properties = core.get_property(device, ov::supported_properties);
-            auto supported = [&](const std::string& key) {
-                return std::find(std::begin(supported_properties), std::end(supported_properties), key) !=
-                       std::end(supported_properties);
-            };
-            if (supported(CONFIG_KEY(CPU_THREADS_NUM)) && isFlagSetInCommandLine("nthreads")) {
-                device_config[CONFIG_KEY(CPU_THREADS_NUM)] = std::to_string(FLAGS_nthreads);
-            }
-            if (supported(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) && isFlagSetInCommandLine("nstreams")) {
-                device_config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = FLAGS_nstreams;
-            }
-            if (supported(CONFIG_KEY(CPU_BIND_THREAD)) && isFlagSetInCommandLine("pin")) {
-                device_config[CONFIG_KEY(CPU_BIND_THREAD)] = FLAGS_pin;
-            }
+            set_infer_precision();
         }
     }
@@ -669,7 +676,7 @@ int main(int argc, char* argv[]) {
                 const std::string key = getDeviceTypeFromName(ds.first) + "_THROUGHPUT_STREAMS";
                 device_nstreams[ds.first] = core.get_property(ds.first, key).as<std::string>();
             } catch (const ov::Exception&) {
-                device_nstreams[ds.first] = core.get_property(ds.first, ov::streams::num.name()).as<std::string>();
+                device_nstreams[ds.first] = core.get_property(ds.first, ov::num_streams.name()).as<std::string>();
             }
         }
diff --git a/samples/cpp/benchmark_app/utils.cpp b/samples/cpp/benchmark_app/utils.cpp
index 5a6391ab76f..51d0443cfdc 100644
--- a/samples/cpp/benchmark_app/utils.cpp
+++ b/samples/cpp/benchmark_app/utils.cpp
@@ -118,8 +118,8 @@ std::vector<std::string> parse_devices(const std::string& device_string) {
     return devices;
 }

-std::map<std::string, std::string> parse_nstreams_value_per_device(const std::vector<std::string>& devices,
-                                                                   const std::string& values_string) {
+std::map<std::string, std::string> parse_value_per_device(const std::vector<std::string>& devices,
+                                                          const std::string& values_string) {
     // Format: <device1>:<value1>,<device2>:<value2> or just <value>
     std::map<std::string, std::string> result;
     auto device_value_strings = split(values_string, ',');
diff --git a/samples/cpp/benchmark_app/utils.hpp b/samples/cpp/benchmark_app/utils.hpp
index 2bd1fdadab8..6a514eafccf 100644
--- a/samples/cpp/benchmark_app/utils.hpp
+++ b/samples/cpp/benchmark_app/utils.hpp
@@ -56,8 +56,8 @@ using PartialShapes = std::map;
 std::vector<std::string> parse_devices(const std::string& device_string);
 uint32_t device_default_device_duration_in_seconds(const std::string& device);
-std::map<std::string, std::string> parse_nstreams_value_per_device(const std::vector<std::string>& devices,
-                                                                   const std::string& values_string);
+std::map<std::string, std::string> parse_value_per_device(const std::vector<std::string>& devices,
+                                                          const std::string& values_string);
 std::string get_shape_string(const ov::Shape& shape);
 std::string get_shapes_string(const benchmark_app::PartialShapes& shapes);
 size_t get_batch_size(const benchmark_app::InputsInfo& inputs_info);
diff --git a/samples/cpp/hello_query_device/main.cpp b/samples/cpp/hello_query_device/main.cpp
index 85fc513a64b..7c1314b1908 100644
--- a/samples/cpp/hello_query_device/main.cpp
+++ b/samples/cpp/hello_query_device/main.cpp
@@ -24,81 +24,9 @@ void print_any_value(const ov::Any& value) {
     if (value.empty()) {
         slog::info << "EMPTY VALUE" << slog::endl;
-    } else if (value.is()) {
-        slog::info << std::boolalpha << value.as() << std::noboolalpha << slog::endl;
-    } else if (value.is()) {
-        slog::info << value.as() << slog::endl;
-    } else if (value.is()) {
-        slog::info << value.as() << slog::endl;
-    } else if (value.is()) {
-        slog::info << value.as() << slog::endl;
-    } else if (value.is()) {
-        slog::info << value.as() << slog::endl;
-    } else if (value.is()) {
+    } else {
         std::string stringValue = value.as<std::string>();
         slog::info << (stringValue.empty() ?
"\"\"" : stringValue) << slog::endl; - } else if (value.is>()) { - slog::info << value.as>() << slog::endl; - } else if (value.is>()) { - slog::info << value.as>() << slog::endl; - } else if (value.is>()) { - slog::info << value.as>() << slog::endl; - } else if (value.is>()) { - slog::info << value.as>() << slog::endl; - } else if (value.is>()) { - auto values = value.as>(); - slog::info << "{ "; - slog::info << std::get<0>(values) << ", "; - slog::info << std::get<1>(values) << ", "; - slog::info << std::get<2>(values); - slog::info << " }"; - slog::info << slog::endl; - } else if (value.is()) { - auto v = value.as(); - slog::info << v << slog::endl; - } else if (value.is>()) { - auto values = value.as>(); - slog::info << "{ "; - for (auto& kv : values) { - slog::info << kv.first << ": " << kv.second << "; "; - } - slog::info << " }"; - slog::info << slog::endl; - } else if (value.is>()) { - auto values = value.as>(); - slog::info << "{ "; - slog::info << std::get<0>(values) << ", "; - slog::info << std::get<1>(values); - slog::info << " }"; - slog::info << slog::endl; - } else if (value.is>()) { - auto values = value.as>(); - slog::info << "{ "; - for (auto& kv : values) { - slog::info << kv.first << ": " << kv.second << "; "; - } - slog::info << " }"; - slog::info << slog::endl; - } else if (value.is>()) { - auto values = value.as>(); - slog::info << "{ "; - for (auto& kv : values) { - slog::info << kv.first << ": " << kv.second << "; "; - } - slog::info << " }"; - slog::info << slog::endl; - } else if (value.is()) { - auto values = value.as(); - slog::info << (values.empty() ? "\"\"" : values) << slog::endl; - } else { - std::stringstream strm; - value.print(strm); - auto str = strm.str(); - if (str.empty()) { - slog::info << "UNSUPPORTED TYPE" << slog::endl; - } else { - slog::info << str << slog::endl; - } } } diff --git a/src/common/util/include/openvino/util/common_util.hpp b/src/common/util/include/openvino/util/common_util.hpp index 82cf13fbe97..a568455c6e0 100644 --- a/src/common/util/include/openvino/util/common_util.hpp +++ b/src/common/util/include/openvino/util/common_util.hpp @@ -13,6 +13,40 @@ namespace ov { namespace util { +template +struct ValueTyped { + template + static auto test(U*) -> decltype(std::declval(), std::true_type()) { + return {}; + } + template + static auto test(...) -> std::false_type { + return {}; + } + constexpr static const auto value = std::is_same(nullptr))>::value; +}; + +template +struct Read; + +template ::value, bool>::type = true> +inline typename T::value_type from_string(const std::string& val, const T&) { + std::stringstream ss(val); + typename T::value_type value; + Read{}(ss, value); + return value; +} + +template +struct Write; + +template +inline std::string to_string(const T& value) { + std::stringstream ss; + Write{}(ss, value); + return ss.str(); +} + template std::string join(const T& v, const std::string& sep = ", ") { std::ostringstream ss; diff --git a/src/core/include/openvino/core/any.hpp b/src/core/include/openvino/core/any.hpp index e2e1160b63c..73da173599a 100644 --- a/src/core/include/openvino/core/any.hpp +++ b/src/core/include/openvino/core/any.hpp @@ -26,6 +26,239 @@ class ExecutableNetwork; } // namespace InferenceEngine namespace ov { +/** @cond INTERNAL */ +class Any; +namespace util { +template +struct Read; + +template +struct Istreamable { + template + static auto test(U*) -> decltype(std::declval() >> std::declval(), std::true_type()) { + return {}; + } + template + static auto test(...) 
diff --git a/src/core/include/openvino/core/any.hpp b/src/core/include/openvino/core/any.hpp
index e2e1160b63c..73da173599a 100644
--- a/src/core/include/openvino/core/any.hpp
+++ b/src/core/include/openvino/core/any.hpp
@@ -26,6 +26,239 @@ class ExecutableNetwork;
 }  // namespace InferenceEngine

 namespace ov {
+/** @cond INTERNAL */
+class Any;
+namespace util {
+template
+struct Read;
+
+template
+struct Istreamable {
+    template
+    static auto test(U*) -> decltype(std::declval() >> std::declval(), std::true_type()) {
+        return {};
+    }
+    template
+    static auto test(...) -> std::false_type {
+        return {};
+    }
+    constexpr static const auto value = std::is_same(nullptr))>::value;
+};
+
+template
+struct Readable {
+    template
+    static auto test(U*) -> decltype(read(std::declval(), std::declval()), std::true_type()) {
+        return {};
+    }
+    template
+    static auto test(...) -> std::false_type {
+        return {};
+    }
+    constexpr static const auto value = std::is_same(nullptr))>::value;
+};
+
+template
+struct Read {
+    template
+    auto operator()(std::istream&, U&) const ->
+        typename std::enable_if::value && !Istreamable::value && !Readable::value>::type {
+        OPENVINO_UNREACHABLE("Could not read type without std::istream& operator>>(std::istream&, T)",
+                             " defined or ov::util::Read class specialization, T: ",
+                             typeid(T).name());
+    }
+    template
+    auto operator()(std::istream& is, U& value) const ->
+        typename std::enable_if::value && Istreamable::value && !Readable::value>::type {
+        is >> value;
+    }
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, bool& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, Any& any) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, int& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, long& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, long long& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, unsigned& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, unsigned long& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, unsigned long long& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, float& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, double& value) const;
+};
+
+template <>
+struct OPENVINO_API Read {
+    void operator()(std::istream& is, long double& value) const;
+};
+
+template <>
+struct OPENVINO_API Read> {
+    void operator()(std::istream& is, std::tuple& tuple) const;
+};
+
+template <>
+struct OPENVINO_API Read> {
+    void operator()(std::istream& is, std::tuple& tuple) const;
+};
+
+template
+struct Read, typename std::enable_if::value>::type> {
+    void operator()(std::istream& is, std::vector& vec) const {
+        while (is.good()) {
+            T v;
+            Read{}(is, v);
+            vec.push_back(std::move(v));
+        }
+    }
+};
+
+template
+struct Read<
+    std::map,
+    typename std::enable_if::value && std::is_default_constructible::value>::type> {
+    void operator()(std::istream& is, std::map& map) const {
+        while (is.good()) {
+            K k;
+            T v;
+            Read{}(is, k);
+            Read{}(is, v);
+            map.emplace(std::move(k), std::move(v));
+        }
+    }
+};
+
+template
+struct Write;
+
+template
+struct Ostreamable {
+    template
+    static auto test(U*) -> decltype(std::declval() << std::declval(), std::true_type()) {
+        return {};
+    }
+    template
+    static auto test(...) -> std::false_type {
+        return {};
+    }
+    constexpr static const auto value = std::is_same(nullptr))>::value;
+};
+template
+struct Writable {
+    template
+    static auto test(U*) -> decltype(write(std::declval(), std::declval()), std::true_type()) {
+        return {};
+    }
+    template
+    static auto test(...) -> std::false_type {
+        return {};
+    }
+    constexpr static const auto value = std::is_same(nullptr))>::value;
+};
+
+template
+struct Write {
+    template
+    auto operator()(std::ostream& os, const U&) const ->
+        typename std::enable_if::value && !Ostreamable::value && !Writable::value>::type {}
+    template
+    auto operator()(std::ostream& os, const U& value) const ->
+        typename std::enable_if::value && Ostreamable::value && !Writable::value>::type {
+        os << value;
+    }
+};
+
+template <>
+struct OPENVINO_API Write {
+    void operator()(std::ostream& is, const bool& b) const;
+};
+
+template <>
+struct OPENVINO_API Write {
+    void operator()(std::ostream& is, const Any& any) const;
+};
+
+template <>
+struct OPENVINO_API Write> {
+    void operator()(std::ostream& os, const std::tuple& tuple) const;
+};
+
+template <>
+struct OPENVINO_API Write> {
+    void operator()(std::ostream& os, const std::tuple& tuple) const;
+};
+
+template
+struct Write> {
+    void operator()(std::ostream& os, const std::vector& vec) const {
+        if (!vec.empty()) {
+            std::size_t i = 0;
+            for (auto&& v : vec) {
+                Write{}(os, v);
+                if (i < (vec.size() - 1))
+                    os << ' ';
+                ++i;
+            }
+        }
+    }
+};
+
+template
+struct Write> {
+    void operator()(std::ostream& os, const std::map& map) const {
+        if (!map.empty()) {
+            std::size_t i = 0;
+            for (auto&& v : map) {
+                Write{}(os, v.first);
+                os << ' ';
+                Write{}(os, v.second);
+                if (i < (map.size() - 1))
+                    os << ' ';
+                ++i;
+            }
+        }
+    }
+};
+}  // namespace util
+/** @endcond */

 class Node;
 class RuntimeAttribute;
@@ -44,34 +277,6 @@ class OPENVINO_API Any {
     template
     using decay_t = typename std::decay::type;

-    template
-    struct IsNullPointer : std::is_same::type> {};
-
-    template
-    struct Ostreamable {
-        template
-        static auto test(U*) -> decltype(std::declval() << std::declval(), std::true_type()) {
-            return {};
-        }
-        template
-        static auto test(...) -> std::false_type {
-            return {};
-        }
-        constexpr static const auto value = std::is_same(nullptr))>::value;
-    };
-
-    template
-    static typename std::enable_if::value && !std::is_same::value>::type print_impl(
-        std::ostream& os,
-        const U& value) {
-        os << value;
-    }
-
-    static void print_impl(std::ostream& os, const bool& b);
-
-    template
-    static typename std::enable_if::value>::type print_impl(std::ostream&, const U&) {}
-
     template
     struct EqualityComparable {
         static void* conv(bool);
@@ -113,7 +318,7 @@ class OPENVINO_API Any {
     template
     [[noreturn]] static typename std::enable_if::value, bool>::type equal_impl(const U&,
                                                                                const U&) {
-        throw ov::Exception{"Could not compare types without equality operator"};
+        OPENVINO_UNREACHABLE("Could not compare types without equality operator");
     }

     template
@@ -167,42 +372,6 @@ class OPENVINO_API Any {
         constexpr static const auto value = std::is_same(nullptr))>::value;
     };
-    template
-    struct Istreamable {
-        template
-        static auto test(U*) -> decltype(std::declval() >> std::declval(), std::true_type()) {
-            return {};
-        }
-        template
-        static auto test(...) -> std::false_type {
-            return {};
-        }
-        constexpr static const auto value = std::is_same(nullptr))>::value;
-    };
-
-    template
-    static typename std::enable_if::value && !std::is_same::value>::type read_impl(
-        std::istream& is,
-        U& value) {
-        is >> value;
-    }
-
-    static void read_impl(std::istream& is, bool& value);
-    static void read_impl(std::istream& is, int& value);
-    static void read_impl(std::istream& is, long& value);
-    static void read_impl(std::istream& is, long long& value);
-    static void read_impl(std::istream& is, unsigned& value);
-    static void read_impl(std::istream& is, unsigned long& value);
-    static void read_impl(std::istream& is, unsigned long long& value);
-    static void read_impl(std::istream& is, float& value);
-    static void read_impl(std::istream& is, double& value);
-    static void read_impl(std::istream& is, long double& value);
-
-    template
-    static typename std::enable_if::value>::type read_impl(std::istream&, U&) {
-        throw ov::Exception{"Could read type without std::istream& operator>>(std::istream&, T) defined"};
-    }
-
     static bool equal(std::type_index lhs, std::type_index rhs);

     /**
@@ -315,7 +484,7 @@ class OPENVINO_API Any {
         }

         void read(std::istream&) override {
-            throw ov::Exception{"Pointer to runtime attribute is not readable from std::istream"};
+            OPENVINO_UNREACHABLE("Pointer to runtime attribute is not readable from std::istream");
         }

         T runtime_attribute;
@@ -364,11 +533,11 @@ class OPENVINO_API Any {
         }

         void print(std::ostream& os) const override {
-            print_impl(os, value);
+            util::Write{}(os, value);
         }

         void read(std::istream& is) override {
-            read_impl(is, value);
+            util::Read{}(is, value);
         }

         T value;
@@ -506,17 +675,20 @@ public:
             } else {
                 auto runtime_attribute = _impl->as_runtime_attribute();
                 if (runtime_attribute == nullptr) {
-                    throw ov::Exception{
-                        std::string{"Any does not contains pointer to runtime_attribute. It contains "} +
-                        _impl->type_info().name()};
+                    OPENVINO_UNREACHABLE("Any does not contain a pointer to runtime_attribute. It contains ",
+                                         _impl->type_info().name());
                 }
                 auto vptr = std::dynamic_pointer_cast(runtime_attribute);
                 if (vptr == nullptr && T::element_type::get_type_info_static() != runtime_attribute->get_type_info() &&
                     T::element_type::get_type_info_static() != RuntimeAttribute::get_type_info_static()) {
-                    throw ov::Exception{std::string{"Could not cast Any runtime_attribute to "} + typeid(T).name() +
-                                        " from " + _impl->type_info().name() + "; from " +
-                                        static_cast(runtime_attribute->get_type_info()) + " to " +
-                                        static_cast(T::element_type::get_type_info_static())};
+                    OPENVINO_UNREACHABLE("Could not cast Any runtime_attribute to ",
+                                         typeid(T).name(),
+                                         " from ",
+                                         _impl->type_info().name(),
+                                         "; from ",
+                                         static_cast(runtime_attribute->get_type_info()),
+                                         " to ",
+                                         static_cast(T::element_type::get_type_info_static()));
                 }
                 vptr = std::static_pointer_cast(runtime_attribute);
                 _temp_impl = std::make_shared>>(vptr);
@@ -542,17 +714,20 @@ public:
             } else {
                 auto runtime_attribute = _impl->as_runtime_attribute();
                 if (runtime_attribute == nullptr) {
-                    throw ov::Exception{
-                        std::string{"Any does not contains pointer to runtime_attribute. It contains "} +
-                        _impl->type_info().name()};
+                    OPENVINO_UNREACHABLE("Any does not contain a pointer to runtime_attribute. It contains ",
It contains ", + _impl->type_info().name()); } auto vptr = std::dynamic_pointer_cast(runtime_attribute); if (vptr == nullptr && T::element_type::get_type_info_static() != runtime_attribute->get_type_info() && T::element_type::get_type_info_static() != RuntimeAttribute::get_type_info_static()) { - throw ov::Exception{std::string{"Could not cast Any runtime_attribute to "} + typeid(T).name() + - " from " + _impl->type_info().name() + "; from " + - static_cast(runtime_attribute->get_type_info()) + " to " + - static_cast(T::element_type::get_type_info_static())}; + OPENVINO_UNREACHABLE("Could not cast Any runtime_attribute to ", + typeid(T).name(), + " from ", + _impl->type_info().name(), + "; from ", + static_cast(runtime_attribute->get_type_info()), + " to ", + static_cast(T::element_type::get_type_info_static())); } vptr = std::static_pointer_cast(runtime_attribute); _temp_impl = std::make_shared>>(vptr); @@ -585,7 +760,7 @@ public: return *static_cast*>(_impl->addressof()); } } - throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()}; + OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name()); } /** @@ -612,7 +787,7 @@ public: return *static_cast*>(_impl->addressof()); } } - throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()}; + OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name()); } /** @@ -634,7 +809,7 @@ public: return *static_cast*>(_impl->addressof()); } } - throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()}; + OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name()); } /** @@ -656,7 +831,7 @@ public: return *static_cast*>(_impl->addressof()); } } - throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()}; + OPENVINO_UNREACHABLE("Bad cast from: ", _impl->type_info().name(), " to: ", typeid(T).name()); } /** @@ -666,13 +841,17 @@ public: */ template typename std::enable_if::value, T>::type& as() & { - impl_check(); - if (_impl->is(typeid(decay_t))) { - return *static_cast*>(_impl->addressof()); + if (_impl != nullptr) { + if (_impl->is(typeid(decay_t))) { + return *static_cast*>(_impl->addressof()); + } else { + std::stringstream strm; + print(strm); + _str = strm.str(); + return _str; + } } else { - std::stringstream strm; - print(strm); - _str = strm.str(); + _str = {}; return _str; } } @@ -684,13 +863,17 @@ public: */ template const typename std::enable_if::value, T>::type& as() const& { - impl_check(); - if (_impl->is(typeid(decay_t))) { - return *static_cast*>(_impl->addressof()); + if (_impl != nullptr) { + if (_impl->is(typeid(decay_t))) { + return *static_cast*>(_impl->addressof()); + } else { + std::stringstream strm; + print(strm); + _str = strm.str(); + return _str; + } } else { - std::stringstream strm; - print(strm); - _str = strm.str(); + _str = {}; return _str; } } @@ -805,6 +988,7 @@ public: } }; +/** @cond INTERNAL */ namespace util { template <> struct AsTypePtr { @@ -819,6 +1003,7 @@ struct AsTypePtr { } }; } // namespace util +/** @endcond */ using AnyMap = std::map; diff --git a/src/core/include/openvino/core/type/element_type.hpp b/src/core/include/openvino/core/type/element_type.hpp index e1bef5d022e..9de272a7300 100644 --- a/src/core/include/openvino/core/type/element_type.hpp +++ 
diff --git a/src/core/include/openvino/core/type/element_type.hpp b/src/core/include/openvino/core/type/element_type.hpp
index e1bef5d022e..9de272a7300 100644
--- a/src/core/include/openvino/core/type/element_type.hpp
+++ b/src/core/include/openvino/core/type/element_type.hpp
@@ -166,6 +166,9 @@ OPENVINO_API Type fundamental_type_for(const Type& type);

 OPENVINO_API
 std::ostream& operator<<(std::ostream& out, const ov::element::Type& obj);
+
+OPENVINO_API
+std::istream& operator>>(std::istream& out, ov::element::Type& obj);
 }  // namespace element

 template <>
diff --git a/src/core/src/any.cpp b/src/core/src/any.cpp
index 59007c1c013..07cd26f5196 100644
--- a/src/core/src/any.cpp
+++ b/src/core/src/any.cpp
@@ -121,7 +121,9 @@ const Any::Base* Any::operator->() const {
     return _impl.get();
 }

-void Any::read_impl(std::istream& is, bool& value) {
+namespace util {
+
+void Read::operator()(std::istream& is, bool& value) const {
     std::string str;
     is >> str;
     if (str == "YES") {
@@ -136,33 +138,33 @@
 template
 static auto stream_to(std::istream& is, F&& f) -> decltype(f(std::declval())) {
     std::string str;
-    is >> str;
+    Read{}(is, str);
     try {
         return f(str);
     } catch (std::exception& e) {
         OPENVINO_UNREACHABLE(std::string{"Could not convert to: "} +
-                             typeid(decltype(f(std::declval()))).name() + " from string " + str +
-                             ": " + e.what());
+                             typeid(decltype(f(std::declval()))).name() + " from string \"" + str +
+                             "\": " + e.what());
     }
 }

-void Any::read_impl(std::istream& is, int& value) {
+void Read::operator()(std::istream& is, int& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stoi(str);
     });
 }
-void Any::read_impl(std::istream& is, long& value) {
+void Read::operator()(std::istream& is, long& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stol(str);
     });
 }
-void Any::read_impl(std::istream& is, long long& value) {
+void Read::operator()(std::istream& is, long long& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stoll(str);
     });
 }

-void Any::read_impl(std::istream& is, unsigned& value) {
+void Read::operator()(std::istream& is, unsigned& value) const {
     value = stream_to(is, [](const std::string& str) {
         auto ul = std::stoul(str);
         if (ul > std::numeric_limits<unsigned>::max()) {
@@ -171,34 +173,70 @@
         return static_cast<unsigned>(ul);
     });
 }
-void Any::read_impl(std::istream& is, unsigned long& value) {
+void Read::operator()(std::istream& is, unsigned long& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stoul(str);
     });
 }
-void Any::read_impl(std::istream& is, unsigned long long& value) {
+void Read::operator()(std::istream& is, unsigned long long& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stoull(str);
     });
 }

-void Any::read_impl(std::istream& is, float& value) {
+void Read::operator()(std::istream& is, float& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stof(str);
     });
 }
-void Any::read_impl(std::istream& is, double& value) {
+void Read::operator()(std::istream& is, double& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stod(str);
     });
 }
-void Any::read_impl(std::istream& is, long double& value) {
+void Read::operator()(std::istream& is, long double& value) const {
     value = stream_to(is, [](const std::string& str) {
         return std::stold(str);
     });
 }

-void Any::print_impl(std::ostream& os, const bool& b) {
+void Read>::operator()(
    std::istream& is,
    std::tuple& tuple) const {
+    Read{}(is, std::get<0>(tuple));
+    Read{}(is, std::get<1>(tuple));
+    Read{}(is, std::get<2>(tuple));
+}
+
+void Read>::operator()(std::istream& is, std::tuple& tuple) const {
+    Read{}(is, std::get<0>(tuple));
+    Read{}(is, std::get<1>(tuple));
+}
+
+void Read::operator()(std::istream& is, Any& any) const {
+    any.read(is);
+}
+
+void Write::operator()(std::ostream& os, const bool& b) const {
     os << (b ? "YES" : "NO");
 }
+
+void Write>::operator()(
+    std::ostream& os,
+    const std::tuple& tuple) const {
+    os << std::get<0>(tuple) << " " << std::get<1>(tuple) << " " << std::get<2>(tuple);
+}
+
+void Write>::operator()(
+    std::ostream& os,
+    const std::tuple& tuple) const {
+    os << std::get<0>(tuple) << " " << std::get<1>(tuple);
+}
+
+void Write::operator()(std::ostream& os, const Any& any) const {
+    any.print(os);
+}
+
+}  // namespace util
 }  // namespace ov
diff --git a/src/core/src/type/element_type.cpp b/src/core/src/type/element_type.cpp
index 2ae0671d2a6..fa8e51a3dbe 100644
--- a/src/core/src/type/element_type.cpp
+++ b/src/core/src/type/element_type.cpp
@@ -240,6 +240,41 @@
     return out << obj.get_type_name();
 }

+std::istream& ov::element::operator>>(std::istream& in, ov::element::Type& obj) {
+    static const std::unordered_map<std::string, ov::element::Type> legacy = {
+        {"BOOL", ov::element::boolean},
+        {"BF16", ov::element::bf16},
+        {"I4", ov::element::i4},
+        {"I8", ov::element::i8},
+        {"I16", ov::element::i16},
+        {"I32", ov::element::i32},
+        {"I64", ov::element::i64},
+        {"U4", ov::element::u4},
+        {"U8", ov::element::u8},
+        {"U16", ov::element::u16},
+        {"U32", ov::element::u32},
+        {"U64", ov::element::u64},
+        {"FP32", ov::element::f32},
+        {"FP64", ov::element::f64},
+        {"FP16", ov::element::f16},
+        {"BIN", ov::element::u1},
+    };
+    std::string str;
+    in >> str;
+    auto it_legacy = legacy.find(str);
+    if (it_legacy != legacy.end()) {
+        obj = it_legacy->second;
+        return in;
+    }
+    for (auto&& type : Type::get_known_types()) {
+        if (type->get_type_name() == str) {
+            obj = *type;
+            break;
+        }
+    }
+    return in;
+}
+
 bool ov::element::Type::compatible(const ov::element::Type& t) const {
     return (is_dynamic() || t.is_dynamic() || *this == t);
 }
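The new stream extractor accepts both the current type names and the legacy precision aliases, which is what lets values such as FP16 (for example, from `-infer_precision`) parse into `ov::element::Type`. A quick sketch:

    #include "openvino/core/type/element_type.hpp"
    #include <sstream>

    void parse_element_type() {
        ov::element::Type t;
        std::stringstream("f16") >> t;   // current name -> ov::element::f16
        std::stringstream("FP16") >> t;  // legacy alias -> ov::element::f16 as well
    }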
diff --git a/src/core/tests/any.cpp b/src/core/tests/any.cpp
index 0058311fd76..756fa426a88 100644
--- a/src/core/tests/any.cpp
+++ b/src/core/tests/any.cpp
@@ -298,7 +298,7 @@ void PrintTo(const Any& object, std::ostream* stream) {
     object.print(*stream);
 }

-TEST_F(AnyTests, PrintToEmptyAnyDoesNothing) {
+TEST_F(AnyTests, PrintToEmpty) {
     Any p;
     std::stringstream stream;
     ASSERT_NO_THROW(p.print(stream));
@@ -354,49 +354,51 @@ TEST_F(AnyTests, PrintToStringAny) {
     ASSERT_EQ(stream.str(), value);
 }

-TEST_F(AnyTests, PrintToVectorOfIntsAnyDoesNothing) {
+TEST_F(AnyTests, PrintToVectorOfInts) {
     Any p = std::vector<int>{-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
     std::stringstream stream;
     ASSERT_NO_THROW(p.print(stream));
-    ASSERT_EQ(stream.str(), std::string{});
+    ASSERT_EQ(stream.str(), std::string{"-5 -4 -3 -2 -1 0 1 2 3 4 5"});
 }

-TEST_F(AnyTests, PrintToVectorOfUIntsAnyDoesNothing) {
+TEST_F(AnyTests, PrintToVectorOfUInts) {
     Any p = std::vector<unsigned int>{0, 1, 2, 3, 4, 5};
     std::stringstream stream;
     ASSERT_NO_THROW(p.print(stream));
-    ASSERT_EQ(stream.str(), std::string{});
+    ASSERT_EQ(stream.str(), std::string{"0 1 2 3 4 5"});
 }

-TEST_F(AnyTests, PrintToVectorOfSize_tAnyDoesNothing) {
-    Any p = std::vector<size_t>{0, 1, 2, 3, 4, 5};
+TEST_F(AnyTests, PrintToVectorOfFloats) {
+    auto ref_vec = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
     std::stringstream stream;
-    ASSERT_NO_THROW(p.print(stream));
-    ASSERT_EQ(stream.str(), std::string{});
+    {
+        Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
+        ASSERT_NO_THROW(p.print(stream));
+        ASSERT_EQ(stream.str(), std::string{"0 1.1 2.2 3.3 4.4 5.5"});
+    }
+    {
+        Any p = stream.str();
+        ASSERT_EQ((p.as<std::vector<float>>()), ref_vec);
+    }
 }

-TEST_F(AnyTests, PrintToVectorOfFloatsAnyDoesNothing) {
-    Any p = std::vector<float>{0.0f, 1.1f, 2.2f, 3.3f, 4.4f, 5.5f};
-    std::stringstream stream;
-    ASSERT_NO_THROW(p.print(stream));
-    ASSERT_EQ(stream.str(), std::string{});
-}
-
-TEST_F(AnyTests, PrintToVectorOfStringsAnyDoesNothing) {
+TEST_F(AnyTests, PrintToVectorOfStrings) {
     Any p = std::vector<std::string>{"zero", "one", "two", "three", "four", "five"};
     std::stringstream stream;
     ASSERT_NO_THROW(p.print(stream));
-    ASSERT_EQ(stream.str(), std::string{});
+    ASSERT_EQ(stream.str(), std::string{"zero one two three four five"});
 }

-TEST_F(AnyTests, PrintToMapOfAnysDoesNothing) {
+TEST_F(AnyTests, PrintToMapOfAnys) {
     std::map<std::string, Any> refMap;
     refMap["testParamInt"] = 4;
     refMap["testParamString"] = "test";
-    Any p = refMap;
     std::stringstream stream;
-    ASSERT_NO_THROW(p.print(stream));
-    ASSERT_EQ(stream.str(), std::string{});
+    {
+        Any p = refMap;
+        ASSERT_NO_THROW(p.print(stream));
+        ASSERT_EQ(stream.str(), std::string{"testParamInt 4 testParamString test"});
+    }
 }

 TEST_F(AnyTests, constructFromVariantImpl) {
diff --git a/src/inference/dev_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp b/src/inference/dev_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp
index b6cfe79f82e..1e2d0ce4355 100644
--- a/src/inference/dev_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp
+++ b/src/inference/dev_api/cpp_interfaces/interface/ie_internal_plugin_config.hpp
@@ -14,27 +14,6 @@
 namespace InferenceEngine {

-/**
- * @brief A namespace with helper functions
- * @ingroup ie_dev_api_plugin_api
- */
-namespace util {
-template
-inline T string_to_property(const std::string& val, const ov::util::BaseProperty& property) {
-    std::stringstream ss(val);
-    T value;
-    ss >> value;
-    return value;
-}
-
-template
-inline std::string property_to_string(const T& property) {
-    std::stringstream ss;
-    ss << property;
-    return ss.str();
-}
-}  // namespace util
-
 /**
  * @brief A namespace with internal plugin configuration keys
  * @ingroup ie_dev_api_plugin_api
diff --git a/src/inference/include/openvino/runtime/common.hpp b/src/inference/include/openvino/runtime/common.hpp
index 9c202315262..772d1ed7863 100644
--- a/src/inference/include/openvino/runtime/common.hpp
+++ b/src/inference/include/openvino/runtime/common.hpp
@@ -52,22 +52,3 @@ namespace ie = InferenceEngine;
 using SupportedOpsMap = std::map<std::string, std::string>;

 }  // namespace ov
-
-namespace std {
-inline ostream& operator<<(ostream& os, const map<string, float>& m) {
-    for (auto&& it : m) {
-        os << it.first << " " << it.second << " ";
-    }
-    return os;
-}
-
-inline istream& operator>>(istream& is, map<string, float>& m) {
-    m.clear();
-    string key;
-    float value;
-    while (is >> key >> value) {
-        m.emplace(key, value);
-    }
-    return is;
-}
-}  // namespace std
diff --git a/src/inference/include/openvino/runtime/compiled_model.hpp b/src/inference/include/openvino/runtime/compiled_model.hpp
index 3e0cbd9c410..5d81f69242c 100644
--- a/src/inference/include/openvino/runtime/compiled_model.hpp
+++ b/src/inference/include/openvino/runtime/compiled_model.hpp
@@ -50,8 +50,6 @@ class OPENVINO_RUNTIME_API CompiledModel {
     friend class ov::Core;
     friend class ov::InferRequest;

-    void get_property(const std::string& name, ov::Any& to) const;
-
 public:
     /**
      * @brief Default constructor.
@@ -202,9 +200,7 @@ public:
      */
     template
     T get_property(const ov::Property& property) const {
-        auto to = Any::make<T>();
-        get_property(property.name(), to);
-        return to.template as<T>();
+        return get_property(property.name()).template as<T>();
     }

     /**
diff --git a/src/inference/include/openvino/runtime/core.hpp b/src/inference/include/openvino/runtime/core.hpp
index e78554e1110..90628bb49da 100644
--- a/src/inference/include/openvino/runtime/core.hpp
+++ b/src/inference/include/openvino/runtime/core.hpp
@@ -42,8 +42,6 @@ class OPENVINO_RUNTIME_API Core {
     class Impl;
     std::shared_ptr<Impl> _impl;

-    void get_property(const std::string& device_name, const std::string& name, const AnyMap& arguments, Any& to) const;
-
 public:
     /** @brief Constructs an OpenVINO Core instance using the XML configuration file with
      * devices and their plugins description.
@@ -531,9 +529,7 @@ public:
      */
     template
     T get_property(const std::string& deviceName, const ov::Property& property) const {
-        auto to = Any::make<T>();
-        get_property(deviceName, property.name(), {}, to);
-        return to.template as<T>();
+        return get_property(deviceName, property.name(), {}).template as<T>();
     }

     /**
@@ -551,9 +547,7 @@ public:
      */
     template
     T get_property(const std::string& deviceName, const ov::Property& property, const AnyMap& arguments) const {
-        auto to = Any::make<T>();
-        get_property(deviceName, property.name(), arguments, to);
-        return to.template as<T>();
+        return get_property(deviceName, property.name(), arguments).template as<T>();
     }

     /**
@@ -574,9 +568,7 @@ public:
     util::EnableIfAllStringAny get_property(const std::string& deviceName,
                                             const ov::Property& property,
                                             Args&&... args) const {
-        auto to = Any::make<T>();
-        get_property(deviceName, property.name(), AnyMap{std::forward(args)...}, to);
-        return to.template as<T>();
+        return get_property(deviceName, property.name(), AnyMap{std::forward(args)...}).template as<T>();
     }

     /**
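After this change the typed `get_property` overloads are thin wrappers over the `Any`-returning one, with the conversion done by `Any::as<T>()`. Call sites are unchanged; a minimal sketch:

    #include <openvino/openvino.hpp>

    void query() {
        ov::Core core;
        // Internally this is now: core.get_property("CPU", name).as<std::string>()
        auto name = core.get_property("CPU", ov::device::full_name);
        (void)name;
    }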
diff --git a/src/inference/include/openvino/runtime/properties.hpp b/src/inference/include/openvino/runtime/properties.hpp
index cc3bcc613c0..7908c4ccb53 100644
--- a/src/inference/include/openvino/runtime/properties.hpp
+++ b/src/inference/include/openvino/runtime/properties.hpp
@@ -134,7 +134,45 @@ inline std::ostream& operator<<(std::ostream& os, const BaseProperty& prop
  * @tparam T type of value used to set or get property
  */
 template
-struct Property : public util::BaseProperty {
+class Property : public util::BaseProperty {
+    template
+    struct Forward {
+        template ::value &&
+                                          std::is_convertible::value,
+                                      bool>::type = true>
+        explicit operator U() {
+            return value;
+        }
+
+        template ::value &&
+                                          !std::is_convertible::value,
+                                      bool>::type = true>
+        explicit operator U() {
+            return Any{value}.as();
+        }
+
+        template ::value &&
+                                          std::is_convertible::value,
+                                      bool>::type = true>
+        explicit operator U() {
+            return Any{value}.as();
+        }
+
+        template ::value &&
+                                          !std::is_convertible::value,
+                                      bool>::type = true>
+        explicit operator U() {
+            return value;
+        }
+
+        V&& value;
+    };
+
+public:
     using util::BaseProperty::BaseProperty;
     /**
      * @brief Constructs property
      */
     template
     inline std::pair operator()(Args&&... args) const {
-        return {this->name(), Any::make(std::forward(args)...)};
+        return {this->name(), Any::make(Forward{std::forward(args)}...)};
     }
 };
@@ -590,23 +628,69 @@ constexpr static const auto EXPORT_IMPORT = "EXPORT_IMPORT";  //!< Device suppor
 }  // namespace capability
 }  // namespace device

-namespace streams {
 /**
- * @brief Special value for ov::execution::streams::num property.
- * Creates bare minimum of streams to improve the performance
+ * @brief Class to represent number of streams in streams executor
  */
-static constexpr const int32_t AUTO = -1;
-/**
- * @brief Special value for ov::execution::streams::num property.
- * Creates as many streams as needed to accommodate NUMA and avoid associated penalties
- */
-static constexpr const int32_t NUMA = -2;
+struct NumStreams {
+    using Base = std::tuple;  //!< NumStreams is representable as int32_t
+
+    /**
+     * @brief Special value for ov::execution::num_streams property.
+     */
+    enum Special {
+        AUTO = -1,  //!< Creates bare minimum of streams to improve the performance
+        NUMA = -2,  //!< Creates as many streams as needed to accommodate NUMA and avoid associated penalties
+    };
+
+    NumStreams() : num{AUTO} {};
+
+    NumStreams(const int32_t num_) : num{num_} {}
+
+    operator int32_t() {
+        return num;
+    }
+
+    operator int32_t() const {
+        return num;
+    }
+
+    int32_t num = 0;
+};
+
+/** @cond INTERNAL */
+inline std::ostream& operator<<(std::ostream& os, const NumStreams& num_streams) {
+    switch (num_streams.num) {
+    case NumStreams::AUTO:
+        return os << "AUTO";
+    case NumStreams::NUMA:
+        return os << "NUMA";
+    default:
+        return os << num_streams.num;
+    }
+}
+
+inline std::istream& operator>>(std::istream& is, NumStreams& num_streams) {
+    std::string str;
+    is >> str;
+    if (str == "AUTO") {
+        num_streams = {NumStreams::AUTO};
+    } else if (str == "NUMA") {
+        num_streams = {NumStreams::NUMA};
+    } else {
+        try {
+            num_streams = {std::stoi(str)};
+        } catch (const std::exception& e) {
+            throw ov::Exception{std::string{"Could not read number of streams from str: "} + str + "; " + e.what()};
+        }
+    }
+    return is;
+}
+/** @endcond */

 /**
  * @brief The number of executor logical partitions
  */
-static constexpr Property num{"NUM_STREAMS"};
-}  // namespace streams
+static constexpr Property num_streams{"NUM_STREAMS"};

 /**
  * @brief Maximum number of threads that can be used for inference tasks
  */
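Callers of `ov::streams::num` migrate to `ov::num_streams`, and the special values move onto the new `ov::NumStreams` type. A sketch of setting and reading the property under the new names, with behavior as implied by this diff:

    #include <openvino/openvino.hpp>

    void streams() {
        ov::Core core;
        // NumStreams::AUTO / NumStreams::NUMA replace streams::AUTO / streams::NUMA.
        core.set_property("CPU", ov::num_streams(ov::NumStreams::AUTO));
        auto n = core.get_property("CPU", ov::num_streams);  // convertible to int32_t
        (void)n;
    }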
diff --git a/src/inference/src/any_copy.cpp b/src/inference/src/any_copy.cpp
index 17f1c94bbc6..d379ae79b2a 100644
--- a/src/inference/src/any_copy.cpp
+++ b/src/inference/src/any_copy.cpp
@@ -11,50 +11,10 @@
 namespace ov {
 std::map<std::string, std::string> any_copy(const ov::AnyMap& params) {
-    std::function to_config_string = [&](const Any& any) -> std::string {
-        if (any.is()) {
-            return any.as() ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO);
-        } else if (any.is()) {
-            std::stringstream strm;
-            for (auto&& val : any.as()) {
-                strm << val.first << " " << to_config_string(val.second) << " ";
-            }
-            return strm.str();
-        } else {
-            std::stringstream strm;
-            any.print(strm);
-            return strm.str();
-        }
-    };
     std::map<std::string, std::string> result;
     for (auto&& value : params) {
-        result.emplace(value.first, to_config_string(value.second));
+        result.emplace(value.first, value.second.as<std::string>());
     }
     return result;
 }
-
-void any_lexical_cast(const ov::Any& from, ov::Any& to) {
-    if (!from.is()) {
-        to = from;
-    } else {
-        auto str = from.as();
-        if (to.is()) {
-            to = from;
-        } else if (to.is()) {
-            if (str == CONFIG_VALUE(YES)) {
-                to = true;
-            } else if (str == CONFIG_VALUE(NO)) {
-                to = false;
-            } else {
-                OPENVINO_UNREACHABLE("Unsupported lexical cast to bool from: ", str);
-            }
-        } else {
-            std::stringstream strm(str);
-            to.read(strm);
-            if (strm.fail()) {
-                OPENVINO_UNREACHABLE("Unsupported lexical cast to ", to.type_info().name(), " from: ", str);
-            }
-        }
-    }
-}
 }  // namespace ov
diff --git a/src/inference/src/any_copy.hpp b/src/inference/src/any_copy.hpp
index 3b6c2d7042a..3517c0bf088 100644
--- a/src/inference/src/any_copy.hpp
+++ b/src/inference/src/any_copy.hpp
@@ -15,7 +15,4 @@
 namespace ov {
 std::map<std::string, std::string> any_copy(const ov::AnyMap& config_map);
-
-void any_lexical_cast(const Any& any, ov::Any& to);
-
 }  // namespace ov
diff --git a/src/inference/src/cpp/ie_executable_network.cpp b/src/inference/src/cpp/ie_executable_network.cpp
index 6368852116e..0e411043129 100644
--- a/src/inference/src/cpp/ie_executable_network.cpp
+++ b/src/inference/src/cpp/ie_executable_network.cpp
@@ -250,10 +250,6 @@ Any CompiledModel::get_property(const std::string& name) const {
     });
 }

-void CompiledModel::get_property(const std::string& name, Any& to) const {
-    any_lexical_cast(get_property(name), to);
-}
-
 RemoteContext CompiledModel::get_context() const {
     OV_EXEC_NET_CALL_STATEMENT(return {_impl->GetContext(), _so});
 }
diff --git a/src/inference/src/cpp/ie_plugin.hpp b/src/inference/src/cpp/ie_plugin.hpp
index 7417ff3f8fd..f2fbe04ace6 100644
--- a/src/inference/src/cpp/ie_plugin.hpp
+++ b/src/inference/src/cpp/ie_plugin.hpp
@@ -272,21 +272,12 @@ public:

     template
     T get_property(const ov::Property& property) const {
-        auto to = Any::make<T>();
-        get_property(property.name(), {}, to);
-        return to.template as<T>();
+        return get_property(property.name(), {}).template as<T>();
     }

     template
     T get_property(const ov::Property& property, const AnyMap& arguments) const {
-        auto to = Any::make<T>();
-        get_property(property.name(), arguments, to);
-        return to.template as<T>();
-    }
-
-private:
-    void get_property(const std::string& name, const AnyMap& arguments, Any& to) const {
-        any_lexical_cast(get_property(name, arguments), to);
+        return get_property(property.name(), arguments).template as<T>();
     }
 };
diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp
index a970fdefceb..762e4c256b6 100644
--- a/src/inference/src/ie_core.cpp
+++ b/src/inference/src/ie_core.cpp
@@ -1094,12 +1094,13 @@ public:
         for (auto&& config : configs) {
             auto parsed = parseDeviceNameIntoConfig(config.first);
             if (deviceName.find(parsed._deviceName) != std::string::npos) {
-                std::string key, value;
                 std::stringstream strm(config.second);
-                while (strm >> key >> value) {
+                std::map<std::string, std::string> device_configs;
+                util::Read<std::map<std::string, std::string>>{}(strm, device_configs);
+                for (auto&& device_config : device_configs) {
                     if (supportedConfigKeys.end() !=
-                        std::find(supportedConfigKeys.begin(), supportedConfigKeys.end(), key)) {
-                        supportedConfig[key] = value;
+                        std::find(supportedConfigKeys.begin(), supportedConfigKeys.end(), device_config.first)) {
+                        supportedConfig[device_config.first] = device_config.second;
                     }
                 }
                 for (auto&& config : parsed._config) {
@@ -1757,13 +1758,6 @@ Any Core::get_property(const std::string& deviceName, const std::string& name, c
     });
 }

-void Core::get_property(const std::string& deviceName,
-                        const std::string& name,
-                        const AnyMap& arguments,
-                        ov::Any& to) const {
-    any_lexical_cast(get_property(deviceName, name, arguments), to);
-}
-
 std::vector<std::string> Core::get_available_devices() const {
     OV_CORE_CALL_STATEMENT(return _impl->GetAvailableDevices(););
 }
diff --git a/src/inference/src/threading/ie_istreams_executor.cpp b/src/inference/src/threading/ie_istreams_executor.cpp
index ca6bfe27ebf..7bd8067809c 100644
--- a/src/inference/src/threading/ie_istreams_executor.cpp
+++ b/src/inference/src/threading/ie_istreams_executor.cpp
@@ -26,7 +26,7 @@ std::vector IStreamsExecutor::Config::SupportedKeys() const {
         CONFIG_KEY(CPU_BIND_THREAD),
         CONFIG_KEY(CPU_THREADS_NUM),
         CONFIG_KEY_INTERNAL(CPU_THREADS_PER_STREAM),
-        ov::streams::num.name(),
+        ov::num_streams.name(),
         ov::inference_num_threads.name(),
         ov::affinity.name(),
     };
@@ -107,20 +107,22 @@ void IStreamsExecutor::Config::SetConfig(const std::string& key, const std::stri
             }
             _streams = val_i;
         }
-    } else if (key == ov::streams::num) {
-        int32_t streams = std::stoi(value);
-        if (streams == ov::streams::NUMA) {
+    } else if (key == ov::num_streams) {
+        ov::NumStreams streams;
+        std::stringstream strm{value};
+        strm >> streams;
+        if (streams.num == ov::NumStreams::NUMA) {
             _streams = static_cast(getAvailableNUMANodes().size());
-        } else if (streams == ov::streams::AUTO) {
+        } else if (streams.num == ov::NumStreams::AUTO) {
             // bare minimum of streams (that evenly divides available number of cores)
             _streams = GetDefaultNumStreams();
-        } else if (streams >= 0) {
-            _streams = streams;
+        } else if (streams.num >= 0) {
+            _streams = streams.num;
         } else {
             OPENVINO_UNREACHABLE("Wrong value for property key ",
-                                 ov::streams::num.name(),
+                                 ov::num_streams.name(),
                                  ". Expected non negative numbers (#streams) or ",
-                                 "ov::streams::NUMA|ov::streams::AUTO, Got: ",
+                                 "ov::NumStreams::NUMA|ov::NumStreams::AUTO, Got: ",
                                  streams);
         }
     } else if (key == CONFIG_KEY(CPU_THREADS_NUM) || key == ov::inference_num_threads) {
@@ -179,8 +181,8 @@ Parameter IStreamsExecutor::Config::GetConfig(const std::string& key) const {
         }
     } else if (key == CONFIG_KEY(CPU_THROUGHPUT_STREAMS)) {
         return {std::to_string(_streams)};
-    } else if (key == ov::streams::num) {
-        return decltype(ov::streams::num)::value_type{_streams};
+    } else if (key == ov::num_streams) {
+        return decltype(ov::num_streams)::value_type{_streams};
     } else if (key == CONFIG_KEY(CPU_THREADS_NUM)) {
         return {std::to_string(_threads)};
     } else if (key == ov::inference_num_threads) {
diff --git a/src/plugins/intel_cpu/src/mkldnn_exec_network.cpp b/src/plugins/intel_cpu/src/mkldnn_exec_network.cpp
index bfa634dc234..81f5b776b6e 100644
--- a/src/plugins/intel_cpu/src/mkldnn_exec_network.cpp
+++ b/src/plugins/intel_cpu/src/mkldnn_exec_network.cpp
@@ -288,7 +288,7 @@ InferenceEngine::Parameter MKLDNNExecNetwork::GetMetric(const std::string &name)
         RO_property(ov::supported_properties.name()),
         RO_property(ov::model_name.name()),
         RO_property(ov::optimal_number_of_infer_requests.name()),
-        RO_property(ov::streams::num.name()),
+        RO_property(ov::num_streams.name()),
         RO_property(ov::affinity.name()),
         RO_property(ov::inference_num_threads.name()),
         RO_property(ov::enable_profiling.name()),
@@ -304,9 +304,9 @@
     } else if (name == ov::optimal_number_of_infer_requests) {
         const auto streams = config.streamExecutorConfig._streams;
         return static_cast<uint32_t>(streams);  // ov::optimal_number_of_infer_requests has no negative values
-    } else if (name == ov::streams::num) {
+    } else if (name == ov::num_streams) {
         const auto streams = config.streamExecutorConfig._streams;
-        return static_cast<int32_t>(streams);  // ov::streams::num has special negative values (AUTO = -1, NUMA = -2)
+        return static_cast<int32_t>(streams);  // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
     } else if (name == ov::affinity) {
         const auto affinity = config.streamExecutorConfig._threadBindingType;
         switch (affinity) {
diff --git a/src/plugins/intel_cpu/src/mkldnn_plugin.cpp b/src/plugins/intel_cpu/src/mkldnn_plugin.cpp
index 342aa422f1f..935b6bd4f99 100644
--- a/src/plugins/intel_cpu/src/mkldnn_plugin.cpp
+++ b/src/plugins/intel_cpu/src/mkldnn_plugin.cpp
@@ -717,10 +717,10 @@ Parameter Engine::GetConfig(const std::string& name, const std::map
-        return static_cast<uint32_t>(streams); // ov::optimal_number_of_infer_requests has no negative values
-    } else if (name == ov::streams::num) {
+        return decltype(ov::optimal_number_of_infer_requests)::value_type(streams); // ov::optimal_number_of_infer_requests has no negative values
+    } else if (name == ov::num_streams) {
         const auto streams = engConfig.streamExecutorConfig._streams;
-        return static_cast<int32_t>(streams); // ov::streams::num has special negative values (AUTO = -1, NUMA = -2)
+        return decltype(ov::num_streams)::value_type(streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
     } else if (name == ov::affinity) {
         const auto affinity = engConfig.streamExecutorConfig._threadBindingType;
         switch (affinity) {
@@ -736,19 +736,20 @@
     } else if (name == ov::hint::num_requests) {
         const auto perfHintNumRequests = engConfig.perfHintsConfig.ovPerfHintNumRequests;
-        return perfHintNumRequests;
+        return decltype(ov::hint::num_requests)::value_type(perfHintNumRequests);
     }
     /* Internally legacy parameters are used with new API as part of migration procedure.
      * This fallback can be removed as soon as migration completed */
@@ -836,7 +837,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map
-        std::vector rwProperties {RW_property(ov::streams::num.name()),
+        std::vector rwProperties {RW_property(ov::num_streams.name()),
                                   RW_property(ov::affinity.name()),
                                   RW_property(ov::inference_num_threads.name()),
                                   RW_property(ov::enable_profiling.name()),
diff --git a/src/plugins/intel_gna/gna_plugin_config.cpp b/src/plugins/intel_gna/gna_plugin_config.cpp
index 56d08e0ae1f..630a835fdcf 100644
--- a/src/plugins/intel_gna/gna_plugin_config.cpp
+++ b/src/plugins/intel_gna/gna_plugin_config.cpp
@@ -12,6 +12,7 @@
 #include "ie_common.h"
 #include
 #include
+#include

 using namespace InferenceEngine;
 using namespace InferenceEngine::details;
@@ -43,19 +44,6 @@ static const std::set supportedTargets = {
     ""
 };

-inline std::istream& operator>>(std::istream& is, ov::element::Type& p) {
-    std::string str;
-    is >> str;
-    if ((str == "i8") || (str == "I8")) {
-        p = ov::element::i8;
-    } else if ((str == "i16") || (str == "I16")) {
-        p = ov::element::i16;
-    } else {
-        throw ov::Exception{"Unsupported precision: " + str};
-    }
-    return is;
-}
-
 void Config::UpdateFromMap(const std::map& config) {
     for (auto&& item : config) {
         auto key = item.first;
@@ -109,7 +97,7 @@
         };

         if (key == ov::intel_gna::scale_factors_per_input) {
-            inputScaleFactorsPerInput = InferenceEngine::util::string_to_property(value, ov::intel_gna::scale_factors_per_input);
+            inputScaleFactorsPerInput = ov::util::from_string(value, ov::intel_gna::scale_factors_per_input);
             for (auto&& sf : inputScaleFactorsPerInput) {
                 check_scale_factor(sf.second);
             }
@@ -162,7 +150,7 @@
 OPENVINO_SUPPRESS_DEPRECATED_START
             }
 OPENVINO_SUPPRESS_DEPRECATED_END
         } else if (key == ov::intel_gna::execution_target || key == ov::intel_gna::compile_target) {
-            auto target = InferenceEngine::util::string_to_property(value, ov::intel_gna::execution_target);
+            auto target = ov::util::from_string(value, ov::intel_gna::execution_target);
             std::string target_str = "";
             if (ov::intel_gna::HWGeneration::GNA_2_0 == target) {
                 target_str = GNAConfigParams::GNA_TARGET_2_0;
@@ -195,7 +183,7 @@
                 THROW_GNA_EXCEPTION << "EXCLUSIVE_ASYNC_REQUESTS should be YES/NO, but not" << value;
             }
         } else if (key == ov::hint::performance_mode) {
-            performance_mode = InferenceEngine::util::string_to_property(value, ov::hint::performance_mode);
+            performance_mode = ov::util::from_string(value, ov::hint::performance_mode);
         } else if (key == ov::hint::inference_precision) {
             std::stringstream ss(value);
             ss >> inference_precision;
@@ -214,7 +202,7 @@
             }
             gnaPrecision = precision;
         } else if (key == ov::intel_gna::pwl_design_algorithm) {
-            gnaFlags.pwl_design_algorithm = InferenceEngine::util::string_to_property(value, ov::intel_gna::pwl_design_algorithm);
+            gnaFlags.pwl_design_algorithm = ov::util::from_string(value, ov::intel_gna::pwl_design_algorithm);
             gnaFlags.uniformPwlDesign =
                 (gnaFlags.pwl_design_algorithm == ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION) ? true : false;
 OPENVINO_SUPPRESS_DEPRECATED_START
         } else if (key == GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)) {
@@ -289,7 +277,7 @@
 OPENVINO_SUPPRESS_DEPRECATED_START
 OPENVINO_SUPPRESS_DEPRECATED_END
         } else if (key == CONFIG_KEY(LOG_LEVEL) || key == ov::log::level) {
             if (value == PluginConfigParams::LOG_WARNING || value == PluginConfigParams::LOG_NONE) {
-                gnaFlags.log_level = InferenceEngine::util::string_to_property(value, ov::log::level);
+                gnaFlags.log_level = ov::util::from_string(value, ov::log::level);
             } else {
                 log << "Currently only LOG_LEVEL = LOG_WARNING and LOG_NONE are supported, not " << value;
                 THROW_GNA_EXCEPTION << "Currently only LOG_LEVEL = LOG_WARNING and LOG_NONE are supported, not " << value;
@@ -318,7 +306,7 @@ void Config::AdjustKeyMapValues() {
     if (!inputScaleFactorsPerInput.empty()) {
         keyConfigMap[ov::intel_gna::scale_factors_per_input.name()] =
-            InferenceEngine::util::property_to_string(inputScaleFactorsPerInput);
+            ov::util::to_string(inputScaleFactorsPerInput);
     } else {
         if (inputScaleFactors.empty()) {
             inputScaleFactors.push_back(1.0);
@@ -335,12 +323,12 @@
 IE_SUPPRESS_DEPRECATED_END
     std::string device_mode;
     if (gnaFlags.sw_fp32) {
-        device_mode = InferenceEngine::util::property_to_string(ov::intel_gna::ExecutionMode::SW_FP32);
+        device_mode = ov::util::to_string(ov::intel_gna::ExecutionMode::SW_FP32);
     } else {
         for (auto&& value : supported_values) {
             if (value.second.first == pluginGna2AccMode &&
                 value.second.second == swExactMode) {
-                device_mode = InferenceEngine::util::property_to_string(value.first);
+                device_mode = ov::util::to_string(value.first);
                 break;
             }
         }
     }
@@ -353,16 +341,16 @@ void Config::AdjustKeyMapValues() {
         gnaFlags.compact_mode ? PluginConfigParams::YES : PluginConfigParams::NO;
     keyConfigMap[CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)] =
         gnaFlags.exclusive_async_requests ? PluginConfigParams::YES: PluginConfigParams::NO;
-    keyConfigMap[ov::hint::performance_mode.name()] = InferenceEngine::util::property_to_string(performance_mode);
+    keyConfigMap[ov::hint::performance_mode.name()] = ov::util::to_string(performance_mode);
     if (inference_precision != ov::element::undefined) {
-        keyConfigMap[ov::hint::inference_precision.name()] = InferenceEngine::util::property_to_string(inference_precision);
+        keyConfigMap[ov::hint::inference_precision.name()] = ov::util::to_string(inference_precision);
     } else {
         keyConfigMap[GNA_CONFIG_KEY(PRECISION)] = gnaPrecision.name();
     }
 OPENVINO_SUPPRESS_DEPRECATED_START
     if (gnaFlags.pwl_design_algorithm != ov::intel_gna::PWLDesignAlgorithm::UNDEFINED) {
         keyConfigMap[ov::intel_gna::pwl_design_algorithm.name()] =
-            InferenceEngine::util::property_to_string(gnaFlags.pwl_design_algorithm);
+            ov::util::to_string(gnaFlags.pwl_design_algorithm);
     } else {
         keyConfigMap[GNA_CONFIG_KEY(PWL_UNIFORM_DESIGN)] =
             gnaFlags.uniformPwlDesign ? PluginConfigParams::YES: PluginConfigParams::NO;
@@ -375,7 +363,7 @@
 OPENVINO_SUPPRESS_DEPRECATED_START
 OPENVINO_SUPPRESS_DEPRECATED_END
     keyConfigMap[ov::enable_profiling.name()] = gnaFlags.performance_counting ?
PluginConfigParams::YES: PluginConfigParams::NO; - keyConfigMap[ov::log::level.name()] = InferenceEngine::util::property_to_string(gnaFlags.log_level); + keyConfigMap[ov::log::level.name()] = ov::util::to_string(gnaFlags.log_level); } Parameter Config::GetParameter(const std::string& name) const { diff --git a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp index 3b3789df4d4..552f54d5104 100644 --- a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp +++ b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp @@ -24,6 +24,8 @@ #include #include +#include + using namespace InferenceEngine; using namespace InferenceEngine::details; @@ -146,27 +148,27 @@ InferenceEngine::Parameter CompiledModel::GetConfig(const std::string &name) con if (name == ov::enable_profiling) { return val == PluginConfigParams::YES ? true : false; } else if (name == ov::hint::model_priority) { - return InferenceEngine::util::string_to_property(val, ov::hint::model_priority); + return ov::util::from_string(val, ov::hint::model_priority); } else if (name == ov::intel_gpu::hint::host_task_priority) { - return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::host_task_priority); + return ov::util::from_string(val, ov::intel_gpu::hint::host_task_priority); } else if (name == ov::intel_gpu::hint::queue_priority) { - return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::queue_priority); + return ov::util::from_string(val, ov::intel_gpu::hint::queue_priority); } else if (name == ov::intel_gpu::hint::queue_throttle) { - return InferenceEngine::util::string_to_property(val, ov::intel_gpu::hint::queue_throttle); + return ov::util::from_string(val, ov::intel_gpu::hint::queue_throttle); } else if (name == ov::intel_gpu::enable_loop_unrolling) { return val == PluginConfigParams::YES ? 
true : false; } else if (name == ov::cache_dir) { - return InferenceEngine::util::string_to_property(val, ov::cache_dir); + return ov::util::from_string(val, ov::cache_dir); } else if (name == ov::hint::performance_mode) { - return InferenceEngine::util::string_to_property(val, ov::hint::performance_mode); + return ov::util::from_string(val, ov::hint::performance_mode); } else if (name == ov::compilation_num_threads) { - return InferenceEngine::util::string_to_property(val, ov::compilation_num_threads); - } else if (name == ov::streams::num) { - return InferenceEngine::util::string_to_property(val, ov::streams::num); + return ov::util::from_string(val, ov::compilation_num_threads); + } else if (name == ov::num_streams) { + return ov::util::from_string(val, ov::num_streams); } else if (name == ov::hint::num_requests) { - return InferenceEngine::util::string_to_property(val, ov::hint::num_requests); + return ov::util::from_string(val, ov::hint::num_requests); } else if (name == ov::device::id) { - return InferenceEngine::util::string_to_property(val, ov::device::id); + return ov::util::from_string(val, ov::device::id); } else { return val; } @@ -200,7 +202,7 @@ InferenceEngine::Parameter CompiledModel::GetMetric(const std::string &name) con ov::PropertyName{ov::cache_dir.name(), PropertyMutability::RO}, ov::PropertyName{ov::hint::performance_mode.name(), PropertyMutability::RO}, ov::PropertyName{ov::compilation_num_threads.name(), PropertyMutability::RO}, - ov::PropertyName{ov::streams::num.name(), PropertyMutability::RO}, + ov::PropertyName{ov::num_streams.name(), PropertyMutability::RO}, ov::PropertyName{ov::hint::num_requests.name(), PropertyMutability::RO}, ov::PropertyName{ov::device::id.name(), PropertyMutability::RO} }; diff --git a/src/plugins/intel_gpu/src/plugin/device_config.cpp b/src/plugins/intel_gpu/src/plugin/device_config.cpp index 862559e1ebe..49ce89cd89a 100644 --- a/src/plugins/intel_gpu/src/plugin/device_config.cpp +++ b/src/plugins/intel_gpu/src/plugin/device_config.cpp @@ -2,27 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "intel_gpu/plugin/device_config.hpp" + +#include #include #include #include -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" -#include "ie_api.h" -#include "file_utils.h" -#include "intel_gpu/plugin/device_config.hpp" -#include "intel_gpu/plugin/itt.hpp" -#include "openvino/runtime/intel_gpu/properties.hpp" -#include #include +#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" +#include "file_utils.h" +#include "ie_api.h" +#include "intel_gpu/plugin/itt.hpp" +#include "openvino/runtime/intel_gpu/properties.hpp" +#include + #ifdef _WIN32 -# include -#ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -# define mkdir(dir, mode) _wmkdir(dir) -#else -# define mkdir(dir, mode) _mkdir(dir) -#endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT -#endif // _WIN32 +# include +# ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +# define mkdir(dir, mode) _wmkdir(dir) +# else +# define mkdir(dir, mode) _mkdir(dir) +# endif // OPENVINO_ENABLE_UNICODE_PATH_SUPPORT +#endif // _WIN32 using namespace InferenceEngine; @@ -67,8 +70,7 @@ void Config::UpdateFromMap(const std::map& configMap) const auto hints = perfHintsConfig.SupportedKeys(); if (hints.end() != std::find(hints.begin(), hints.end(), key)) { perfHintsConfig.SetConfig(key, val); - } else if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0 || - key == ov::enable_profiling) { + } else if (key.compare(PluginConfigParams::KEY_PERF_COUNT) == 0 || key == ov::enable_profiling) { if 
(val.compare(PluginConfigParams::YES) == 0) { useProfiling = true; } else if (val.compare(PluginConfigParams::NO) == 0) { @@ -101,18 +103,18 @@ void Config::UpdateFromMap(const std::map& configMap) IE_THROW(NotFound) << "Unsupported property value by plugin: " << val; } switch (uVal) { - case 0: - case 2: - queuePriority = cldnn::priority_mode_types::med; - break; - case 1: - queuePriority = cldnn::priority_mode_types::low; - break; - case 3: - queuePriority = cldnn::priority_mode_types::high; - break; - default: - IE_THROW(ParameterMismatch) << "Unsupported queue priority value: " << uVal; + case 0: + case 2: + queuePriority = cldnn::priority_mode_types::med; + break; + case 1: + queuePriority = cldnn::priority_mode_types::low; + break; + case 3: + queuePriority = cldnn::priority_mode_types::high; + break; + default: + IE_THROW(ParameterMismatch) << "Unsupported queue priority value: " << uVal; } } else if (key == ov::intel_gpu::hint::queue_priority) { std::stringstream ss(val); @@ -124,33 +126,33 @@ void Config::UpdateFromMap(const std::map& configMap) queuePriority = cldnn::priority_mode_types::med; else queuePriority = cldnn::priority_mode_types::low; - } else if (key.compare(PluginConfigParams::KEY_MODEL_PRIORITY) == 0 || - key == ov::hint::model_priority) { + } else if (key.compare(PluginConfigParams::KEY_MODEL_PRIORITY) == 0 || key == ov::hint::model_priority) { if (val.compare(PluginConfigParams::MODEL_PRIORITY_HIGH) == 0 || - val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH)) == 0) { + val.compare(ov::util::to_string(ov::hint::Priority::HIGH)) == 0) { queuePriority = cldnn::priority_mode_types::high; task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::BIG; } else if (val.compare(PluginConfigParams::MODEL_PRIORITY_MED) == 0 || - val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM)) == 0) { + val.compare(ov::util::to_string(ov::hint::Priority::MEDIUM)) == 0) { queuePriority = cldnn::priority_mode_types::med; task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::ANY; } else if (val.compare(PluginConfigParams::MODEL_PRIORITY_LOW) == 0 || - val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::LOW)) == 0) { + val.compare(ov::util::to_string(ov::hint::Priority::LOW)) == 0) { queuePriority = cldnn::priority_mode_types::low; task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::LITTLE; } else { - IE_THROW() << "Not found appropriate value for config key " << PluginConfigParams::KEY_MODEL_PRIORITY << ".\n"; + IE_THROW() << "Not found appropriate value for config key " << PluginConfigParams::KEY_MODEL_PRIORITY + << ".\n"; } if (getAvailableCoresTypes().size() > 1) { - if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG - || task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE) { - task_exec_config._streams = std::min(task_exec_config._streams, - getNumberOfCores(task_exec_config._threadPreferredCoreType)); + if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG || + task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE) { + task_exec_config._streams = std::min(task_exec_config._streams, + getNumberOfCores(task_exec_config._threadPreferredCoreType)); } } else { task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::ANY; - task_exec_config._streams = std::min(task_exec_config._streams, - static_cast(std::thread::hardware_concurrency())); + 
task_exec_config._streams = + std::min(task_exec_config._streams, static_cast<int>(std::thread::hardware_concurrency())); } } else if (key.compare(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) == 0 || key.compare(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) == 0) { @@ -161,18 +163,18 @@ IE_THROW(NotFound) << "Unsupported property value by plugin: " << val; } switch (uVal) { - case 0: - case 2: - queueThrottle = cldnn::throttle_mode_types::med; - break; - case 1: - queueThrottle = cldnn::throttle_mode_types::low; - break; - case 3: - queueThrottle = cldnn::throttle_mode_types::high; - break; - default: - IE_THROW(ParameterMismatch) << "Unsupported queue throttle value: " << uVal; + case 0: + case 2: + queueThrottle = cldnn::throttle_mode_types::med; + break; + case 1: + queueThrottle = cldnn::throttle_mode_types::low; + break; + case 3: + queueThrottle = cldnn::throttle_mode_types::high; + break; + default: + IE_THROW(ParameterMismatch) << "Unsupported queue throttle value: " << uVal; } } else if (key == ov::intel_gpu::hint::queue_throttle) { std::stringstream ss(val); @@ -221,8 +223,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap) graph_dumps_dir = val; createDirectory(graph_dumps_dir); } - } else if (key.compare(PluginConfigParams::KEY_CACHE_DIR) == 0 || - key == ov::cache_dir) { + } else if (key.compare(PluginConfigParams::KEY_CACHE_DIR) == 0 || key == ov::cache_dir) { if (!val.empty()) { kernels_cache_dir = val; createDirectory(kernels_cache_dir); @@ -240,10 +241,9 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap) } else { IE_THROW(NotFound) << "Unsupported property value by plugin: " << val; } - } else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0 || - key == ov::streams::num) { + } else if (key.compare(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == 0 || key == ov::num_streams) { if (val.compare(PluginConfigParams::GPU_THROUGHPUT_AUTO) == 0 || - val.compare(std::to_string(ov::streams::AUTO)) == 0) { + val.compare(ov::num_streams(ov::NumStreams::AUTO).second.as<std::string>()) == 0) { throughput_streams = GetDefaultNStreamsForThroughputMode(); } else { int val_i; try { val_i = std::stoi(val); } catch (const std::exception&) { IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS - << ". Expected only positive numbers (#streams) or " - << "PluginConfigParams::GPU_THROUGHPUT_AUTO"; + << ". Expected only positive numbers (#streams) or " + << "PluginConfigParams::GPU_THROUGHPUT_AUTO"; } if (val_i > 0) throughput_streams = static_cast<uint16_t>(val_i); } - } else if (key.compare(PluginConfigParams::KEY_DEVICE_ID) == 0 || - key == ov::device::id) { + } else if (key.compare(PluginConfigParams::KEY_DEVICE_ID) == 0 || key == ov::device::id) { // Validate if passed value is a positive number. try { int val_i = std::stoi(val); (void)val_i; } catch (const std::exception&) { IE_THROW() << "Wrong value for property key " << ov::device::id.name() - << ". DeviceIDs are only represented by positive numbers"; + << ". DeviceIDs are only represented by positive numbers"; } // Set this value. device_id = val; @@ -294,8 +293,7 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap) } else { IE_THROW(NotFound) << "Unsupported KEY_CLDNN_ENABLE_FP16_FOR_QUANTIZED_MODELS flag value: " << val; } - } else if (key.compare(GPUConfigParams::KEY_GPU_MAX_NUM_THREADS) == 0 || - key == ov::compilation_num_threads) { + } else if (key.compare(GPUConfigParams::KEY_GPU_MAX_NUM_THREADS) == 0 || key == ov::compilation_num_threads) { int max_threads = std::max(1, static_cast<int>(std::thread::hardware_concurrency())); try { int val_i = std::stoi(val); @@ -305,8 +303,8 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap) task_exec_config._streams = std::min(task_exec_config._streams, val_i); } catch (const std::exception&) { IE_THROW() << "Wrong value for property key " << GPUConfigParams::KEY_GPU_MAX_NUM_THREADS << ": " << val - << "\nSpecify the number of threads to use for build as an integer." - << "\nOut of range value will be set as a default value, maximum concurrent threads."; + << "\nSpecify the number of threads to use for build as an integer." + << "\nOut of range value will be set as a default value, maximum concurrent threads."; } } else if (key.compare(GPUConfigParams::KEY_GPU_ENABLE_LOOP_UNROLLING) == 0 || key == ov::intel_gpu::enable_loop_unrolling) { @@ -320,13 +318,13 @@ void Config::UpdateFromMap(const std::map<std::string, std::string>& configMap) } else if (key.compare(GPUConfigParams::KEY_GPU_HOST_TASK_PRIORITY) == 0 || key == ov::intel_gpu::hint::host_task_priority) { if (val.compare(GPUConfigParams::GPU_HOST_TASK_PRIORITY_HIGH) == 0 || - val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH)) == 0) { + val.compare(ov::util::to_string(ov::hint::Priority::HIGH)) == 0) { task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::BIG; } else if (val.compare(GPUConfigParams::GPU_HOST_TASK_PRIORITY_MEDIUM) == 0 || - val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM)) == 0) { + val.compare(ov::util::to_string(ov::hint::Priority::MEDIUM)) == 0) { task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::ANY; } else if (val.compare(GPUConfigParams::GPU_HOST_TASK_PRIORITY_LOW) == 0 || - val.compare(InferenceEngine::util::property_to_string(ov::hint::Priority::LOW)) == 0) { + val.compare(ov::util::to_string(ov::hint::Priority::LOW)) == 0) { task_exec_config._threadPreferredCoreType = IStreamsExecutor::Config::LITTLE; } else { IE_THROW(NotFound) << "Unsupported host task priority by plugin: " << val; @@ -384,22 +382,35 @@ void Config::adjustKeyMapValues() { { if (queuePriority == cldnn::priority_mode_types::high && - (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG || getAvailableCoresTypes().size() == 1)) { - key_config_map[ov::hint::model_priority.name()] = InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH); + (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG || + getAvailableCoresTypes().size() == 1)) { + key_config_map[ov::hint::model_priority.name()] = + ov::util::to_string(ov::hint::Priority::HIGH); } else if (queuePriority == cldnn::priority_mode_types::low && - (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE || getAvailableCoresTypes().size() == 1)) { - key_config_map[ov::hint::model_priority.name()] = InferenceEngine::util::property_to_string(ov::hint::Priority::LOW); - } else if (queuePriority == cldnn::priority_mode_types::med && task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::ANY) { -
key_config_map[ov::hint::model_priority.name()] = InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM); + (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE || + getAvailableCoresTypes().size() == 1)) { + key_config_map[ov::hint::model_priority.name()] = + ov::util::to_string(ov::hint::Priority::LOW); + } else if (queuePriority == cldnn::priority_mode_types::med && + task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::ANY) { + key_config_map[ov::hint::model_priority.name()] = + ov::util::to_string(ov::hint::Priority::MEDIUM); } } { std::string qp = "0"; switch (queuePriority) { - case cldnn::priority_mode_types::low: qp = "1"; break; - case cldnn::priority_mode_types::med: qp = "2"; break; - case cldnn::priority_mode_types::high: qp = "3"; break; - default: break; + case cldnn::priority_mode_types::low: + qp = "1"; + break; + case cldnn::priority_mode_types::med: + qp = "2"; + break; + case cldnn::priority_mode_types::high: + qp = "3"; + break; + default: + break; } key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_PRIORITY] = qp; key_config_map[GPUConfigParams::KEY_GPU_PLUGIN_PRIORITY] = qp; @@ -407,20 +418,27 @@ void Config::adjustKeyMapValues() { { std::string priority; if (queuePriority == cldnn::priority_mode_types::high) - priority = InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH); + priority = ov::util::to_string(ov::hint::Priority::HIGH); else if (queuePriority == cldnn::priority_mode_types::low) - priority = InferenceEngine::util::property_to_string(ov::hint::Priority::LOW); + priority = ov::util::to_string(ov::hint::Priority::LOW); else - priority = InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM); + priority = ov::util::to_string(ov::hint::Priority::MEDIUM); key_config_map[ov::intel_gpu::hint::queue_priority.name()] = priority; } { std::string qt = "0"; switch (queueThrottle) { - case cldnn::throttle_mode_types::low: qt = "1"; break; - case cldnn::throttle_mode_types::med: qt = "2"; break; - case cldnn::throttle_mode_types::high: qt = "3"; break; - default: break; + case cldnn::throttle_mode_types::low: + qt = "1"; + break; + case cldnn::throttle_mode_types::med: + qt = "2"; + break; + case cldnn::throttle_mode_types::high: + qt = "3"; + break; + default: + break; } key_config_map[CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE] = qt; key_config_map[GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE] = qt; @@ -428,31 +446,40 @@ void Config::adjustKeyMapValues() { { std::string throttleLevel; if (queueThrottle == cldnn::throttle_mode_types::high) - throttleLevel = InferenceEngine::util::property_to_string(ov::intel_gpu::hint::ThrottleLevel::HIGH); + throttleLevel = ov::util::to_string(ov::intel_gpu::hint::ThrottleLevel::HIGH); else if (queueThrottle == cldnn::throttle_mode_types::low) - throttleLevel = InferenceEngine::util::property_to_string(ov::intel_gpu::hint::ThrottleLevel::LOW); + throttleLevel = ov::util::to_string(ov::intel_gpu::hint::ThrottleLevel::LOW); else - throttleLevel = InferenceEngine::util::property_to_string(ov::intel_gpu::hint::ThrottleLevel::MEDIUM); + throttleLevel = ov::util::to_string(ov::intel_gpu::hint::ThrottleLevel::MEDIUM); key_config_map[ov::intel_gpu::hint::queue_throttle.name()] = throttleLevel; } { std::string hostTaskPriority; if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::LITTLE) - hostTaskPriority = InferenceEngine::util::property_to_string(ov::hint::Priority::LOW); + hostTaskPriority = 
ov::util::to_string(ov::hint::Priority::LOW); else if (task_exec_config._threadPreferredCoreType == IStreamsExecutor::Config::BIG) - hostTaskPriority = InferenceEngine::util::property_to_string(ov::hint::Priority::HIGH); + hostTaskPriority = ov::util::to_string(ov::hint::Priority::HIGH); else - hostTaskPriority = InferenceEngine::util::property_to_string(ov::hint::Priority::MEDIUM); + hostTaskPriority = ov::util::to_string(ov::hint::Priority::MEDIUM); key_config_map[ov::intel_gpu::hint::host_task_priority.name()] = hostTaskPriority; } { std::string tm = PluginConfigParams::TUNING_DISABLED; switch (tuningConfig.mode) { - case cldnn::tuning_mode::tuning_tune_and_cache: tm = PluginConfigParams::TUNING_CREATE; break; - case cldnn::tuning_mode::tuning_use_cache: tm = PluginConfigParams::TUNING_USE_EXISTING; break; - case cldnn::tuning_mode::tuning_use_and_update: tm = PluginConfigParams::TUNING_UPDATE; break; - case cldnn::tuning_mode::tuning_retune_and_cache: tm = PluginConfigParams::TUNING_RETUNE; break; - default: break; + case cldnn::tuning_mode::tuning_tune_and_cache: + tm = PluginConfigParams::TUNING_CREATE; + break; + case cldnn::tuning_mode::tuning_use_cache: + tm = PluginConfigParams::TUNING_USE_EXISTING; + break; + case cldnn::tuning_mode::tuning_use_and_update: + tm = PluginConfigParams::TUNING_UPDATE; + break; + case cldnn::tuning_mode::tuning_retune_and_cache: + tm = PluginConfigParams::TUNING_RETUNE; + break; + default: + break; } key_config_map[PluginConfigParams::KEY_TUNING_MODE] = tm; key_config_map[PluginConfigParams::KEY_TUNING_FILE] = tuningConfig.cache_file_path; @@ -464,7 +491,7 @@ void Config::adjustKeyMapValues() { key_config_map[ov::cache_dir.name()] = kernels_cache_dir; key_config_map[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(throughput_streams); - key_config_map[ov::streams::num.name()] = std::to_string(throughput_streams); + key_config_map[ov::num_streams.name()] = std::to_string(throughput_streams); key_config_map[PluginConfigParams::KEY_DEVICE_ID] = device_id; key_config_map[ov::device::id.name()] = device_id; @@ -490,18 +517,18 @@ void Config::adjustKeyMapValues() { } bool Config::isNewApiProperty(std::string property) { - static const std::set new_api_keys { + static const std::set new_api_keys{ ov::intel_gpu::hint::queue_priority.name(), ov::intel_gpu::hint::queue_throttle.name(), ov::compilation_num_threads.name(), - ov::streams::num.name(), + ov::num_streams.name(), }; return new_api_keys.find(property) != new_api_keys.end(); } std::string Config::ConvertPropertyToLegacy(const std::string& key, const std::string& value) { if (key == PluginConfigParams::KEY_MODEL_PRIORITY) { - auto priority = InferenceEngine::util::string_to_property(value, ov::hint::model_priority); + auto priority = ov::util::from_string(value, ov::hint::model_priority); if (priority == ov::hint::Priority::HIGH) return PluginConfigParams::MODEL_PRIORITY_HIGH; else if (priority == ov::hint::Priority::MEDIUM) @@ -509,7 +536,7 @@ std::string Config::ConvertPropertyToLegacy(const std::string& key, const std::s else if (priority == ov::hint::Priority::LOW) return PluginConfigParams::MODEL_PRIORITY_LOW; } else if (key == GPUConfigParams::KEY_GPU_HOST_TASK_PRIORITY) { - auto priority = InferenceEngine::util::string_to_property(value, ov::intel_gpu::hint::host_task_priority); + auto priority = ov::util::from_string(value, ov::intel_gpu::hint::host_task_priority); if (priority == ov::hint::Priority::HIGH) return GPUConfigParams::GPU_HOST_TASK_PRIORITY_HIGH; else if (priority == 
ov::hint::Priority::MEDIUM) diff --git a/src/plugins/intel_gpu/src/plugin/plugin.cpp b/src/plugins/intel_gpu/src/plugin/plugin.cpp index 74b3123e554..28b14893f9c 100644 --- a/src/plugins/intel_gpu/src/plugin/plugin.cpp +++ b/src/plugins/intel_gpu/src/plugin/plugin.cpp @@ -29,6 +29,7 @@ #include #include "openvino/pass/serialize.hpp" +#include #include "intel_gpu/runtime/device_query.hpp" #include "intel_gpu/runtime/debug_configuration.hpp" @@ -214,14 +215,14 @@ std::map<std::string, std::string> Plugin::ConvertPerfHintsToConfig( : plugin_config.perfHintsConfig.ovPerfHint; //checking streams (to avoid overriding what user might explicitly set in the incoming config or previously via SetConfig) const auto streams = config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) == config.end() && - config.find(ov::streams::num.name()) == config.end(); + config.find(ov::num_streams.name()) == config.end(); if (streams && !streamsSet) { if (mode_name == CONFIG_VALUE(LATENCY)) { config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = std::to_string(1); - config[ov::streams::num.name()] = std::to_string(1); + config[ov::num_streams.name()] = std::to_string(1); } else if (mode_name == CONFIG_VALUE(THROUGHPUT)) { config[PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS] = CONFIG_VALUE(GPU_THROUGHPUT_AUTO); - config[ov::streams::num.name()] = std::to_string(ov::streams::AUTO); + config[ov::num_streams.name()] = ov::Any::make<ov::NumStreams>(ov::NumStreams::AUTO).as<std::string>(); //disabling the throttling temporarily to set the validation (that is switching to the hints) perf baseline //checking throttling (to avoid overriding what user might explicitly set in the incoming config or previously via SetConfig) // const auto bInConfig = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() || @@ -337,7 +338,7 @@ InferenceEngine::RemoteContext::Ptr Plugin::GetDefaultContext(const AnyMap& para void Plugin::SetConfig(const std::map<std::string, std::string> &config) { streamsSet = config.find(PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS) != config.end() || - config.find(ov::streams::num.name()) != config.end(); + config.find(ov::num_streams.name()) != config.end(); throttlingSet = config.find(GPUConfigParams::KEY_GPU_PLUGIN_THROTTLE) != config.end() || config.find(CLDNNConfigParams::KEY_CLDNN_PLUGIN_THROTTLE) != config.end() || config.find(ov::intel_gpu::hint::queue_throttle.name()) != config.end(); @@ -543,28 +544,28 @@ Parameter Plugin::GetConfig(const std::string& name, const std::map<std::string, Parameter>& options) { if (it_streams->second.is<int32_t>()) { n_streams = it_streams->second.as<int32_t>(); - if (n_streams == ov::streams::AUTO) - n_streams = config.GetDefaultNStreamsForThroughputMode(); } else if (it_streams->second.is<uint32_t>()) { n_streams = it_streams->second.as<uint32_t>(); } else if (it_streams->second.is<std::string>()) { std::string n_streams_str = it_streams->second.as<std::string>(); - if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO)) { + if (n_streams_str != CONFIG_VALUE(GPU_THROUGHPUT_AUTO) && + n_streams_str != util::to_string(ov::NumStreams(ov::NumStreams::AUTO))) { IE_THROW() << "[GPU_MAX_BATCH_SIZE] bad casting: GPU_THROUGHPUT_STREAMS should be either of uint32_t type or \"GPU_THROUGHPUT_AUTO\""; } n_streams = config.GetDefaultNStreamsForThroughputMode(); diff --git a/src/plugins/intel_myriad/common/src/configuration/options/ov_throughput_streams.cpp b/src/plugins/intel_myriad/common/src/configuration/options/ov_throughput_streams.cpp index 67f9d6f1c4b..fa9f13e501f 100644 --- a/src/plugins/intel_myriad/common/src/configuration/options/ov_throughput_streams.cpp +++
b/src/plugins/intel_myriad/common/src/configuration/options/ov_throughput_streams.cpp @@ -33,7 +33,7 @@ void OvThroughputStreamsOption::validate(const PluginConfiguration& configuratio } std::string OvThroughputStreamsOption::key() { - return ov::streams::num.name(); + return ov::num_streams.name(); } details::Access OvThroughputStreamsOption::access() { @@ -46,7 +46,7 @@ details::Category OvThroughputStreamsOption::category() { std::string OvThroughputStreamsOption::defaultValue() { std::stringstream ss; - ss << ov::streams::AUTO; + ss << ov::NumStreams::AUTO; return ss.str(); } diff --git a/src/plugins/intel_myriad/myriad_plugin/myriad_metrics.cpp b/src/plugins/intel_myriad/myriad_plugin/myriad_metrics.cpp index bf8b3741991..03230a0ad1a 100644 --- a/src/plugins/intel_myriad/myriad_plugin/myriad_metrics.cpp +++ b/src/plugins/intel_myriad/myriad_plugin/myriad_metrics.cpp @@ -116,7 +116,7 @@ RangeType MyriadMetrics::RangeForAsyncInferRequests( auto throughput_streams_str = config.find(InferenceEngine::MYRIAD_THROUGHPUT_STREAMS); if (throughput_streams_str == config.end()) { - throughput_streams_str = config.find(ov::streams::num.name()); + throughput_streams_str = config.find(ov::num_streams.name()); } if (throughput_streams_str != config.end()) { try { diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 0fbd0abc626..c57e509f2a9 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -87,14 +87,14 @@ TEST(OVClassBasicTest, smoke_SetConfigInferenceNumThreads) { int32_t value = 0; int32_t num_threads = 1; - ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); ASSERT_EQ(num_threads, value); num_threads = 4; - ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::inference_num_threads(num_threads))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::inference_num_threads)); ASSERT_EQ(num_threads, value); } @@ -104,19 +104,19 @@ TEST(OVClassBasicTest, smoke_SetConfigStreamsNum) { int32_t num_streams = 1; auto setGetProperty = [&ie](int32_t& getProperty, int32_t setProperty){ - ASSERT_NO_THROW(ie.set_property("CPU", ov::streams::num(setProperty))); - ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::streams::num)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::num_streams(setProperty))); + OV_ASSERT_NO_THROW(getProperty = ie.get_property("CPU", ov::num_streams)); }; setGetProperty(value, num_streams); ASSERT_EQ(num_streams, value); - num_streams = ov::streams::NUMA; + num_streams = ov::NumStreams::NUMA; setGetProperty(value, num_streams); ASSERT_GT(value, 0); // value has been configured automatically - num_streams = ov::streams::AUTO; + num_streams = ov::NumStreams::AUTO; setGetProperty(value, num_streams); ASSERT_GT(value, 0); // value has been configured automatically @@ -132,12 +132,12 @@ TEST(OVClassBasicTest, smoke_SetConfigAffinity) { #else auto 
defaultBindThreadParameter = ov::Affinity::CORE; #endif - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); ASSERT_EQ(defaultBindThreadParameter, value); const ov::Affinity affinity = ov::Affinity::HYBRID_AWARE; - ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::affinity(affinity))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::affinity)); ASSERT_EQ(affinity, value); } @@ -146,13 +146,13 @@ TEST(OVClassBasicTest, smoke_SetConfigHintInferencePrecision) { auto value = ov::element::f32; const auto precision = InferenceEngine::with_cpu_x86_bfloat16() ? ov::element::bf16 : ov::element::f32; - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(precision, value); const auto forcedPrecision = ov::element::f32; - ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(forcedPrecision))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(precision, forcedPrecision); } @@ -161,13 +161,13 @@ TEST(OVClassBasicTest, smoke_SetConfigEnableProfiling) { bool value; const bool enableProfilingDefault = false; - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); ASSERT_EQ(enableProfilingDefault, value); const bool enableProfiling = true; - ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling))); - ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); + OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::enable_profiling(enableProfiling))); + OV_ASSERT_NO_THROW(value = ie.get_property("CPU", ov::enable_profiling)); ASSERT_EQ(enableProfiling, value); } diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_plugin/core_intergration.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_plugin/core_intergration.cpp index 9fd32306838..722ca704fc3 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_plugin/core_intergration.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_plugin/core_intergration.cpp @@ -70,9 +70,9 @@ INSTANTIATE_TEST_SUITE_P(nightly_OVClassGetConfigTest, OVClassGetConfigTest, ::t TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedScaleFactors) { ov::Core core; float sf1, sf2; - ASSERT_NO_THROW(core.set_property({{"GNA_SCALE_FACTOR_0", "1634.0"}, {"GNA_SCALE_FACTOR_1", "2000.0"}})); - ASSERT_NO_THROW(sf1 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_0").as<std::string>())); - ASSERT_NO_THROW(sf2 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_1").as<std::string>())); + OV_ASSERT_NO_THROW(core.set_property({{"GNA_SCALE_FACTOR_0", "1634.0"}, {"GNA_SCALE_FACTOR_1", "2000.0"}})); + OV_ASSERT_NO_THROW(sf1 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_0").as<std::string>())); + OV_ASSERT_NO_THROW(sf2 = std::stof(core.get_property("GNA", "GNA_SCALE_FACTOR_1").as<std::string>())); ASSERT_FLOAT_EQ(1634.0, sf1); ASSERT_FLOAT_EQ(2000.0, sf2); @@ -84,16 +84,16 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedScaleFactorsPerInput) { ov::Core core; std::map<std::string, float> scale_factors_per_input; - ASSERT_NO_THROW(core.set_property("GNA", + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::scale_factors_per_input(std::map<std::string, float>{{"input_0", 1634.0f}, {"input_1", 2000.0f}}))); - ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input)); + OV_ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input)); ASSERT_EQ(2, scale_factors_per_input.size()); ASSERT_FLOAT_EQ(1634.0f, scale_factors_per_input["input_0"]); ASSERT_FLOAT_EQ(2000.0f, scale_factors_per_input["input_1"]); - ASSERT_NO_THROW(core.set_property("GNA", + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::scale_factors_per_input(std::map<std::string, float>{{"0", 1.0f}}))); - ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input)); + OV_ASSERT_NO_THROW(scale_factors_per_input = core.get_property("GNA", ov::intel_gna::scale_factors_per_input)); ASSERT_EQ(1, scale_factors_per_input.size()); ASSERT_FLOAT_EQ(1.0f, scale_factors_per_input["0"]); } @@ -102,23 +102,23 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedPrecisionHint) { ov::Core core; ov::element::Type precision; - ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); ASSERT_EQ(ov::element::undefined, precision); - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i8))); - ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i8))); + OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); ASSERT_EQ(ov::element::i8, precision); - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i16))); - ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::inference_precision(ov::element::i16))); + OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); ASSERT_EQ(ov::element::i16, precision); - ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I8"}})); - ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I8"}})); + OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); ASSERT_EQ(ov::element::i8, precision); - ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I16"}})); - ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); + OV_ASSERT_NO_THROW(core.set_property("GNA", {{ov::hint::inference_precision.name(), "I16"}})); + OV_ASSERT_NO_THROW(precision = core.get_property("GNA", ov::hint::inference_precision)); ASSERT_EQ(ov::element::i16, precision); ASSERT_THROW(core.set_property("GNA", { ov::hint::inference_precision(ov::element::i8), @@ -132,15 +132,15 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedPerformanceHint) { ov::Core core; ov::hint::PerformanceMode mode; - ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode)); + OV_ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode)); ASSERT_EQ(ov::hint::PerformanceMode::UNDEFINED, mode); -
ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); - ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))); + OV_ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode)); ASSERT_EQ(ov::hint::PerformanceMode::LATENCY, mode); - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))); - ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))); + OV_ASSERT_NO_THROW(mode = core.get_property("GNA", ov::hint::performance_mode)); ASSERT_EQ(ov::hint::PerformanceMode::THROUGHPUT, mode); ASSERT_THROW(core.set_property("GNA", {{ov::hint::performance_mode.name(), "ABC"}}), ov::Exception); @@ -150,24 +150,24 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedNumRequests) { ov::Core core; uint32_t num_requests; - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(8))); - ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(8))); + OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); ASSERT_EQ(8, num_requests); - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1))); - ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1))); + OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); ASSERT_EQ(1, num_requests); - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1000))); - ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(1000))); + OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); ASSERT_EQ(127, num_requests); // maximum value - ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(0))); - ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::hint::num_requests(0))); + OV_ASSERT_NO_THROW(num_requests = core.get_property("GNA", ov::hint::num_requests)); ASSERT_EQ(1, num_requests); // minimum value OPENVINO_SUPPRESS_DEPRECATED_START - ASSERT_NO_THROW(core.set_property("GNA", {ov::hint::num_requests(8), {GNA_CONFIG_KEY(LIB_N_THREADS), "8"}})); + OV_ASSERT_NO_THROW(core.set_property("GNA", {ov::hint::num_requests(8), {GNA_CONFIG_KEY(LIB_N_THREADS), "8"}})); ASSERT_THROW(core.set_property("GNA", {ov::hint::num_requests(4), {GNA_CONFIG_KEY(LIB_N_THREADS), "8"}}), ov::Exception); OPENVINO_SUPPRESS_DEPRECATED_END ASSERT_THROW(core.set_property("GNA", {{ov::hint::num_requests.name(), "ABC"}}), ov::Exception); @@ -177,31 +177,31 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedExecutionMode) { ov::Core core; auto execution_mode = ov::intel_gna::ExecutionMode::AUTO; - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::SW_EXACT, execution_mode); - ASSERT_NO_THROW(core.set_property("GNA", 
ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_FP32))); - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_FP32))); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::SW_FP32, execution_mode); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT))); - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT))); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::SW_EXACT, execution_mode); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK))); - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK))); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK, execution_mode); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW))); - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW))); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::HW, execution_mode); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::AUTO))); - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::AUTO))); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::AUTO, execution_mode); ASSERT_THROW(core.set_property("GNA", {{ov::intel_gna::execution_mode.name(), "ABC"}}), ov::Exception); - ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); + OV_ASSERT_NO_THROW(execution_mode = core.get_property("GNA", ov::intel_gna::execution_mode)); ASSERT_EQ(ov::intel_gna::ExecutionMode::AUTO, execution_mode); } @@ -210,30 +210,30 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedTargetDevice) { auto execution_target = ov::intel_gna::HWGeneration::UNDEFINED; auto compile_target = ov::intel_gna::HWGeneration::UNDEFINED; - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0))); - ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0))); + OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_2_0, execution_target); - ASSERT_NO_THROW(compile_target = core.get_property("GNA", 
ov::intel_gna::compile_target)); + OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_2_0, compile_target); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0))); - ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0))); + OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_3_0, execution_target); - ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target)); + OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_2_0, compile_target); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_0))); - ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_0))); + OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_3_0, execution_target); - ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target)); + OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::compile_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::GNA_3_0, compile_target); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::UNDEFINED))); - ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::UNDEFINED))); + OV_ASSERT_NO_THROW(execution_target = core.get_property("GNA", ov::intel_gna::execution_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::UNDEFINED, execution_target); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::UNDEFINED))); - ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::execution_target)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::UNDEFINED))); + OV_ASSERT_NO_THROW(compile_target = core.get_property("GNA", ov::intel_gna::execution_target)); ASSERT_EQ(ov::intel_gna::HWGeneration::UNDEFINED, compile_target); ASSERT_THROW(core.set_property("GNA", {ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0), @@ -249,22 +249,22 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedPwlAlgorithm) { auto pwl_algo = ov::intel_gna::PWLDesignAlgorithm::UNDEFINED; float pwl_max_error = 0.0f; - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT))); - ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT))); + OV_ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm)); ASSERT_EQ(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT, pwl_algo); - 
ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION))); - ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION))); + OV_ASSERT_NO_THROW(pwl_algo = core.get_property("GNA", ov::intel_gna::pwl_design_algorithm)); ASSERT_EQ(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION, pwl_algo); ASSERT_THROW(core.set_property("GNA", {{ov::intel_gna::pwl_design_algorithm.name(), "ABC"}}), ov::Exception); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(0.05))); - ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(0.05))); + OV_ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent)); ASSERT_FLOAT_EQ(0.05, pwl_max_error); - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(100.0f))); - ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::pwl_max_error_percent(100.0f))); + OV_ASSERT_NO_THROW(pwl_max_error = core.get_property("GNA", ov::intel_gna::pwl_max_error_percent)); ASSERT_FLOAT_EQ(100.0f, pwl_max_error); OPENVINO_SUPPRESS_DEPRECATED_START @@ -279,12 +279,12 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedLogLevel) { ov::Core core; auto level = ov::log::Level::NO; - ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::WARNING))); - ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::WARNING))); + OV_ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level)); ASSERT_EQ(ov::log::Level::WARNING, level); - ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::NO))); - ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::NO))); + OV_ASSERT_NO_THROW(level = core.get_property("GNA", ov::log::level)); ASSERT_EQ(ov::log::Level::NO, level); ASSERT_THROW(core.set_property("GNA", ov::log::level(ov::log::Level::ERR)), ov::Exception); @@ -298,8 +298,8 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedFwModelPath) { ov::Core core; std::string path = ""; - ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::firmware_model_image_path("model.bin"))); - ASSERT_NO_THROW(path = core.get_property("GNA", ov::intel_gna::firmware_model_image_path)); + OV_ASSERT_NO_THROW(core.set_property("GNA", ov::intel_gna::firmware_model_image_path("model.bin"))); + OV_ASSERT_NO_THROW(path = core.get_property("GNA", ov::intel_gna::firmware_model_image_path)); ASSERT_EQ("model.bin", path); } diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 8049f28269f..1fa70b4e0c4 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -447,7 +447,7 @@ TEST_P(OVClassGetMetricTest_GPU_MAX_BATCH_SIZE_STREAM_DEVICE_MEM, 
GetMetricAndPr uint32_t n_streams = 2; int64_t available_device_mem_size = 1073741824; ov::AnyMap _options = {ov::hint::model(simpleNetwork), - ov::streams::num(n_streams), + ov::num_streams(n_streams), ov::intel_gpu::hint::available_device_mem(available_device_mem_size)}; ASSERT_NO_THROW(p = ie.get_property(deviceName, ov::max_batch_size.name(), _options));
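
For reference, a minimal usage sketch of the renamed property as the tests above exercise it; it assumes a build that contains this patch and an available "CPU" device, and mirrors the set_property/get_property round trip from core_integration.cpp:

#include <openvino/runtime/core.hpp>

#include <cstdint>
#include <iostream>

int main() {
    ov::Core core;
    // ov::num_streams replaces ov::streams::num throughout this patch.
    core.set_property("CPU", ov::num_streams(4));
    int32_t n_streams = core.get_property("CPU", ov::num_streams);
    std::cout << "streams: " << n_streams << "\n";
    // The special values survive the rename: AUTO (-1) and NUMA (-2) ask the
    // plugin to pick a stream count itself, so a following get_property
    // reports the positive value that was configured automatically
    // (see smoke_SetConfigStreamsNum above).
    core.set_property("CPU", ov::num_streams(ov::NumStreams::AUTO));
    n_streams = core.get_property("CPU", ov::num_streams);
    std::cout << "auto streams: " << n_streams << "\n";
    return 0;
}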
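The CPU plugin hunks also replace hand-written static_cast calls with decltype(<property>)::value_type when packing values into the returned Parameter. A hedged sketch of the idea; _streams is a hypothetical stand-in for engConfig.streamExecutorConfig._streams:

#include <openvino/runtime/properties.hpp>

static int _streams = 2;  // hypothetical plugin-side stream count

// Routing the value through the property's declared value_type keeps the
// runtime type stored in the returned Any/Parameter in lock-step with the
// type that ov::Core::get_property(ov::num_streams) later extracts; a
// hand-picked integer cast could silently drift if the property's type
// ever changes.
decltype(ov::num_streams)::value_type num_streams_value() {
    return decltype(ov::num_streams)::value_type(_streams);
}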
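Similarly, the GNA and GPU hunks migrate string conversion from InferenceEngine::util::string_to_property/property_to_string to ov::util::from_string/to_string. A round-trip sketch of those two calls as they are used above; the header that declares them is elided in the patch (the bare "+#include" lines), so it is deliberately not named here either:

#include <string>

std::string priority_round_trip(const std::string& text) {
    // The property tag passed as the second argument selects the target
    // type for parsing...
    auto priority = ov::util::from_string(text, ov::hint::model_priority);
    // ...and to_string serializes the enum value back to its string form.
    return ov::util::to_string(priority);
}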