diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index bfa2b208d4e..9706012a827 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ b/samples/cpp/benchmark_app/main.cpp @@ -356,11 +356,10 @@ int main(int argc, char* argv[]) { bool perf_counts = false; // check if using the virtual device - auto if_auto = std::find(devices.begin(), devices.end(), "AUTO") != devices.end(); - auto if_multi = std::find(devices.begin(), devices.end(), "MULTI") != devices.end(); + auto is_virtual = is_virtual_device_found(devices); auto hardware_devices = devices; - // Remove the hardware devices if AUTO/MULTI appears in the devices list. - if (if_auto || if_multi) { + // Remove the hardware devices if AUTO/MULTI/HETERO appears in the devices list. + if (is_virtual) { devices.clear(); // Parse out the currect virtual device as the target device. std::string virtual_device = split(device_name, ':').at(0); @@ -376,8 +375,11 @@ int main(int argc, char* argv[]) { auto& device_config = config[device]; // high-level performance modes - auto ov_perf_hint = get_performance_hint(device, core); - device_config.emplace(ov::hint::performance_mode(ov_perf_hint)); + if (!device_config.count(ov::hint::performance_mode.name())) { + device_config.emplace(ov::hint::performance_mode(get_performance_hint(device, core))); + } + auto ov_perf_hint = device_config.at(ov::hint::performance_mode.name()).as(); + if (FLAGS_nireq != 0) device_config.emplace(ov::hint::num_requests(unsigned(FLAGS_nireq))); @@ -415,7 +417,7 @@ int main(int argc, char* argv[]) { std::end(supported_properties); }; // the rest are individual per-device settings (overriding the values set with perf modes) - auto setThroughputStreams = [&]() { + auto set_throughput_streams = [&]() { std::string key = getDeviceTypeFromName(device) + "_THROUGHPUT_STREAMS"; auto it_device_nstreams = device_nstreams.find(device); if (it_device_nstreams != device_nstreams.end()) { @@ -426,34 +428,13 @@ int main(int argc, char* 
argv[]) { // Use API 2.0 key for streams key = ov::num_streams.name(); device_config[key] = it_device_nstreams->second; - } else if (device == "MULTI" || device == "AUTO") { - // check if the element contains the hardware device property - auto value_vec = split(it_device_nstreams->second, ' '); - if (value_vec.size() == 1) { - key = ov::num_streams.name(); - device_config[key] = it_device_nstreams->second; - } else { - // set device nstreams properties in the AUTO/MULTI plugin - std::stringstream strm(it_device_nstreams->second); - std::map devices_property; - ov::util::Read>{}(strm, devices_property); - for (const auto& it : devices_property) { - if (device_config.find(it.first) == device_config.end() || - (is_load_config && is_dev_set_property[it.first])) { - // Create ov::device::properties with ov::num_stream and - // 1. Insert this ov::device::properties into device config if this - // ov::device::properties isn't existed. Otherwise, - // 2. Replace the existed ov::device::properties within device config. - is_dev_set_property[it.first] = false; - device_config.erase(it.first); - device_config.insert( - ov::device::properties(it.first, ov::num_streams(std::stoi(it.second)))); - } else { - auto& property = device_config[it.first].as(); - property.emplace(ov::num_streams(std::stoi(it.second))); - } - } - } + } else if (is_virtual_device(device)) { + key = ov::num_streams.name(); + update_device_config_for_virtual_device(it_device_nstreams->second, + device_config, + ov::num_streams, + is_dev_set_property, + is_load_config); } else { throw std::logic_error("Device " + device + " doesn't support config key '" + key + "' " + "and '" + ov::num_streams.name() + "'!" 
+ @@ -477,7 +458,7 @@ int main(int argc, char* argv[]) { // Use API 2.0 key for streams key = ov::num_streams.name(); device_config[key] = ov::streams::AUTO; - } else if (device == "MULTI" || device == "AUTO") { + } else if (is_virtual_device(device)) { // Set nstreams to default value auto if no nstreams specified from cmd line. for (auto& hwdevice : hardware_devices) { std::string key = std::string(getDeviceTypeFromName(hwdevice) + "_THROUGHPUT_STREAMS"); @@ -502,34 +483,12 @@ int main(int argc, char* argv[]) { // set to user defined value if (supported(ov::inference_precision.name())) { device_config.emplace(ov::inference_precision(it_device_infer_precision->second)); - } else if (device == "MULTI" || device == "AUTO") { - // check if the element contains the hardware device property - auto value_vec = split(it_device_infer_precision->second, ' '); - if (value_vec.size() == 1) { - auto key = ov::inference_precision.name(); - device_config[key] = it_device_infer_precision->second; - } else { - // set device inference_precison properties in the AUTO/MULTI plugin - std::stringstream strm(it_device_infer_precision->second); - std::map devices_property; - ov::util::Read>{}(strm, devices_property); - for (const auto& it : devices_property) { - if (device_config.find(it.first) == device_config.end() || - (is_load_config && is_dev_set_property[it.first])) { - // Create ov::device::properties with ov::inference_precision and - // 1. Insert this ov::device::properties into device config if this - // ov::device::properties isn't existed. Otherwise, - // 2. Replace the existed ov::device::properties within device config. 
- is_dev_set_property[it.first] = false; - device_config.erase(it.first); - device_config.insert( - ov::device::properties(it.first, ov::inference_precision(it.second))); - } else { - auto& property = device_config[it.first].as(); - property.emplace(ov::inference_precision(it.second)); - } - } - } + } else if (is_virtual_device(device)) { + update_device_config_for_virtual_device(it_device_infer_precision->second, + device_config, + ov::inference_precision, + is_dev_set_property, + is_load_config); } else { throw std::logic_error("Device " + device + " doesn't support config key '" + ov::inference_precision.name() + "'! " + @@ -556,7 +515,7 @@ int main(int argc, char* argv[]) { if (supported(property_name) || device_name == "AUTO") { // create nthreads/pin primary property for HW device or AUTO if -d is AUTO directly. device_config.emplace(property); - } else if (if_auto || if_multi) { + } else if (is_virtual) { // Create secondary property of -nthreads/-pin only for CPU if CPU device appears in the devices // list specified by -d. for (auto& device : hardware_devices) { @@ -571,38 +530,10 @@ int main(int argc, char* argv[]) { if (isFlagSetInCommandLine("pin")) set_nthreads_pin("pin"); - if (device.find("CPU") != std::string::npos || device.find("GPU") != std::string::npos) { - // CPU supports few special performance-oriented keys - // for CPU and GPU execution, more throughput-oriented execution via streams - setThroughputStreams(); - set_infer_precision(); - } else if (device.find("GNA") != std::string::npos) { - set_infer_precision(); - } else if (device.find("AUTO") != std::string::npos) { - setThroughputStreams(); - set_infer_precision(); - device_nstreams.erase(device); - } else if (device.find("MULTI") != std::string::npos) { - setThroughputStreams(); - set_infer_precision(); - if ((device_name.find("GPU") != std::string::npos) && (device_name.find("CPU") != std::string::npos)) { - slog::warn << "GPU throttling is turned on. 
Multi-device execution with " - "the CPU + GPU performs best with GPU throttling hint, " - << "which releases another CPU thread (that is otherwise " - "used by the GPU driver for active polling)." - << slog::endl; + set_throughput_streams(); + set_infer_precision(); - device_config.insert(ov::device::properties("GPU", {{GPU_CONFIG_KEY(PLUGIN_THROTTLE), 1}})); - // limit threading for CPU portion of inference - if (!isFlagSetInCommandLine("pin")) { - auto it_affinity = device_config.find(ov::affinity.name()); - if (it_affinity != device_config.end()) { - slog::warn << "Turn off threads pinning for " << device - << " device since multi-scenario with GPU device is used." << slog::endl; - it_affinity->second = ov::Affinity::NONE; - } - } - } + if (is_virtual_device(device)) { device_nstreams.erase(device); } } @@ -905,7 +836,21 @@ int main(int argc, char* argv[]) { if (cfg == ov::supported_properties) continue; auto prop = compiledModel.get_property(cfg); - slog::info << " " << cfg << ": " << prop.as() << slog::endl; + if (cfg == ov::device::properties) { + auto devices_properties = prop.as(); + for (auto& item : devices_properties) { + slog::info << " " << item.first << ": " << slog::endl; + for (auto& item2 : item.second.as()) { + if (item2.first == ov::supported_properties || + item2.first == METRIC_KEY(SUPPORTED_CONFIG_KEYS) || + item2.first == METRIC_KEY(SUPPORTED_METRICS)) + continue; + slog::info << " " << item2.first << ": " << item2.second.as() << slog::endl; + } + } + } else { + slog::info << " " << cfg << ": " << prop.as() << slog::endl; + } } // Update number of streams diff --git a/samples/cpp/benchmark_app/utils.cpp b/samples/cpp/benchmark_app/utils.cpp index 6728b8438b7..8c53f3d1924 100644 --- a/samples/cpp/benchmark_app/utils.cpp +++ b/samples/cpp/benchmark_app/utils.cpp @@ -107,13 +107,27 @@ std::vector split_float(const std::string& s, char delim) { return result; } +static const std::vector meta_plugins{"MULTI", "HETERO", "AUTO"}; +bool 
is_virtual_device(const std::string& device_name) { + return std::find(meta_plugins.begin(), meta_plugins.end(), device_name) != meta_plugins.end(); +} + +bool is_virtual_device_found(const std::vector& device_names) { + for (const auto& device_name : device_names) { + if (is_virtual_device(device_name)) { + return true; + } + } + return false; +} + std::vector parse_devices(const std::string& device_string) { std::string comma_separated_devices = device_string; auto colon = comma_separated_devices.find(":"); std::vector result; if (colon != std::string::npos) { auto target_device = comma_separated_devices.substr(0, colon); - if (target_device == "AUTO" || target_device == "MULTI") { + if (is_virtual_device(target_device)) { result.push_back(target_device); } auto bracket = comma_separated_devices.find("("); // e.g. in BATCH:GPU(4) @@ -137,8 +151,8 @@ void parse_value_for_virtual_device(const std::string& device, std::mapsecond; values_string.clear(); @@ -146,23 +160,92 @@ void parse_value_for_virtual_device(const std::string& device, std::mapfirst == device) { iter++; continue; } - values_string[device] += iter->first + " " + iter->second + " "; + if (ss.str().empty()) + ss << '{'; + else + ss << ','; + ss << iter->first << ":" << iter->second; iter = values_string.erase(iter); } - if (values_string.find(device) != values_string.end()) { - auto& nstreams = values_string[device]; - // Remove the space at the tail. 
- nstreams.pop_back(); + if (!ss.str().empty()) { + ss << '}'; + values_string[device] = ss.str(); } return; } +template +void update_device_config_for_virtual_device(const std::string& value, + ov::AnyMap& device_config, + ov::Property property, + std::map& is_dev_set_property, + bool is_load_config) { + // check if the element contains the hardware device property + if (split(value, ':').size() == 1) { + device_config[property.name()] = value; + } else { + // set device nstreams properties in the AUTO/MULTI/HETERO plugin + std::stringstream strm(value); + std::map devices_property; + ov::util::Read>{}(strm, devices_property); + for (const auto& it : devices_property) { + const auto& device_name = it.first; + const auto& device_value = it.second; + if (device_config.find(ov::device::properties.name()) == device_config.end() || + (is_load_config && is_dev_set_property[device_name])) { + // Create ov::device::properties with ov::num_stream/ov::inference_precision and + // 1. Insert this ov::device::properties into device config if this + // ov::device::properties isn't existed. Otherwise, + // 2. Replace the existed ov::device::properties within device config. 
+ is_dev_set_property[device_name] = false; + device_config.erase(device_name); + device_config[ov::device::properties.name()] = ov::AnyMap{}; + auto& secondary_property = device_config.at(ov::device::properties.name()).as(); + secondary_property[device_name] = ov::AnyMap{{property.name(), device_value}}; + } else { + auto& secondary_property = device_config.at(ov::device::properties.name()).as(); + if (secondary_property.count(device_name)) { + auto& device_property = secondary_property.at(device_name).as(); + device_property.emplace(property(device_value)); + } else { + secondary_property[device_name] = ov::AnyMap{{property.name(), device_value}}; + } + } + } + } +} + +void update_device_config_for_virtual_device(const std::string& value, + ov::AnyMap& device_config, + ov::Property property, + std::map& is_dev_set_property, + bool is_load_config) { + return update_device_config_for_virtual_device(value, + device_config, + property, + is_dev_set_property, + is_load_config); +} + +void update_device_config_for_virtual_device(const std::string& value, + ov::AnyMap& device_config, + ov::Property property, + std::map& is_dev_set_property, + bool is_load_config) { + return update_device_config_for_virtual_device(value, + device_config, + property, + is_dev_set_property, + is_load_config); +} + std::map parse_value_per_device(const std::vector& devices, const std::string& values_string) { // Format: :,: or just @@ -691,27 +774,12 @@ void dump_config(const std::string& filename, const std::map device_properties; for (const auto& option : item.second) { - if (option.second.is()) { - // hw device properties - device_properties[option.first] = option.second.as(); - } else { - // primary property - std::stringstream strm; - option.second.print(strm); - auto property_string = strm.str(); - jsonConfig[deviceName][option.first] = property_string; - } - if (!device_properties.empty()) { - for (auto& item : device_properties) { - auto hw_device_name = item.first; - for (auto& 
property : item.second) { - jsonConfig[deviceName]["DEVICE_PROPERTIES"][hw_device_name][property.first] = - property.second.as(); - } - } - } + // primary property + std::stringstream strm; + option.second.print(strm); + auto property_string = strm.str(); + jsonConfig[deviceName][option.first] = property_string; } } @@ -740,23 +808,7 @@ void load_config(const std::string& filename, std::map& const std::string& deviceName = item.key(); const auto& itemValue = item.value(); for (auto option = itemValue.cbegin(), itemValueEnd = itemValue.cend(); option != itemValueEnd; ++option) { - if (option.key() != "DEVICE_PROPERTIES") { - config[deviceName][option.key()] = option.value().get(); - continue; - } - const auto& optionValue = option.value(); - for (auto hw_properties = optionValue.cbegin(), optionValueEnd = optionValue.cend(); - hw_properties != optionValueEnd; - ++hw_properties) { - const std::string& hw_device_name = hw_properties.key(); - std::map hw_device_properties; - const auto& hw_propertiesValue = hw_properties.value(); - for (auto property = hw_propertiesValue.cbegin(), hw_propertiesEnd = hw_propertiesValue.cend(); - property != hw_propertiesEnd; - ++property) - hw_device_properties[property.key()] = property.value().get(); - config[deviceName][hw_device_name] = hw_device_properties; - } + config[deviceName][option.key()] = option.value().get(); } } } diff --git a/samples/cpp/benchmark_app/utils.hpp b/samples/cpp/benchmark_app/utils.hpp index 9c89e2d4df5..406500d3c36 100644 --- a/samples/cpp/benchmark_app/utils.hpp +++ b/samples/cpp/benchmark_app/utils.hpp @@ -58,11 +58,19 @@ using InputsInfo = std::map; using PartialShapes = std::map; } // namespace benchmark_app +bool is_virtual_device(const std::string& device_name); +bool is_virtual_device_found(const std::vector& device_names); std::vector parse_devices(const std::string& device_string); uint32_t device_default_device_duration_in_seconds(const std::string& device); std::map parse_value_per_device(const 
std::vector& devices, const std::string& values_string); void parse_value_for_virtual_device(const std::string& device, std::map& values_string); +template +void update_device_config_for_virtual_device(const std::string& value, + ov::AnyMap& device_config, + ov::Property property, + std::map& is_dev_set_property, + bool is_load_config = false); std::string get_shapes_string(const benchmark_app::PartialShapes& shapes); size_t get_batch_size(const benchmark_app::InputsInfo& inputs_info); std::vector split(const std::string& s, char delim); diff --git a/samples/cpp/speech_sample/main.cpp b/samples/cpp/speech_sample/main.cpp index e913d71411a..7553ee7bf22 100644 --- a/samples/cpp/speech_sample/main.cpp +++ b/samples/cpp/speech_sample/main.cpp @@ -255,7 +255,11 @@ int main(int argc, char* argv[]) { // ----------------------------------------------------------------------------------------------------- // --------------------------- Step 2. Loading model to the device ------------------------------------------ if (useGna) { - genericPluginConfig.insert(std::begin(gnaPluginConfig), std::end(gnaPluginConfig)); + if (useHetero) { + genericPluginConfig.insert(ov::device::properties("GNA", gnaPluginConfig)); + } else { + genericPluginConfig.insert(std::begin(gnaPluginConfig), std::end(gnaPluginConfig)); + } } auto t0 = Time::now(); ms loadTime = std::chrono::duration_cast(Time::now() - t0); diff --git a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp index 7bca2a50980..7c45761700f 100644 --- a/src/bindings/python/src/pyopenvino/core/properties/properties.cpp +++ b/src/bindings/python/src/pyopenvino/core/properties/properties.cpp @@ -6,6 +6,7 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/any.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -158,6 +159,31 @@ void regmodule_properties(py::module m) { wrap_property_RO(m_device, 
ov::device::capabilities, "capabilities"); wrap_property_RO(m_device, ov::device::uuid, "uuid"); + // Special case: ov::device::properties + m_device.def("properties", []() { + return ov::device::properties.name(); + }); + + m_device.def("properties", [](py::args& args) { + ov::AnyMap value = {}; + for (auto v : args) { + if (!py::isinstance(v)) { + throw py::type_error("Incorrect passed value: " + std::string(py::str(v)) + + ", expected dictionary instead of " + typeid(v).name()); + } + auto dict = py::cast(v); + for (auto item : dict) { + if (!py::isinstance(item.first)) { + throw py::type_error("Incorrect passed key in value: " + std::string(py::str(item.first)) + + ", expected string instead of " + typeid(item.first).name()); + } + value[py::cast(item.first)] = + Common::utils::py_object_to_any(py::cast(item.second)); + } + } + return ov::device::properties(value); + }); + // Modules made in pybind cannot easily register attributes, thus workaround is needed. // Let's simulate module with attributes by creating empty proxy class called FakeModuleName. 
class FakeCapability {}; diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index f5e1b5f9e05..f029323e35a 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -99,6 +99,10 @@ py::object from_ov_any(const ov::Any& any) { else if (any.is>()) { return py::cast(any.as>()); } + // Check for std::map { + else if (any.is>()) { + return py::cast(any.as>()); + } // Check for std::vector else if (any.is>()) { auto val = any.as>(); @@ -194,6 +198,33 @@ void deprecation_warning(const std::string& function_name, const std::string& ve PyErr_WarnEx(PyExc_DeprecationWarning, ss.str().data(), 2); } +bool py_object_is_any_map(const py::object& py_obj) { + if (!py::isinstance(py_obj)) { + return false; + } + auto dict = py::cast(py_obj); + return std::all_of(dict.begin(), dict.end(), [&](const std::pair& elem) { + return py::isinstance(elem.first); + }); +} + +ov::AnyMap py_object_to_any_map(const py::object& py_obj) { + OPENVINO_ASSERT(py_object_is_any_map(py_obj), "Unsupported attribute type."); + ov::AnyMap return_value = {}; + for (auto& item : py::cast(py_obj)) { + std::string key = py::cast(item.first); + py::object value = py::cast(item.second); + if (py::isinstance(value)) { + return_value[key] = py::cast(value); + } else if (py_object_is_any_map(value)) { + return_value[key] = Common::utils::py_object_to_any_map(value); + } else { + return_value[key] = Common::utils::py_object_to_any(value); + } + } + return return_value; +} + ov::Any py_object_to_any(const py::object& py_obj) { // Python types if (py::isinstance(py_obj)) { @@ -244,6 +275,8 @@ ov::Any py_object_to_any(const py::object& py_obj) { OPENVINO_ASSERT(false, "Unsupported attribute type."); } // OV types + } else if (py_object_is_any_map(py_obj)) { + return py_object_to_any_map(py_obj); } else if (py::isinstance(py_obj)) { return py::cast(py_obj); } else if 
(py::isinstance(py_obj)) { diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index ff6da6558c8..5b0d00165b9 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -23,6 +23,10 @@ namespace utils { void deprecation_warning(const std::string& function_name, const std::string& version = std::string(), const std::string& message = std::string()); + bool py_object_is_any_map(const py::object& py_obj); + + ov::AnyMap py_object_to_any_map(const py::object& py_obj); + ov::Any py_object_to_any(const py::object& py_obj); ov::pass::Serialize::Version convert_to_version(const std::string& version); diff --git a/src/bindings/python/tests/test_graph/test_any.py b/src/bindings/python/tests/test_graph/test_any.py index 8deb5df899b..e4817e52f7e 100644 --- a/src/bindings/python/tests/test_graph/test_any.py +++ b/src/bindings/python/tests/test_graph/test_any.py @@ -33,18 +33,18 @@ def test_any_list(values, data_type): assert ovany.get() == values -@pytest.mark.parametrize(("value_dict", "data_type"), [ - ({"key": "value"}, str), - ({21: 37}, int), - ({21.0: 37.0}, float), +@pytest.mark.parametrize(("value_dict", "value_type", "data_type"), [ + ({"key": "value"}, OVAny, str), + ({21: 37}, int, int), + ({21.0: 37.0}, float, float), ]) -def test_any_dict(value_dict, data_type): +def test_any_dict(value_dict, value_type, data_type): ovany = OVAny(value_dict) key = list(value_dict.keys())[0] assert isinstance(ovany.value, dict) assert ovany[key] == list(value_dict.values())[0] assert len(ovany.value) == 1 - assert type(ovany.value[key]) == data_type + assert type(ovany.value[key]) == value_type assert type(list(value_dict.values())[0]) == data_type assert ovany.get() == value_dict diff --git a/src/bindings/python/tests/test_runtime/test_properties.py b/src/bindings/python/tests/test_runtime/test_properties.py index 0f74dfb9c9c..41558de5aa7 100644 --- 
a/src/bindings/python/tests/test_runtime/test_properties.py +++ b/src/bindings/python/tests/test_runtime/test_properties.py @@ -305,6 +305,30 @@ def test_properties_device_priorities(): assert f"Incorrect passed value: {value} , expected string values." in str(e.value) +def test_properties_device_properties(): + assert properties.device.properties() == "DEVICE_PROPERTIES" + + def make_dict(*arg): + return dict( # noqa: C406 + [*arg]) + + def check(value1, value2): + assert properties.device.properties(value1) == ("DEVICE_PROPERTIES", OVAny(value2)) + + check({"CPU": {properties.streams.num(): 2}}, + {"CPU": {"NUM_STREAMS": 2}}) + check({"CPU": make_dict(properties.streams.num(2))}, + {"CPU": {"NUM_STREAMS": properties.streams.Num(2)}}) + check({"GPU": make_dict(properties.inference_precision(Type.f32))}, + {"GPU": {"INFERENCE_PRECISION_HINT": Type.f32}}) + check({"CPU": make_dict(properties.streams.num(2), properties.inference_precision(Type.f32))}, + {"CPU": {"INFERENCE_PRECISION_HINT": Type.f32, "NUM_STREAMS": properties.streams.Num(2)}}) + check({"CPU": make_dict(properties.streams.num(2), properties.inference_precision(Type.f32)), + "GPU": make_dict(properties.streams.num(1), properties.inference_precision(Type.f16))}, + {"CPU": {"INFERENCE_PRECISION_HINT": Type.f32, "NUM_STREAMS": properties.streams.Num(2)}, + "GPU": {"INFERENCE_PRECISION_HINT": Type.f16, "NUM_STREAMS": properties.streams.Num(1)}}) + + def test_properties_streams(): # Test extra Num class assert properties.streams.Num().to_integer() == -1 diff --git a/src/core/include/openvino/core/any.hpp b/src/core/include/openvino/core/any.hpp index 805fd808516..0e070f3a0e7 100644 --- a/src/core/include/openvino/core/any.hpp +++ b/src/core/include/openvino/core/any.hpp @@ -28,6 +28,9 @@ namespace ov { class Plugin; /** @cond INTERNAL */ class Any; + +using AnyMap = std::map; + namespace util { OPENVINO_API bool equal(std::type_index lhs, std::type_index rhs); @@ -126,6 +129,11 @@ struct OPENVINO_API Read> { 
void operator()(std::istream& is, std::tuple& tuple) const; }; +template <> +struct OPENVINO_API Read { + void operator()(std::istream& is, AnyMap& map) const; +}; + template auto from_string(const std::string& str) -> const typename std::enable_if::value, T>::type& { @@ -210,14 +218,36 @@ struct Read< std::map, typename std::enable_if::value && std::is_default_constructible::value>::type> { void operator()(std::istream& is, std::map& map) const { - while (is.good()) { - std::string str; - is >> str; - auto k = from_string(str); - is >> str; - auto v = from_string(str); - map.emplace(std::move(k), std::move(v)); + char c; + + is >> c; + OPENVINO_ASSERT(c == '{', "Failed to parse std::map. Starting symbols is not '{', it's ", c); + + while (c != '}') { + std::string key, value; + std::getline(is, key, ':'); + size_t enclosed_container_level = 0; + + while (is.good()) { + is >> c; + if (c == ',') { // delimiter between map's pairs + if (enclosed_container_level == 0) // we should interrupt after delimiter + break; + } + if (c == '{' || c == '[') // case of enclosed maps / arrays + ++enclosed_container_level; + if (c == '}' || c == ']') { + if (enclosed_container_level == 0) + break; // end of map + --enclosed_container_level; + } + + value += c; // accumulate current value + } + map.emplace(from_string(key), from_string(value)); } + + OPENVINO_ASSERT(c == '}', "Failed to parse std::map. 
Ending symbols is not '}', it's ", c); } }; @@ -322,14 +352,14 @@ struct Write> { void operator()(std::ostream& os, const std::map& map) const { if (!map.empty()) { std::size_t i = 0; + os << '{'; for (auto&& v : map) { - os << to_string(v.first); - os << ' '; - os << to_string(v.second); + os << to_string(v.first) << ':' << to_string(v.second); if (i < (map.size() - 1)) - os << ' '; + os << ','; ++i; } + os << '}'; } } }; @@ -914,8 +944,6 @@ public: const void* addressof() const; }; -using AnyMap = std::map; - using RTMap = AnyMap; using AnyVector = std::vector; diff --git a/src/core/src/any.cpp b/src/core/src/any.cpp index 21296939b01..d1ecbd0716b 100644 --- a/src/core/src/any.cpp +++ b/src/core/src/any.cpp @@ -216,6 +216,39 @@ void Read>::operator()( Read{}(is, std::get<2>(tuple)); } +void Read::operator()(std::istream& is, AnyMap& map) const { + std::string key, value; + char c; + + is >> c; + OPENVINO_ASSERT(c == '{', "Failed to parse ov::AnyMap. Starting symbols is not '{', it's ", c); + + while (c != '}') { + std::getline(is, key, ':'); + size_t enclosed_container_level = 0; + + while (is.good()) { + is >> c; + if (c == ',') { // delimiter between map's pairs + if (enclosed_container_level == 0) // we should interrupt after delimiter + break; + } + if (c == '{' || c == '[') // case of enclosed maps / arrays + ++enclosed_container_level; + if (c == '}' || c == ']') { + if (enclosed_container_level == 0) + break; // end of map + --enclosed_container_level; + } + + value += c; // accumulate current value + } + map.emplace(std::move(key), std::move(value)); + } + + OPENVINO_ASSERT(c == '}', "Failed to parse ov::AnyMap. 
Ending symbols is not '}', it's ", c); +} + void Read>::operator()(std::istream& is, std::tuple& tuple) const { Read{}(is, std::get<0>(tuple)); diff --git a/src/core/tests/any.cpp b/src/core/tests/any.cpp index 4a5d46953f7..6b009cd15f3 100644 --- a/src/core/tests/any.cpp +++ b/src/core/tests/any.cpp @@ -161,6 +161,187 @@ TEST_F(AnyTests, AnyAsMapOfAnys) { ASSERT_EQ(refMap["testParamString"].as(), testString); } +TEST_F(AnyTests, AnyAsMapOfMapOfAnys) { + std::map refMap1; + refMap1["testParamInt"] = 4; + refMap1["testParamString"] = "test"; + + std::map refMap2; + refMap2["testParamInt"] = 5; + refMap2["testParamString"] = "test2"; + + std::map refMap; + refMap["refMap1"] = refMap1; + refMap["refMap2"] = refMap2; + + Any p = refMap; + bool isMap = p.is>(); + ASSERT_TRUE(isMap); + auto testMap = p.as>(); + + ASSERT_NE(testMap.find("refMap1"), testMap.end()); + auto testMap1 = testMap.at("refMap1").as>(); + ASSERT_NE(testMap1.find("testParamInt"), testMap.end()); + ASSERT_NE(testMap1.find("testParamString"), testMap.end()); + + int testInt1 = testMap1["testParamInt"].as(); + std::string testString1 = testMap1["testParamString"].as(); + + ASSERT_EQ(refMap1["testParamInt"].as(), testInt1); + ASSERT_EQ(refMap1["testParamString"].as(), testString1); + + ASSERT_NE(testMap.find("refMap2"), testMap.end()); + auto testMap2 = testMap.at("refMap2").as>(); + ASSERT_NE(testMap2.find("testParamInt"), testMap.end()); + ASSERT_NE(testMap2.find("testParamString"), testMap.end()); + + int testInt2 = testMap2["testParamInt"].as(); + std::string testString2 = testMap2["testParamString"].as(); + + ASSERT_EQ(refMap2["testParamInt"].as(), testInt2); + ASSERT_EQ(refMap2["testParamString"].as(), testString2); +} + +TEST_F(AnyTests, AnyAsMapOfMapOfAnysFromString) { + const std::string string_props = "{map1:{prop1:1,prop2:2.0},map2:{prop1:value}}"; + ov::Any any(string_props); + + ov::AnyMap map; + ASSERT_TRUE(any.is()); + ASSERT_FALSE(any.is()); + ASSERT_NO_THROW(map = any.as()); + 
ASSERT_EQ(string_props, ov::Any(map).as()); + + // check map1 + using MapStrDouble = std::map; + MapStrDouble map1; + ASSERT_TRUE(map["map1"].is()); + ASSERT_FALSE(map["map1"].is()); + ASSERT_FALSE(map["map1"].is()); + ASSERT_NO_THROW(map1 = map["map1"].as()); + ASSERT_EQ(2, map1.size()); + + // check map1:prop1 + ASSERT_EQ(1.0, map1["prop1"]); + // check map1:prop2 + ASSERT_EQ(2.0, map1["prop2"]); + + // check map2 + ov::AnyMap map2; + ASSERT_TRUE(map["map2"].is()); + ASSERT_FALSE(map["map2"].is()); + ASSERT_NO_THROW(map2 = map["map2"].as()); + ASSERT_EQ(1, map2.size()); + + // check map1:prop1 + ASSERT_TRUE(map2["prop1"].is()); + ASSERT_FALSE(map2["prop1"].is()); + ASSERT_EQ("value", map2["prop1"].as()); +} + +TEST_F(AnyTests, AnyAsMapOfMapOfMapOfAnysFromString) { + const std::string string_props = "{map1:{subprop_map:{prop:value}},prop1:1,prop2:2.0}"; + ov::Any any(string_props); + + ov::AnyMap map; + ASSERT_TRUE(any.is()); + ASSERT_FALSE(any.is()); + ASSERT_NO_THROW(map = any.as()); + ASSERT_EQ(3, map.size()); + ASSERT_EQ(string_props, ov::Any(map).as()); + + // check prop1 + ASSERT_TRUE(map["prop1"].is()); + ASSERT_FALSE(map["prop1"].is()); + ASSERT_EQ("1", map["prop1"].as()); + ASSERT_EQ(1, map["prop1"].as()); + + // check prop2 + ASSERT_TRUE(map["prop2"].is()); + ASSERT_FALSE(map["prop2"].is()); + ASSERT_FALSE(map["prop2"].is()); + ASSERT_EQ("2.0", map["prop2"].as()); + ASSERT_EQ(2, map["prop2"].as()); + ASSERT_EQ(2.0, map["prop2"].as()); + + // check map1 + ov::AnyMap map1; + ASSERT_TRUE(map["map1"].is()); + ASSERT_FALSE(map["map1"].is()); + ASSERT_NO_THROW(map1 = map["map1"].as()); + + // check subprop + ov::AnyMap subprop_map; + ASSERT_TRUE(map1["subprop_map"].is()); + ASSERT_FALSE(map1["subprop_map"].is()); + ASSERT_NO_THROW(subprop_map = map1["subprop_map"].as()); + + // check prop + ASSERT_TRUE(subprop_map["prop"].is()); + ASSERT_FALSE(subprop_map["prop"].is()); + ASSERT_EQ("value", subprop_map["prop"].as()); +} + +TEST_F(AnyTests, 
AnyDoesNotShareValues) { + // simple types + { + Any a = 1; + Any b = a; + a = 2; + ASSERT_EQ(1, b.as()); + ASSERT_EQ(2, a.as()); + b = 3; + ASSERT_EQ(2, a.as()); + ASSERT_EQ(3, b.as()); + } + + // AnyMap's + { + AnyMap map{ + {"1", ov::Any(1)}, + {"2", ov::Any(2)}, + }; + + Any a = map; + + // check initial state + ASSERT_EQ(1, a.as()["1"].as()); + ASSERT_EQ(2, a.as()["2"].as()); + + map["1"] = 3; // change map + ASSERT_EQ(1, a.as()["1"].as()); // Any is not changed + + a.as()["2"] = 4; // change Any + ASSERT_EQ(2, map["2"].as()); // map is not changed + + // erase from Any's map + AnyMap from_any_map = a.as(); + from_any_map.erase(from_any_map.begin()); + ASSERT_EQ(2, map.size()); + + // erase from map + map.erase(map.find("2")); + ASSERT_NE(from_any_map.end(), from_any_map.find("2")); + ASSERT_EQ(4, a.as()["2"].as()); + } +} + +TEST_F(AnyTests, DISABLED_AnyMapSharesValues) { + AnyMap map{ + {"1", 1}, + {"2", 2}, + }; + + AnyMap copy_map = map; + + // check initial state + ASSERT_EQ(1, copy_map["1"].as()); + ASSERT_EQ(2, copy_map["2"].as()); + + map["1"].as() = 110; // change map + EXPECT_EQ(1, copy_map["1"].as()); // TODO: why value is changed here? 
+} + TEST_F(AnyTests, AnyNotEmpty) { Any p = 4; ASSERT_FALSE(p.empty()); @@ -401,7 +582,31 @@ TEST_F(AnyTests, PrintToMapOfAnys) { { Any p = refMap; ASSERT_NO_THROW(p.print(stream)); - ASSERT_EQ(stream.str(), std::string{"testParamInt 4 testParamString test"}); + ASSERT_EQ(stream.str(), std::string{"{testParamInt:4,testParamString:test}"}); + } +} + +TEST_F(AnyTests, PrintToMapOfMapsOfAnys) { + std::map refMap1; + refMap1["testParamInt"] = 4; + refMap1["testParamString"] = "test"; + + std::map refMap2; + refMap2["testParamInt"] = 5; + refMap2["testParamString"] = "test2"; + + std::map refMap; + refMap["refMap1"] = refMap1; + refMap["refMap2"] = refMap2; + + std::stringstream stream; + { + Any p = refMap; + ASSERT_NO_THROW(p.print(stream)); + ASSERT_EQ( + stream.str(), + std::string{ + "{refMap1:{testParamInt:4,testParamString:test},refMap2:{testParamInt:5,testParamString:test2}}"}); } } diff --git a/src/inference/dev_api/openvino/runtime/icore.hpp b/src/inference/dev_api/openvino/runtime/icore.hpp index 6c189f8b107..723aa5a3687 100644 --- a/src/inference/dev_api/openvino/runtime/icore.hpp +++ b/src/inference/dev_api/openvino/runtime/icore.hpp @@ -203,6 +203,14 @@ public: return get_property(device_name, property.name(), arguments).template as(); } + /** + * @brief Get only properties that are suppored by specified device + * @param full_device_name Name of a device (can be either virtual or hardware) + * @param properties Properties that can contains configs that are not supported by device + * @return map of properties that are supported by device + */ + virtual AnyMap get_supported_property(const std::string& full_device_name, const AnyMap& properties) const = 0; + /** * @brief Default virtual destructor */ diff --git a/src/inference/include/openvino/runtime/properties.hpp b/src/inference/include/openvino/runtime/properties.hpp index 72e116834dd..d5a1853cb5f 100644 --- a/src/inference/include/openvino/runtime/properties.hpp +++ 
b/src/inference/include/openvino/runtime/properties.hpp @@ -647,7 +647,19 @@ static constexpr Priorities priorities{"MULTI_DEVICE_PRIORITIES"}; * @brief Type for property to pass set of properties to specified device * @ingroup ov_runtime_cpp_prop_api */ -struct Properties { + +struct Properties : public Property>> { + using Property>>::Property; + + /** + * @brief Constructs property + * @param configs set of property values with names + * @return Pair of string key representation and type erased property value. + */ + inline std::pair operator()(const AnyMap& config) const { + return {name(), config}; + } + /** * @brief Constructs property * @param device_name device plugin alias @@ -655,7 +667,7 @@ struct Properties { * @return Pair of string key representation and type erased property value. */ inline std::pair operator()(const std::string& device_name, const AnyMap& config) const { - return {device_name, config}; + return {name() + std::string("_") + device_name, config}; } /** @@ -669,7 +681,7 @@ struct Properties { inline util::EnableIfAllStringAny, Properties...> operator()( const std::string& device_name, Properties&&... configs) const { - return {device_name, AnyMap{std::pair{configs}...}}; + return {name() + std::string("_") + device_name, AnyMap{std::pair{configs}...}}; } }; @@ -684,7 +696,7 @@ struct Properties { * ov::device::properties("GPU", ov::enable_profiling(false))); * @endcode */ -static constexpr Properties properties; +static constexpr Properties properties{"DEVICE_PROPERTIES"}; /** * @brief Read-only property to get a std::string value representing a full device name. 
diff --git a/src/inference/src/core.cpp b/src/inference/src/core.cpp index 2d2526f38ef..01454656e7f 100644 --- a/src/inference/src/core.cpp +++ b/src/inference/src/core.cpp @@ -27,36 +27,6 @@ std::string resolve_extension_path(const std::string& path) { return retvalue; } -ov::AnyMap flatten_sub_properties(const std::string& device, const ov::AnyMap& properties) { - ov::AnyMap result = properties; - bool isVirtualDev = device.find("AUTO") != std::string::npos || device.find("MULTI") != std::string::npos || - device.find("HETERO") != std::string::npos; - for (auto item = result.begin(); item != result.end();) { - auto parsed = ov::parseDeviceNameIntoConfig(item->first); - if (!item->second.is()) { - item++; - continue; - } - if (device == parsed._deviceName) { - // 1. flatten the secondary property for target device - for (auto&& sub_property : item->second.as()) { - // 1.1 1st level property overrides 2nd level property - if (result.find(sub_property.first) != result.end()) - continue; - result[sub_property.first] = sub_property.second; - } - item = result.erase(item); - } else if (isVirtualDev) { - // 2. keep the secondary property for the other virtual devices - item++; - } else { - // 3. 
remove the secondary property setting for other hardware device - item = result.erase(item); - } - } - return result; -} - } // namespace namespace ov { @@ -151,7 +121,7 @@ CompiledModel Core::compile_model(const std::shared_ptr& model, const std::string& device_name, const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->compile_model(model, device_name, flatten_sub_properties(device_name, config)); + auto exec = _impl->compile_model(model, device_name, config); return {exec._ptr, exec._so}; }); } @@ -162,7 +132,7 @@ CompiledModel Core::compile_model(const std::string& model_path, const AnyMap& c CompiledModel Core::compile_model(const std::string& model_path, const std::string& device_name, const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->compile_model(model_path, device_name, flatten_sub_properties(device_name, config)); + auto exec = _impl->compile_model(model_path, device_name, config); return {exec._ptr, exec._so}; }); } @@ -172,7 +142,7 @@ CompiledModel Core::compile_model(const std::string& model, const std::string& device_name, const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->compile_model(model, weights, device_name, flatten_sub_properties(device_name, config)); + auto exec = _impl->compile_model(model, weights, device_name, config); return {exec._ptr, exec._so}; }); } @@ -181,7 +151,7 @@ CompiledModel Core::compile_model(const std::shared_ptr& model, const RemoteContext& context, const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->compile_model(model, context, flatten_sub_properties(context.get_device_name(), config)); + auto exec = _impl->compile_model(model, context, config); return {exec._ptr, exec._so}; }); } @@ -235,7 +205,7 @@ void Core::add_extension(const std::vector>& exte CompiledModel Core::import_model(std::istream& modelStream, const std::string& device_name, const AnyMap& config) { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model"); 
OV_CORE_CALL_STATEMENT({ - auto exec = _impl->import_model(modelStream, device_name, flatten_sub_properties(device_name, config)); + auto exec = _impl->import_model(modelStream, device_name, config); return {exec._ptr, exec._so}; }); } @@ -253,7 +223,7 @@ CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext& SupportedOpsMap Core::query_model(const std::shared_ptr& model, const std::string& device_name, const AnyMap& config) const { - OV_CORE_CALL_STATEMENT(return _impl->query_model(model, device_name, flatten_sub_properties(device_name, config));); + OV_CORE_CALL_STATEMENT(return _impl->query_model(model, device_name, config);); } void Core::set_property(const AnyMap& properties) { @@ -300,7 +270,7 @@ RemoteContext Core::create_context(const std::string& device_name, const AnyMap& OPENVINO_ASSERT(device_name.find("BATCH") != 0, "BATCH device does not support remote context"); OV_CORE_CALL_STATEMENT({ - auto parsed = parseDeviceNameIntoConfig(device_name, flatten_sub_properties(device_name, params)); + auto parsed = parseDeviceNameIntoConfig(device_name, params); auto remoteContext = _impl->get_plugin(parsed._deviceName).create_context(parsed._config); return {remoteContext._impl, {remoteContext._so}}; }); diff --git a/src/inference/src/dev/core_impl.cpp b/src/inference/src/dev/core_impl.cpp index 2c5ced3e824..44fc79a4987 100644 --- a/src/inference/src/dev/core_impl.cpp +++ b/src/inference/src/dev/core_impl.cpp @@ -57,8 +57,251 @@ void stripDeviceName(std::string& device, const std::string& substr) { } } +bool is_virtual_device(const std::string& device_name) { + return (device_name.find("AUTO") != std::string::npos || device_name.find("MULTI") != std::string::npos || + device_name.find("HETERO") != std::string::npos || device_name.find("BATCH") != std::string::npos); +}; + +ov::AnyMap clone_map(const ov::AnyMap& m) { + ov::AnyMap rm; + for (auto&& kvp : m) { + rm[kvp.first] = kvp.second.is() ? 
ov::Any(clone_map(kvp.second.as())) : kvp.second; + } + + return rm; +} + +/** + * @brief Converts / flattens ov::device::properties from + * @code + * core.compile_model(model, "GPU", ov::device::properties("GPU", ov::cache_dir("/tmp"))); + * // or + * core.compile_model(model, "GPU", ov::device::properties({ + * { "GPU", ov::cache_dir("/tmp") }, + * { "CPU", ov::cache_dir("") } + * })); + * @endcode + * To the form: + * @code + * core.compile_model(model, "GPU", ov::cache_dir("/tmp")); + * @endcode + * + * @param user_device_name A device name for which properties flattening is performed + * @param user_properties Original set of properties + * @return ov::AnyMap Flattened ov::AnyMap with properties + */ +ov::AnyMap flatten_sub_properties(const std::string& user_device_name, const ov::AnyMap& user_properties) { + ov::AnyMap result_properties = clone_map(user_properties); + + // puts sub-property to result_properties if it's not there yet + auto update_result_properties = [&result_properties](const ov::AnyMap& sub_properties) -> void { + for (auto&& sub_property : sub_properties) + result_properties[sub_property.first] = sub_property.second; + }; + + // First search for ov::device::properties(DEVICE, ...), which has higher + for (auto secondary_property = result_properties.begin(); secondary_property != result_properties.end();) { + auto subprop_device_name_pos = secondary_property->first.find(ov::device::properties.name() + std::string("_")); + if (subprop_device_name_pos == std::string::npos) { + // 1. Skip non-matching properties + secondary_property++; + continue; + } + + // 2. 
device properties DEVICE_PROPERTIES_ are found + auto subprop_device_name = + secondary_property->first.substr(subprop_device_name_pos + std::strlen(ov::device::properties.name()) + 1); + // flattening is performed only when config is applicable (see docs for ov::is_config_applicable) + if (ov::is_config_applicable(user_device_name, subprop_device_name) || is_virtual_device(user_device_name)) { + // 2.1. keep the secondary property for the other virtual devices, but repack them + auto device_properties = result_properties.find(ov::device::properties.name()); + if (device_properties == result_properties.end()) { + result_properties[ov::device::properties.name()] = ov::AnyMap{}; + } else if (device_properties->second.is()) { // because of legacy API 1.0 + device_properties->second = device_properties->second.as(); + } + auto& secondary_properties = result_properties[ov::device::properties.name()].as(); + auto secondary_properties_it = secondary_properties.find(subprop_device_name); + if (secondary_properties_it == secondary_properties.end()) { + // 2.1.1. No device name in map yet, insert all config as is + secondary_properties[subprop_device_name] = secondary_property->second; + } else { + if (secondary_properties_it->second.is()) { // because of legacy API 1.0 + secondary_properties_it->second = secondary_properties_it->second.as(); + } + // 2.1.2. Device name is present in config file, merge properties according to: + // ov::device::properties() overrides ov::device::properties(ov::AnyMap{}) + auto& secondary_device_properties = secondary_properties_it->second.as(); + for (auto& item : secondary_property->second.as()) { + secondary_device_properties[item.first] = item.second; + } + } + } + + // 3. 
since the sub-property is flattened, we need to drop it + secondary_property = result_properties.erase(secondary_property); + } + + // Second search for ov::device::properties(ov::AnyMap{...}) + for (auto property = result_properties.begin(); property != result_properties.end();) { + if (property->first != ov::device::properties.name()) { + // 1. Skip non-matching properties + property++; + continue; + } + + // 2. device properties DEVICE_PROPERTIES are found + if (property->second.is()) { // because of legacy API 1.0 + property->second = property->second.as(); + } + auto& secondary_properties = property->second.as(); + + for (auto secondary_property = secondary_properties.begin(); + secondary_property != secondary_properties.end();) { + // flattening is performed only when config is applicable (see docs for ov::is_config_applicable) + if (ov::is_config_applicable(user_device_name, secondary_property->first)) { + // 2.1. flatten the secondary property for target device + // example: core.compile_model("GPU", ov::device::properties("GPU", ov::prop1)); + // example: core.compile_model("GPU.1", ov::device::properties("GPU", ov::prop1)); + update_result_properties(secondary_property->second.as()); + secondary_property = secondary_properties.erase(secondary_property); + } else if (is_virtual_device(user_device_name)) { + // 2.2. keep the secondary property for the other virtual devices + secondary_property++; + continue; + } else { + // 2.3. remove the secondary property setting for other hardware device + // example: core.compile_model("GPU", ov::device::properties("CPU", ov::prop1)); + secondary_property = secondary_properties.erase(secondary_property); + } + } + + // 3. go to the next property + if (secondary_properties.empty()) { + // 3.1. since the sub-property is flattened, we need to drop it + property = result_properties.erase(property); + } else { + // 3.2. 
some properties are still in ov::device::properties(ov::AnyMap{}), abort loop + break; + } + } + + return result_properties; +} + +enum class MatchType { EXACT = 0, SUBSTR }; + +struct DevicePriority { + std::string prop_name; + MatchType match_type; +}; + +DevicePriority get_device_priority_property(const std::string& device_name) { + return is_virtual_device(device_name) + ? DevicePriority{ov::device::priorities.name(), MatchType::EXACT} + : + // ov::device::properties(GPU.0) can be applied for GPU tile identified by GPU.0.0 + DevicePriority{ov::device::id.name(), MatchType::SUBSTR}; +} + +void clean_batch_properties(const std::string& deviceName, ov::AnyMap& config, const ov::PropertyName& property_name) { + // auto-batching is not applicable, if there is auto_batch_timeout, delete it + if (deviceName.find("BATCH") == std::string::npos) { + const auto& batch_timeout_mode = config.find(property_name); + if (batch_timeout_mode != config.end()) { + if (!is_virtual_device(deviceName)) + config.erase(batch_timeout_mode); + } + } +} + } // namespace +bool ov::is_config_applicable(const std::string& user_device_name, const std::string& subprop_device_name) { + // full match + if (user_device_name == subprop_device_name) + return true; + + auto parsed_user_device_name = ov::parseDeviceNameIntoConfig(user_device_name); + auto parsed_subprop_device_name = ov::parseDeviceNameIntoConfig(subprop_device_name); + + // if device name is matched, check additional condition + auto is_matched = [&](const std::string& key, MatchType match_type) -> bool { + auto user_value = + parsed_user_device_name._config.count(key) ? parsed_user_device_name._config.at(key).as() : ""; + auto subprop_value = parsed_subprop_device_name._config.count(key) + ? parsed_subprop_device_name._config.at(key).as() + : ""; + + if (!user_value.empty() && subprop_value.empty()) { + // property without additional limitation can be applied + return true; + } + return match_type == MatchType::EXACT ? 
(user_value == subprop_value) : (user_value.find(subprop_value) == 0); + return false; + }; + + if (parsed_user_device_name._deviceName == parsed_subprop_device_name._deviceName) { + auto device_priority = get_device_priority_property(parsed_user_device_name._deviceName); + return is_matched(device_priority.prop_name, device_priority.match_type); + } + + return false; +} + +ov::Parsed ov::parseDeviceNameIntoConfig(const std::string& deviceName, const AnyMap& config) { + auto updated_config = config; + auto updated_device_name = deviceName; + + /** Note: auto-batching is already applied by this time, so the call: + * core.compile_model("GPU", ov::device::properties("BATCH", ov::auto_batch_timeout(400))); + * is transformed and we have here: + * ov::parseDeviceNameIntoConfig("BATCH", ov::device::priorities("GPU"), + * ov::device::properties("BATCH", + * ov::auto_batch_timeout(400))); + * so, after 'flatten_sub_properties' we will have: + * core.compile_model("BATCH", ov::auto_batch_timeout(400), + * ov::device::priorities("GPU")); + * + * So, if one day, we want to add more options in form of ov::allow_, we need to apply it before + * 'flatten_sub_properties' call to have proper behavior + */ + + updated_config = flatten_sub_properties(deviceName, updated_config); + std::string parsed_device_priority; + + // try to find ':' to extract name of virtual device + auto pos = deviceName.find_first_of(':'); + if (pos != std::string::npos) { + updated_device_name = deviceName.substr(0, pos); + parsed_device_priority = deviceName.substr(pos + 1); + } else { + InferenceEngine::DeviceIDParser parser(deviceName); + updated_device_name = parser.getDeviceName(); + parsed_device_priority = parser.getDeviceID(); + } + + // checks and updates device priority + if (!parsed_device_priority.empty()) { + const auto priority_prop_name = get_device_priority_property(updated_device_name).prop_name; + const auto it = updated_config.find(priority_prop_name); + if (it == updated_config.end()) 
+ updated_config[priority_prop_name] = parsed_device_priority; + else if (it->second == parsed_device_priority) { + // do nothing + } else { + IE_THROW() << "Device priority / ID mismatch: " << parsed_device_priority << " (from " << deviceName + << ") vs " << it->second.as() << " (from config)"; + } + }; + + // clean-up auto-batch related properties + clean_batch_properties(updated_device_name, updated_config, ov::hint::allow_auto_batching.name()); + clean_batch_properties(updated_device_name, updated_config, ov::auto_batch_timeout.name()); + + return {updated_device_name, updated_config}; +} + ov::CoreImpl::CoreImpl(bool _newAPI) : m_new_api(_newAPI) { add_mutex(""); // Register global mutex m_executor_manager = ov::threading::executor_manager(); @@ -279,7 +522,6 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::shared_ptr< ov::AnyMap config_with_batch = config; // if auto-batching is applicable, the below function will patch the device name and config accordingly: apply_auto_batching(model, deviceName, config_with_batch); - clean_properties(deviceName, config_with_batch, ov::auto_batch_timeout); auto parsed = parseDeviceNameIntoConfig(deviceName, config_with_batch); auto plugin = get_plugin(parsed._deviceName); @@ -305,15 +547,12 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::shared_ptr< if (context._impl == nullptr) { IE_THROW() << "Remote context is null"; } - // have to deduce the device name/config from the context first - auto parsed = parseDeviceNameIntoConfig(context.get_device_name(), config); - std::string& deviceName = parsed._deviceName; - auto& config_with_batch = parsed._config; + std::string deviceName = context.get_device_name(); + ov::AnyMap config_with_batch = config; // if auto-batching is applicable, the below function will patch the device name and config accordingly: apply_auto_batching(model, deviceName, config_with_batch); - clean_properties(deviceName, config_with_batch, ov::auto_batch_timeout); - parsed = 
parseDeviceNameIntoConfig(deviceName, config_with_batch); + auto parsed = parseDeviceNameIntoConfig(deviceName, config_with_batch); auto plugin = get_plugin(parsed._deviceName); ov::SoPtr res; auto cacheManager = coreConfig.get_cache_config_for_device(plugin, parsed._config)._cacheManager; @@ -354,6 +593,7 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::string& mod const ov::AnyMap& config) const { OV_ITT_SCOPE(FIRST_INFERENCE, ie::itt::domains::IE_LT, "Core::compile_model::Path"); auto parsed = parseDeviceNameIntoConfig(device_name, config); + // in case of compile_model(file_name), we need to clear-up core-level properties auto plugin = get_plugin(parsed._deviceName); ov::SoPtr compiled_model; @@ -384,6 +624,7 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::string& mod const ov::AnyMap& config) const { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::compile_model::from_memory"); auto parsed = parseDeviceNameIntoConfig(device_name, config); + // in case of compile_model(file_name), we need to clear-up core-level properties auto plugin = get_plugin(parsed._deviceName); ov::SoPtr compiled_model; @@ -395,7 +636,7 @@ ov::SoPtr ov::CoreImpl::compile_model(const std::string& mod auto lock = cacheGuard.get_hash_lock(cacheContent.blobId); compiled_model = load_model_from_cache(cacheContent, plugin, parsed._config, ov::RemoteContext{}, [&]() { auto cnnNetwork = read_model(model_str, weights); - return compile_model_and_cache(cnnNetwork, plugin, parsed._config, {}, cacheContent); + return compile_model_and_cache(cnnNetwork, plugin, parsed._config, ov::RemoteContext{}, cacheContent); }); } else { auto model = read_model(model_str, weights); @@ -409,7 +650,7 @@ ov::SoPtr ov::CoreImpl::import_model(std::istream& model, const ov::AnyMap& config) const { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model"); auto parsed = parseDeviceNameIntoConfig(device_name, config); - auto compiled_model = get_plugin(parsed._deviceName).import_model(model, config); + auto 
compiled_model = get_plugin(parsed._deviceName).import_model(model, parsed._config); if (auto wrapper = std::dynamic_pointer_cast(compiled_model._ptr)) { wrapper->get_executable_network()->loadedFromCache(); } @@ -422,8 +663,7 @@ ov::SupportedOpsMap ov::CoreImpl::query_model(const std::shared_ptr ov::CoreImpl::get_available_devices() const { @@ -461,56 +701,77 @@ std::vector ov::CoreImpl::get_available_devices() const { return devices; } -ov::RemoteContext ov::CoreImpl::create_context(const std::string& device_name, const AnyMap& args) const { - auto parsed = ov::parseDeviceNameIntoConfig(device_name, args); +ov::RemoteContext ov::CoreImpl::create_context(const std::string& device_name, const AnyMap& params) const { + auto parsed = ov::parseDeviceNameIntoConfig(device_name, params); return get_plugin(parsed._deviceName).create_context(parsed._config); } -ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& device_name, const ov::AnyMap& config) const { - std::vector supportedConfigKeys; +ov::AnyMap ov::CoreImpl::get_supported_property(const std::string& full_device_name, + const ov::AnyMap& user_properties) const { + static const std::vector core_level_properties = { + ov::cache_dir.name(), + ov::force_tbb_terminate.name(), + // auto-batch properties are also treated as core-level + ov::auto_batch_timeout.name(), + ov::hint::allow_auto_batching.name(), + }; + + const auto flattened = ov::parseDeviceNameIntoConfig(full_device_name, user_properties); + const std::string& device_name = flattened._deviceName; + const auto& flattened_config = flattened._config; + ov::AnyMap supported_config, options; + + // fill 'options' to provide more information to ICore::get_property calls + { + auto priority_prop_name = get_device_priority_property(device_name).prop_name; + auto it = flattened_config.find(priority_prop_name); + if (it != flattened_config.end()) + options[it->first] = it->second; + else if (device_name == "HETERO") { + // TODO: remove together with 
API 1.0 + priority_prop_name = "TARGET_FALLBACK"; + it = flattened_config.find(priority_prop_name); + if (it != flattened_config.end()) + options[it->first] = it->second; + } else if (device_name == "BATCH") { + // TODO: remove together with API 1.0 + priority_prop_name = CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG); + it = flattened_config.find(priority_prop_name); + if (it != flattened_config.end()) + options[it->first] = it->second; + } + } + + // virtual plugins should bypass core-level properties to HW plugins + // so, we need to report them as supported + std::vector supported_config_keys = core_level_properties; // try to search against IE API 1.0' SUPPORTED_CONFIG_KEYS try { - supportedConfigKeys = GetMetric(device_name, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); + const auto supported_keys = + GetMetric(device_name, METRIC_KEY(SUPPORTED_CONFIG_KEYS), options).as>(); + for (auto&& config_key : supported_keys) { + supported_config_keys.emplace_back(config_key); + } } catch (ov::Exception&) { } // try to search against OV API 2.0' supported_properties try { - for (auto&& property : ICore::get_property(device_name, ov::supported_properties)) { + for (auto&& property : ICore::get_property(device_name, ov::supported_properties, options)) { if (property.is_mutable()) { - supportedConfigKeys.emplace_back(std::move(property)); + supported_config_keys.emplace_back(std::move(property)); } } } catch (ov::Exception&) { } - ov::AnyMap supportedConfig; - for (auto&& key : supportedConfigKeys) { - auto itKey = config.find(key); - if (config.end() != itKey) { - supportedConfig[key] = itKey->second; + for (auto&& kvp : flattened_config) { + if (util::contains(supported_config_keys, kvp.first)) { + supported_config[kvp.first] = kvp.second; } } - - for (auto&& config : config) { - auto parsed = parseDeviceNameIntoConfig(config.first); - if (device_name.find(parsed._deviceName) != std::string::npos) { - std::stringstream strm(config.second.as()); - std::map device_configs; - 
util::Read>{}(strm, device_configs); - for (auto&& device_config : device_configs) { - if (util::contains(supportedConfigKeys, device_config.first)) { - supportedConfig[device_config.first] = device_config.second; - } - } - for (auto&& config : parsed._config) { - supportedConfig[config.first] = config.second.as(); - } - } - } - - return supportedConfig; + return supported_config; } bool ov::CoreImpl::is_new_api() const { @@ -518,7 +779,7 @@ bool ov::CoreImpl::is_new_api() const { } ov::RemoteContext ov::CoreImpl::get_default_context(const std::string& device_name) const { - auto parsed = ov::parseDeviceNameIntoConfig(device_name, ov::AnyMap{}); + auto parsed = ov::parseDeviceNameIntoConfig(device_name); return get_plugin(parsed._deviceName).get_default_context(parsed._config); } @@ -545,6 +806,7 @@ void ov::CoreImpl::apply_auto_batching(const std::shared_ptr& m } catch (const std::runtime_error&) { return; } + // check whether the Auto-Batching is disabled explicitly const auto& batch_mode = config.find(ov::hint::allow_auto_batching.name()); if (batch_mode != config.end()) { @@ -552,26 +814,30 @@ void ov::CoreImpl::apply_auto_batching(const std::shared_ptr& m // virtual plugins like AUTO/MULTI will need the config // e.g to deduce the #requests correctly // otherwise, no need for this config key in the rest of loading - if (deviceName.find("AUTO") == std::string::npos && deviceName.find("MULTI") == std::string::npos) + if (!is_virtual_device(deviceName)) config.erase(batch_mode); if (disabled) return; } else if (!coreConfig.get_allow_auto_batch()) { + if (is_virtual_device(deviceName)) { + config[ov::hint::allow_auto_batching.name()] = coreConfig.get_allow_auto_batch(); + } return; } // check whether if the Auto-Batching is applicable to the device - auto device = ov::parseDeviceNameIntoConfig(deviceName); + auto parsed = ov::parseDeviceNameIntoConfig(deviceName); deviceNameWithoutBatch = deviceName; - auto d = device._deviceName; - std::vector metrics = - 
get_plugin(d).get_property(METRIC_KEY(SUPPORTED_METRICS), {}).as>(); + std::vector metrics = get_plugin(parsed._deviceName) + .get_property(METRIC_KEY(SUPPORTED_METRICS), parsed._config) + .as>(); auto it = std::find(metrics.begin(), metrics.end(), METRIC_KEY(OPTIMAL_BATCH_SIZE)); if (metrics.end() == it) return; // if applicable, the Auto-Batching is implicitly enabled via the performance hints - bool bTputInPlg = GetConfig(d, CONFIG_KEY(PERFORMANCE_HINT)).as() == CONFIG_VALUE(THROUGHPUT); + bool bTputInPlg = + GetConfig(parsed._deviceName, CONFIG_KEY(PERFORMANCE_HINT)).as() == CONFIG_VALUE(THROUGHPUT); const auto& mode = config.find(CONFIG_KEY(PERFORMANCE_HINT)); bool bTputInLoadCfg = (mode != config.end() && mode->second.as() == CONFIG_VALUE(THROUGHPUT)); const auto& excl = config.find(CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)); @@ -594,17 +860,6 @@ void ov::CoreImpl::apply_auto_batching(const std::shared_ptr& m } } -void ov::CoreImpl::clean_properties(std::string& deviceName, ov::AnyMap& config, ov::Any property) const { - // auto-batching is not applicable, if there is auto_batch_timeout, delete it - if (deviceName.find("BATCH") == std::string::npos) { - const auto& batch_timeout_mode = config.find(property.as()); - if (batch_timeout_mode != config.end()) { - if (deviceName.find("AUTO") == std::string::npos && deviceName.find("MULTI") == std::string::npos) - config.erase(batch_timeout_mode); - } - } -} - void ov::CoreImpl::set_property(const std::string& device_name, const AnyMap& properties) { OPENVINO_ASSERT(device_name.find("HETERO:") != 0, "set_property is supported only for HETERO itself (without devices). 
" @@ -622,11 +877,7 @@ void ov::CoreImpl::set_property(const std::string& device_name, const AnyMap& pr // unsupport to set ov::device::properties to HW device through this function auto devices = get_registered_devices(); for (auto&& config : properties) { - auto parsed = parseDeviceNameIntoConfig(config.first); - auto is_secondary_config_for_hw_device = - std::any_of(devices.begin(), devices.end(), [&](const std::string& device) { - return device == parsed._deviceName; - }); + auto is_secondary_config_for_hw_device = config.first.find(ov::device::properties.name()) != std::string::npos; OPENVINO_ASSERT(!is_secondary_config_for_hw_device, "set_property do not support ov::device::propreties. " "You can configure the devices through the compile_model()/loadNetwork() API."); @@ -650,7 +901,7 @@ ov::Any ov::CoreImpl::get_property_for_core(const std::string& name) const { ov::Any ov::CoreImpl::get_property(const std::string& device_name, const std::string& name, - const AnyMap& arguments) const { + const AnyMap& options) const { OPENVINO_ASSERT(device_name.find("HETERO:") != 0, "You can only get_property of the HETERO itself (without devices). " "get_property is also possible for the individual devices before creating the HETERO on top."); @@ -664,7 +915,7 @@ ov::Any ov::CoreImpl::get_property(const std::string& device_name, "You can only get_property of the BATCH itself (without devices). 
" "get_property is also possible for the individual devices before creating the BATCH on top."); - auto parsed = parseDeviceNameIntoConfig(device_name, arguments); + auto parsed = parseDeviceNameIntoConfig(device_name, options); if (parsed._deviceName.empty()) { return get_property_for_core(name); @@ -841,8 +1092,7 @@ const std::vector& ov::CoreImpl::GetExtensions() bool ov::CoreImpl::device_supports_import_export(const std::string& deviceName) const { auto parsed = parseDeviceNameIntoConfig(deviceName); - auto plugin = get_plugin(parsed._deviceName); - return device_supports_import_export(plugin); + return device_supports_import_export(get_plugin(parsed._deviceName)); } bool ov::CoreImpl::device_supports_property(const ov::Plugin& plugin, const std::string& key) const { @@ -944,22 +1194,22 @@ ov::SoPtr ov::CoreImpl::load_model_from_cache( return compiled_model; } -ov::AnyMap ov::CoreImpl::create_compile_config(const ov::Plugin& plugin, const ov::AnyMap& origConfig) const { +ov::AnyMap ov::CoreImpl::create_compile_config(const ov::Plugin& plugin, const ov::AnyMap& user_config) const { ov::AnyMap property_config; - ov::AnyMap compileConfig; + ov::AnyMap compile_config; // 0. Move TARGET_FALLBACK key to property_config - auto targetFallbackIt = origConfig.find("TARGET_FALLBACK"); - if (targetFallbackIt == origConfig.end()) { - targetFallbackIt = origConfig.find(ov::device::priorities.name()); + auto targetFallbackIt = user_config.find("TARGET_FALLBACK"); + if (targetFallbackIt == user_config.end()) { + targetFallbackIt = user_config.find(ov::device::priorities.name()); } - if (targetFallbackIt != origConfig.end()) { + if (targetFallbackIt != user_config.end()) { property_config[targetFallbackIt->first] = targetFallbackIt->second.as(); } // 1. 
Move DEVICE_ID key to property_config - auto deviceIt = origConfig.find(ov::device::id.name()); - if (deviceIt != origConfig.end()) { + auto deviceIt = user_config.find(ov::device::id.name()); + if (deviceIt != user_config.end()) { property_config[deviceIt->first] = deviceIt->second.as(); } else { // we likely need to extract default device_id from the plugin, @@ -969,22 +1219,23 @@ ov::AnyMap ov::CoreImpl::create_compile_config(const ov::Plugin& plugin, const o // 2. Replace DEVICE_ID with DEVICE_ARCHITECTURE value to identify device if (device_supports_property(plugin, ov::device::architecture.name())) { - compileConfig[ov::device::architecture.name()] = plugin.get_property(ov::device::architecture, property_config); + compile_config[ov::device::architecture.name()] = + plugin.get_property(ov::device::architecture, property_config); } else { // Take device name if device does not support DEVICE_ARCHITECTURE metric - compileConfig[ov::device::architecture.name()] = plugin.get_name(); + compile_config[ov::device::architecture.name()] = plugin.get_name(); } // 3. Extract config keys which affect compilation process if (device_supports_property(plugin, ov::caching_properties.name())) { auto cachingProps = plugin.get_property(ov::caching_properties); for (const auto& prop : cachingProps) { - // origConfig values have higher priority than plugin parameters - auto it = origConfig.find(prop); - compileConfig[prop] = it == origConfig.end() ? plugin.get_property(prop, property_config) : it->second; + // user_config values have higher priority than plugin parameters + auto it = user_config.find(prop); + compile_config[prop] = it == user_config.end() ? 
plugin.get_property(prop, property_config) : it->second; } } - return compileConfig; + return compile_config; } void ov::CoreImpl::AddExtensionUnsafe(const InferenceEngine::IExtensionPtr& extension) const { diff --git a/src/inference/src/dev/core_impl.hpp b/src/inference/src/dev/core_impl.hpp index 49c13d4f406..2277d70b9d0 100644 --- a/src/inference/src/dev/core_impl.hpp +++ b/src/inference/src/dev/core_impl.hpp @@ -31,42 +31,26 @@ namespace ov { const std::string DEFAULT_DEVICE_NAME = "DEFAULT_DEVICE"; -template struct Parsed { std::string _deviceName; - std::map _config; + AnyMap _config; }; -template -ov::Parsed parseDeviceNameIntoConfig(const std::string& deviceName, const std::map& config = {}) { - auto config_ = config; - auto deviceName_ = deviceName; - if (deviceName_.find("HETERO:") == 0) { - deviceName_ = "HETERO"; - config_["TARGET_FALLBACK"] = deviceName.substr(7); - } else if (deviceName_.find("MULTI:") == 0) { - deviceName_ = "MULTI"; - config_[InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES] = deviceName.substr(6); - } else if (deviceName == "AUTO" || deviceName.find("AUTO:") == 0) { - deviceName_ = "AUTO"; - if (deviceName.find("AUTO:") == 0) { - config_[InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES] = - deviceName.substr(std::string("AUTO:").size()); - } - } else if (deviceName_.find("BATCH:") == 0) { - deviceName_ = "BATCH"; - config_[CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG)] = deviceName.substr(6); - } else { - InferenceEngine::DeviceIDParser parser(deviceName_); - deviceName_ = parser.getDeviceName(); - std::string deviceIDLocal = parser.getDeviceID(); +Parsed parseDeviceNameIntoConfig(const std::string& deviceName, const AnyMap& config = {}); - if (!deviceIDLocal.empty()) { - config_[InferenceEngine::PluginConfigParams::KEY_DEVICE_ID] = deviceIDLocal; - } - } - return {deviceName_, config_}; -} +/** + * @brief Checks whether config is applicable for device with 'device_name' + * @code + * 
core.compile_model(, model, ov::device::properties(, ...)); + * @endcode + * The common logic behind this is that 'device_name_to_parse' should match 'device_name' or be more + * generic (e.g. GPU is more generic than GPU.x) + * + * @param device_name Target device + * @param device_name_to_parse Device ID of property + * @return true if ov::device::properties(, ...) is applicable for device identified by 'device_name + */ +bool is_config_applicable(const std::string& device_name, const std::string& device_name_to_parse); #ifndef OPENVINO_STATIC_LIBRARY @@ -234,8 +218,6 @@ public: std::string& deviceName, ov::AnyMap& config) const; - void clean_properties(std::string& deviceName, ov::AnyMap& config, ov::Any property) const; - #ifdef OPENVINO_STATIC_LIBRARY /** @@ -425,7 +407,7 @@ public: ov::RemoteContext create_context(const std::string& device_name, const AnyMap& args) const override; - ov::AnyMap get_supported_property(const std::string& device_name, const ov::AnyMap& config) const; + ov::AnyMap get_supported_property(const std::string& device_name, const ov::AnyMap& config) const override; bool is_new_api() const override; diff --git a/src/inference/src/dev/core_impl_ie.cpp b/src/inference/src/dev/core_impl_ie.cpp index 20c7ed35866..cbf60f265ba 100644 --- a/src/inference/src/dev/core_impl_ie.cpp +++ b/src/inference/src/dev/core_impl_ie.cpp @@ -73,11 +73,9 @@ ov::SoPtr ov::CoreImpl::LoadNetwork IE_THROW() << "Remote context is null"; } // have to deduce the device name/config from the context first - auto parsed = parseDeviceNameIntoConfig(context->getDeviceName(), config); - + auto parsed = parseDeviceNameIntoConfig(context->getDeviceName(), any_copy(config)); auto plugin = get_plugin(parsed._deviceName); - - auto res = LoadNetworkImpl(network, plugin, parsed._config, context); + auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config), context); return res; } @@ -91,9 +89,9 @@ InferenceEngine::SoExecutableNetworkInternal ov::CoreImpl::LoadNetwork( 
compile_model(ov::legacy_convert::convert_model(network, isNewAPI()), deviceName, any_copy(config)); return {ov::legacy_convert::convert_compiled_model(compiled_model._ptr), compiled_model._so}; } - auto parsed = parseDeviceNameIntoConfig(deviceName, config); + auto parsed = parseDeviceNameIntoConfig(deviceName, any_copy(config)); auto plugin = get_plugin(parsed._deviceName); - auto res = LoadNetworkImpl(network, plugin, parsed._config, nullptr); + auto res = LoadNetworkImpl(network, plugin, any_copy(parsed._config), nullptr); return {res._ptr, res._so}; } @@ -188,11 +186,7 @@ ov::Any ov::CoreImpl::GetMetric(const std::string& deviceName, } } - auto parsed = parseDeviceNameIntoConfig(deviceName); - for (auto o : options) { - parsed._config.insert(o); - } - + auto parsed = parseDeviceNameIntoConfig(deviceName, options); return get_plugin(parsed._deviceName).get_property(name, parsed._config); } @@ -225,43 +219,7 @@ bool ov::CoreImpl::DeviceSupportsImportExport(const std::string& deviceName) con std::map ov::CoreImpl::GetSupportedConfig(const std::string& deviceName, const std::map& configs) { - std::vector supportedConfigKeys; - try { - supportedConfigKeys = GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); - } catch (ov::Exception&) { - } - try { - for (auto&& property : ICore::get_property(deviceName, ov::supported_properties)) { - if (property.is_mutable()) { - supportedConfigKeys.emplace_back(std::move(property)); - } - } - } catch (ov::Exception&) { - } - std::map supportedConfig; - for (auto&& key : supportedConfigKeys) { - auto itKey = configs.find(key); - if (configs.end() != itKey) { - supportedConfig[key] = itKey->second; - } - } - for (auto&& config : configs) { - auto parsed = parseDeviceNameIntoConfig(config.first); - if (deviceName.find(parsed._deviceName) != std::string::npos) { - std::stringstream strm(config.second); - std::map device_configs; - util::Read>{}(strm, device_configs); - for (auto&& device_config : device_configs) { - if 
(ov::util::contains(supportedConfigKeys, device_config.first)) { - supportedConfig[device_config.first] = device_config.second; - } - } - for (auto&& config : parsed._config) { - supportedConfig[config.first] = config.second.as(); - } - } - } - return supportedConfig; + return ov::any_copy(get_supported_property(deviceName, any_copy(configs))); } std::map ov::CoreImpl::GetVersions(const std::string& deviceName) const { diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp index 66398306e1e..799e284f58b 100644 --- a/src/inference/src/ie_core.cpp +++ b/src/inference/src/ie_core.cpp @@ -203,11 +203,11 @@ ExecutableNetwork Core::ImportNetwork(const std::string& modelFileName, const std::string& deviceName, const std::map& config) { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::ImportNetwork"); - auto parsed = ov::parseDeviceNameIntoConfig(deviceName, config); + auto parsed = ov::parseDeviceNameIntoConfig(deviceName, ov::any_copy(config)); std::ifstream modelStream(modelFileName, std::ios::binary); if (!modelStream.is_open()) IE_THROW(NetworkNotRead) << "Model file " << modelFileName << " cannot be opened!"; - auto exec = _impl->get_plugin(parsed._deviceName).import_model(modelStream, ov::any_copy(parsed._config)); + auto exec = _impl->get_plugin(parsed._deviceName).import_model(modelStream, parsed._config); return {ov::legacy_convert::convert_compiled_model(exec._ptr), exec._so}; } @@ -254,11 +254,11 @@ ExecutableNetwork Core::ImportNetwork(std::istream& networkModel, DeviceIDParser device(deviceName_); std::string deviceName = device.getDeviceName(); - auto parsed = ov::parseDeviceNameIntoConfig(deviceName, config); + auto parsed = ov::parseDeviceNameIntoConfig(deviceName, ov::any_copy(config)); auto exec = _impl->get_plugin(deviceName) .import_model(networkModel, ov::RemoteContext{ov::legacy_convert::convert_remote_context(context), {}}, - ov::any_copy(parsed._config)); + parsed._config); return 
{ov::legacy_convert::convert_compiled_model(exec._ptr), exec._so}; } diff --git a/src/inference/tests/unit/core.cpp b/src/inference/tests/unit/core.cpp index ac3744762f4..673661cbf18 100644 --- a/src/inference/tests/unit/core.cpp +++ b/src/inference/tests/unit/core.cpp @@ -130,3 +130,254 @@ TEST(CoreTests_get_plugin_path, Use_filename_as_is_if_not_exist_in_workdir) { auto ref_path = FileUtils::makePluginLibraryName({}, std::string(lib_name)); EXPECT_STREQ(abs_path.c_str(), ref_path.c_str()); } + +TEST(CoreTests_check_device_name, is_config_applicable) { + // Single device + ASSERT_EQ(ov::is_config_applicable("DEVICE", "DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("DEVICE.", "DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("DEVICE", "DEVICE."), true); + ASSERT_EQ(ov::is_config_applicable("DEVICE.x", "DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("DEVICE.x.y", "DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("DEVICE.x", "DEVICE.x"), true); + ASSERT_EQ(ov::is_config_applicable("DEVICE.x.y", "DEVICE.x"), true); // sub-device and device + ASSERT_EQ(ov::is_config_applicable("DEVICE", "DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("DEVICE.x", "DEVICE.y"), false); + ASSERT_EQ(ov::is_config_applicable("DEVICE.x.y", "DEVICE.y"), false); + // HETERO + ASSERT_EQ(ov::is_config_applicable("HETERO", "HETERO"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO.", "HETERO"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO", "HETERO."), true); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE", "HETERO:DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE.x", "HETERO:DEVICE.x"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE", "HETERO"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE.x", "HETERO"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE.x,DEVICE.y", "HETERO:DEVICE.x,DEVICE.y"), true); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE.x", "HETERO:DEVICE.x,DEVICE.y"), false); + 
ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE.x,DEVICE.y", "HETERO:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("HETERO:DEVICE", "HETERO:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("HETERO", "HETERO:DEVICE"), false); + // MULTI + ASSERT_EQ(ov::is_config_applicable("MULTI", "MULTI"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI.", "MULTI"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI", "MULTI."), true); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE", "MULTI:DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE.x", "MULTI:DEVICE.x"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE", "MULTI"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE.x", "MULTI"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE.x,DEVICE.y", "MULTI:DEVICE.x,DEVICE.y"), true); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE.x", "MULTI:DEVICE.x,DEVICE.y"), false); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE.x,DEVICE.y", "MULTI:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("MULTI:DEVICE", "MULTI:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("MULTI", "MULTI:DEVICE"), false); + // AUTO + ASSERT_EQ(ov::is_config_applicable("AUTO", "AUTO"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO.", "AUTO"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO", "AUTO."), true); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE", "AUTO:DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE.x", "AUTO:DEVICE.x"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE.x,DEVICE.y", "AUTO:DEVICE.x,DEVICE.y"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE", "AUTO"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE.x", "AUTO"), true); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE.x", "AUTO:DEVICE.x,DEVICE.y"), false); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE.x,DEVICE.y", "AUTO:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("AUTO:DEVICE", 
"AUTO:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("AUTO", "AUTO:DEVICE"), false); + // BATCH + ASSERT_EQ(ov::is_config_applicable("BATCH", "BATCH"), true); + ASSERT_EQ(ov::is_config_applicable("BATCH.", "BATCH"), true); + ASSERT_EQ(ov::is_config_applicable("BATCH", "BATCH."), true); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE", "BATCH:DEVICE"), true); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE.x", "BATCH:DEVICE.x"), true); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE", "BATCH"), true); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE.x", "BATCH"), true); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE.x", "BATCH:DEVICE.x,DEVICE.y"), false); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE.x,DEVICE.y", "BATCH:DEVICE.x"), false); + ASSERT_EQ(ov::is_config_applicable("BATCH:DEVICE.x", "BATCH:DEVICE.y"), false); + ASSERT_EQ(ov::is_config_applicable("BATCH", "BATCH:DEVICE"), false); +} + +TEST(CoreTests_parse_device_config, get_device_config) { + auto check_parsed_config = [&](const std::string& device, + const ov::AnyMap& config, + const std::string& expected_device, + const ov::AnyMap& expected_config) { + auto parsed = ov::parseDeviceNameIntoConfig(device, config); + ASSERT_EQ(parsed._deviceName, expected_device); + ASSERT_EQ(ov::Any(parsed._config).as(), ov::Any(expected_config).as()); + }; + // Single device + check_parsed_config("DEVICE.0", ov::AnyMap{}, "DEVICE", ov::AnyMap{ov::device::id("0")}); + // simple flattening + check_parsed_config("DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR))}, + "DEVICE", + ov::AnyMap{ov::log::level(ov::log::Level::ERR)}); + // sub-property has flattened, property is kept as is, device_id is moved to property + check_parsed_config( + "DEVICE.X", + ov::AnyMap{ov::num_streams(5), ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR))}, + "DEVICE", + ov::AnyMap{ov::device::id("X"), ov::num_streams(5), 
ov::log::level(ov::log::Level::ERR)}); + // explicit device sub-property has higher priority than ov::AnyMap + check_parsed_config( + "DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "DEVICE", + ov::AnyMap{ov::log::level(ov::log::Level::ERR)}); + // property always has higher priority than sub-property + check_parsed_config( + "DEVICE", + ov::AnyMap{ov::log::level(ov::log::Level::DEBUG), + ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "DEVICE", + ov::AnyMap{ov::log::level(ov::log::Level::ERR)}); + // DEVICE.X is not applicable for DEVICE + check_parsed_config( + "DEVICE", + ov::AnyMap{ov::device::properties("DEVICE.X", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "DEVICE", + ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}); + // properties for another device (for example, MULTI) are dropped + check_parsed_config("DEVICE", + ov::AnyMap{ov::device::properties("MULTI", ov::log::level(ov::log::Level::ERR))}, + "DEVICE", + ov::AnyMap{}); + + check_parsed_config("DEVICE.0", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties( + ov::AnyMap{{"DEVICE.0", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "DEVICE", + ov::AnyMap{ov::device::id(0), ov::log::level(ov::log::Level::WARNING)}); + check_parsed_config("DEVICE.0.1", + ov::AnyMap{ov::device::properties("DEVICE.0.1", ov::log::level(ov::log::Level::INFO)), + ov::device::properties( + ov::AnyMap{{"DEVICE.0", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "DEVICE", + ov::AnyMap{ov::device::id("0.1"), ov::log::level(ov::log::Level::INFO)}); + + // device ID mismatch + 
EXPECT_THROW(ov::parseDeviceNameIntoConfig("DEVICE.X", ov::AnyMap{ov::device::id("Y")}), + InferenceEngine::Exception); + + // HETERO + check_parsed_config("HETERO:DEVICE", ov::AnyMap{}, "HETERO", ov::AnyMap{ov::device::priorities("DEVICE")}); + check_parsed_config( + "HETERO:DEVICE", + ov::AnyMap{ov::device::properties("ANOTHER_DEVICE", ov::log::level(ov::log::Level::ERR))}, + "HETERO", + ov::AnyMap{ + ov::device::priorities("DEVICE"), + ov::device::properties(ov::AnyMap{{"ANOTHER_DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + check_parsed_config( + "HETERO:DEVICE", + ov::AnyMap{ov::device::properties("HETERO", ov::log::level(ov::log::Level::WARNING)), + ov::device::properties("ANOTHER_DEVICE", ov::log::level(ov::log::Level::ERR))}, + "HETERO", + ov::AnyMap{ + ov::device::priorities("DEVICE"), + ov::log::level(ov::log::Level::WARNING), + ov::device::properties(ov::AnyMap{{"ANOTHER_DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + check_parsed_config( + "HETERO:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::num_streams(5)}}})}, + "HETERO", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties( + ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR), ov::num_streams(5)}}})}); + check_parsed_config( + "HETERO:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "HETERO", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + // device priorities mismatch + EXPECT_THROW(ov::parseDeviceNameIntoConfig("HETERO:DEVICE", ov::AnyMap{ov::device::priorities("ANOTHER_DEVICE")}), + InferenceEngine::Exception); + + // MULTI + check_parsed_config("MULTI:DEVICE", 
ov::AnyMap{}, "MULTI", ov::AnyMap{ov::device::priorities("DEVICE")}); + check_parsed_config( + "MULTI:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR))}, + "MULTI", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + check_parsed_config( + "MULTI:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::num_streams(5)}}})}, + "MULTI", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties( + ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR), ov::num_streams(5)}}})}); + check_parsed_config( + "MULTI:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "MULTI", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + + // AUTO + check_parsed_config("AUTO:DEVICE", ov::AnyMap{}, "AUTO", ov::AnyMap{ov::device::priorities("DEVICE")}); + check_parsed_config( + "AUTO:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR))}, + "AUTO", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + check_parsed_config( + "AUTO:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::num_streams(5)}}})}, + "AUTO", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties( + ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR), ov::num_streams(5)}}})}); + check_parsed_config( + "AUTO:DEVICE", + 
ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "AUTO", + ov::AnyMap{ov::device::priorities("DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + + // BATCH + check_parsed_config("BATCH:DEVICE", ov::AnyMap{}, "BATCH", ov::AnyMap{{ov::device::priorities.name(), "DEVICE"}}); + check_parsed_config( + "BATCH:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR))}, + "BATCH", + ov::AnyMap{std::make_pair(ov::device::priorities.name(), "DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + check_parsed_config( + "BATCH:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::num_streams(5)}}})}, + "BATCH", + ov::AnyMap{std::make_pair(ov::device::priorities.name(), "DEVICE"), + ov::device::properties( + ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR), ov::num_streams(5)}}})}); + check_parsed_config( + "BATCH:DEVICE", + ov::AnyMap{ov::device::properties("DEVICE", ov::log::level(ov::log::Level::ERR)), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::WARNING)}}})}, + "BATCH", + ov::AnyMap{std::make_pair(ov::device::priorities.name(), "DEVICE"), + ov::device::properties(ov::AnyMap{{"DEVICE", ov::AnyMap{ov::log::level(ov::log::Level::ERR)}}})}); + + // MIX + check_parsed_config( + "HETERO", + ov::AnyMap{ov::device::properties("HETERO", ov::device::priorities("MULTI,DEVICE")), + ov::device::properties("MULTI", ov::device::priorities("DEVICE"))}, + "HETERO", + ov::AnyMap{ov::device::priorities("MULTI,DEVICE"), + ov::device::properties(ov::AnyMap{{"MULTI", ov::AnyMap{ov::device::priorities("DEVICE")}}})}); +} diff --git 
a/src/plugins/auto/auto_executable_network.cpp b/src/plugins/auto/auto_executable_network.cpp index 2d2d037d0cd..48de040c398 100644 --- a/src/plugins/auto/auto_executable_network.cpp +++ b/src/plugins/auto/auto_executable_network.cpp @@ -45,6 +45,7 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const { ov::PropertyName{ov::optimal_number_of_infer_requests.name(), ov::PropertyMutability::RO}, ov::PropertyName{ov::hint::model_priority.name(), ov::PropertyMutability::RO}, ov::PropertyName{ov::device::priorities.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::properties.name(), ov::PropertyMutability::RO}, ov::PropertyName{ov::execution_devices.name(), ov::PropertyMutability::RO}}; } else if (name == ov::hint::performance_mode) { auto value = _autoSContext->_performanceHint; @@ -61,6 +62,22 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const { } else if (name == ov::device::priorities) { auto value = _autoSContext->_config.find(ov::device::priorities.name()); return decltype(ov::device::priorities)::value_type {value->second.as()}; + } else if (name == ov::device::properties) { + ov::AnyMap all_devices = {}; + if (_autoSchedule->_loadContext[ACTUALDEVICE].isAlready) { + ov::AnyMap device_properties = {}; + auto& context = _autoSchedule->_loadContext[ACTUALDEVICE]; + auto device_supported_metrics = context.executableNetwork->GetMetric(METRIC_KEY(SUPPORTED_METRICS)); + for (auto&& property_name : device_supported_metrics.as>()) { + device_properties[property_name] = context.executableNetwork->GetMetric(property_name); + } + auto device_supported_configs = context.executableNetwork->GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + for (auto&& property_name : device_supported_configs.as>()) { + device_properties[property_name] = context.executableNetwork->GetConfig(property_name); + } + all_devices[context.deviceInfo.deviceName] = device_properties; + } + return all_devices; } else if (name == 
ov::hint::model_priority) { auto value = _autoSContext->_modelPriority; if (_autoSContext->_core->isNewAPI()) { diff --git a/src/plugins/auto/multi_executable_network.cpp b/src/plugins/auto/multi_executable_network.cpp index 131ea0d8fe1..b1a71b41f99 100644 --- a/src/plugins/auto/multi_executable_network.cpp +++ b/src/plugins/auto/multi_executable_network.cpp @@ -103,8 +103,24 @@ IE::Parameter MultiExecutableNetwork::GetMetric(const std::string& name) const { // Configs // device priority can be changed on-the-fly in MULTI ov::PropertyName{ov::device::priorities.name(), ov::PropertyMutability::RW}, + ov::PropertyName{ov::device::properties.name(), ov::PropertyMutability::RO}, ov::PropertyName{ov::execution_devices.name(), ov::PropertyMutability::RO} }; + } else if (name == ov::device::properties) { + ov::AnyMap all_devices = {}; + for (auto network : _multiSContext->_networksPerDevice) { + ov::AnyMap device_properties = {}; + auto device_supported_metrics = network.second->GetMetric(METRIC_KEY(SUPPORTED_METRICS)); + for (auto&& property_name : device_supported_metrics.as>()) { + device_properties[property_name] = network.second->GetMetric(property_name);; + } + auto device_supported_configs = network.second->GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + for (auto&& property_name : device_supported_configs.as>()) { + device_properties[property_name] = network.second->GetConfig(property_name); + } + all_devices[network.first] = device_properties; + } + return all_devices; } else if (name == ov::optimal_number_of_infer_requests) { unsigned int res = 0u; for (auto n : _multiSContext->_networksPerDevice) { diff --git a/src/plugins/auto/plugin.cpp b/src/plugins/auto/plugin.cpp index 5996e090bd3..641de18db3c 100644 --- a/src/plugins/auto/plugin.cpp +++ b/src/plugins/auto/plugin.cpp @@ -135,17 +135,8 @@ std::vector MultiDeviceInferencePlugin::ParseMetaDevices(cons }; auto getDeviceConfig = [&] (const DeviceName & deviceWithID) { - DeviceIDParser 
deviceParser(deviceWithID); - std::string deviceName = deviceParser.getDeviceName(); - std::map tconfig = config; - - // set device ID if any - std::string deviceIDLocal = deviceParser.getDeviceID(); - if (!deviceIDLocal.empty()) { - tconfig[PluginConfigParams::KEY_DEVICE_ID] = deviceIDLocal; - } - auto deviceConfig = GetCore()->GetSupportedConfig(deviceName, tconfig); - setDefaultHint(deviceName, deviceConfig, tconfig); + auto deviceConfig = GetCore()->GetSupportedConfig(deviceWithID, config); + setDefaultHint(deviceWithID, deviceConfig, config); return deviceConfig; }; @@ -370,7 +361,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons } } // updateFromMap will check config valid - loadConfig.set_user_property(PreProcessConfig(config), workModeAuto? true : false); + loadConfig.set_user_property(PreProcessConfig(config), workModeAuto); loadConfig.apply_user_properties(); auto fullProperty = loadConfig.get_full_properties(); // this can be updated when plugin switch to 2.0 API @@ -426,7 +417,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons // filter the device that supports filter configure auto metaDevices = ParseMetaDevices(strDevices, fullConfig); auto supportDevicesByConfig = FilterDevice(metaDevices, filterConfig); - if (supportDevicesByConfig.size() == 0) { + if (supportDevicesByConfig.empty()) { IE_THROW() << "There is no device support the configure"; } auto supportDevices = supportDevicesByConfig; @@ -680,7 +671,9 @@ QueryNetworkResult MultiDeviceInferencePlugin::QueryNetwork(const CNNNetwork& queryconfig.apply_user_properties(); auto fullproperty = queryconfig.get_full_properties(); // this can be updated when plugin switch to 2.0 API - std::map fullConfig = ConvertToStringMap(fullproperty);; + std::map fullConfig = ConvertToStringMap(fullproperty); + if (!queryconfig.is_set_by_user(ov::cache_dir)) + fullConfig.erase(ov::cache_dir.name()); auto priorities = 
fullConfig.find(ov::device::priorities.name()); if (!priorities->second.empty()) { auto metaDevices = ParseMetaDevices(priorities->second, fullConfig); diff --git a/src/plugins/auto/plugin.hpp b/src/plugins/auto/plugin.hpp index 6531d847a68..cf796e97745 100644 --- a/src/plugins/auto/plugin.hpp +++ b/src/plugins/auto/plugin.hpp @@ -58,9 +58,6 @@ public: void RegisterPriority(const unsigned int& priority, const std::string& deviceName); protected: - std::map GetSupportedConfig(const std::map& config, - const MultiDevicePlugin::DeviceName & deviceName) const; - ov::AnyMap PreProcessConfig(const std::map& orig_config) const; private: diff --git a/src/plugins/auto/plugin_config.cpp b/src/plugins/auto/plugin_config.cpp index a9000e51eee..b344585a06c 100644 --- a/src/plugins/auto/plugin_config.cpp +++ b/src/plugins/auto/plugin_config.cpp @@ -9,9 +9,7 @@ const std::set PluginConfig::_availableDevices = {"AUTO", "CPU", "G PluginConfig::PluginConfig() { set_default(); device_property_validator = std::dynamic_pointer_cast(std::make_shared([](const ov::Any& target) -> bool { - auto deviceName = target.as(); - return _availableDevices.end() != std::find(_availableDevices.begin(), _availableDevices.end(), - DeviceIDParser(deviceName).getDeviceName()); + return (target.as().find(ov::device::properties.name()) != std::string::npos); })); } @@ -60,13 +58,13 @@ void PluginConfig::set_property(const ov::AnyMap& properties) { // when user call set_property to set some config to plugin, we also respect this and pass through the config in this case user_properties[name] = val; } else { - OPENVINO_ASSERT(false, "property:", name, ": not supported"); + OPENVINO_ASSERT(false, "property: ", name, ": not supported"); } } } ov::Any PluginConfig::get_property(const std::string& name) const { - OPENVINO_ASSERT(internal_properties.find(name) != internal_properties.end(), "[AUTO]", "not supported property ", name); + OPENVINO_ASSERT(internal_properties.find(name) != internal_properties.end(), 
"[AUTO]", " not supported property ", name); return internal_properties.at(name); } @@ -98,7 +96,7 @@ void PluginConfig::set_user_property(const ov::AnyMap& config, bool checkfirstle } else if (!checkfirstlevel) { // for multi, accept it anyway when compiled model user_properties[kv.first] = kv.second; } else { - OPENVINO_ASSERT(false, "property", name, ": not supported"); + OPENVINO_ASSERT(false, "property ", name, ": not supported"); } } } diff --git a/src/plugins/auto_batch/src/auto_batch.cpp b/src/plugins/auto_batch/src/auto_batch.cpp index 1e8c40104fc..5a35ee4385b 100644 --- a/src/plugins/auto_batch/src/auto_batch.cpp +++ b/src/plugins/auto_batch/src/auto_batch.cpp @@ -28,6 +28,7 @@ namespace AutoBatchPlugin { using namespace InferenceEngine; std::vector supported_configKeys = {CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG), + ov::device::priorities.name(), CONFIG_KEY(AUTO_BATCH_TIMEOUT), CONFIG_KEY(CACHE_DIR)}; @@ -694,14 +695,8 @@ DeviceInformation AutoBatchInferencePlugin::ParseMetaDevice(const std::string& d DeviceIDParser deviceParser(deviceWithID); std::string deviceName = deviceParser.getDeviceName(); std::map tconfig = mergeConfigs(_config, config); - - // set device ID if any - std::string deviceIDLocal = deviceParser.getDeviceID(); - if (!deviceIDLocal.empty()) { - tconfig[PluginConfigParams::KEY_DEVICE_ID] = deviceIDLocal; - } // passthrough the cache dir to core->loadnetwork when underlying device does not support cache dir - auto deviceConfig = GetCore()->GetSupportedConfig(deviceName, tconfig); + auto deviceConfig = GetCore()->GetSupportedConfig(deviceWithID, tconfig); if (tconfig.find(CONFIG_KEY(CACHE_DIR)) != tconfig.end() && deviceConfig.find(CONFIG_KEY(CACHE_DIR)) == deviceConfig.end()) { auto tmpiter = tconfig.find(CONFIG_KEY(CACHE_DIR)); @@ -730,6 +725,8 @@ DeviceInformation AutoBatchInferencePlugin::ParseMetaDevice(const std::string& d RemoteContext::Ptr AutoBatchInferencePlugin::CreateContext(const InferenceEngine::ParamMap& config) { auto cfg = 
config; auto it = cfg.find(CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG)); + if (it == cfg.end()) + it = cfg.find(ov::device::priorities.name()); if (it == cfg.end()) IE_THROW() << "Value for KEY_AUTO_BATCH_DEVICE_CONFIG is not set"; @@ -762,7 +759,7 @@ void AutoBatchInferencePlugin::CheckConfig(const std::mapQueryNetwork(network, metaDevice.deviceName, cfg); } } - IE_THROW() << "Value for KEY_AUTO_BATCH is not set"; + IE_THROW() << "Value for KEY_AUTO_BATCH_DEVICE_CONFIG is not set"; } } // namespace AutoBatchPlugin diff --git a/src/plugins/auto_batch/src/auto_batch.hpp b/src/plugins/auto_batch/src/auto_batch.hpp index b57e0aa0fee..5978a9f0fe8 100644 --- a/src/plugins/auto_batch/src/auto_batch.hpp +++ b/src/plugins/auto_batch/src/auto_batch.hpp @@ -178,8 +178,6 @@ protected: DeviceInformation ParseMetaDevice(const std::string& devicesBatchCfg, const std::map& config) const; - std::map GetSupportedConfig(const std::map& config, - const DeviceName& deviceName) const; static DeviceInformation ParseBatchDevice(const std::string& deviceWithBatch); InferenceEngine::IExecutableNetworkInternal::Ptr LoadNetworkImpl( diff --git a/src/plugins/auto_batch/tests/unit/plugins_tests.cpp b/src/plugins/auto_batch/tests/unit/plugins_tests.cpp index f1534845bb4..3c7fd47a1ed 100644 --- a/src/plugins/auto_batch/tests/unit/plugins_tests.cpp +++ b/src/plugins/auto_batch/tests/unit/plugins_tests.cpp @@ -323,7 +323,7 @@ TEST_P(PluginMetricTest, GetPluginMetricTest) { } const char supported_metric[] = "SUPPORTED_METRICS FULL_DEVICE_NAME SUPPORTED_CONFIG_KEYS"; -const char supported_config_keys[] = "AUTO_BATCH_DEVICE_CONFIG AUTO_BATCH_TIMEOUT CACHE_DIR"; +const char supported_config_keys[] = "AUTO_BATCH_DEVICE_CONFIG MULTI_DEVICE_PRIORITIES AUTO_BATCH_TIMEOUT CACHE_DIR"; const std::vector batchDeviceTestConfigs = { BatchDeviceConfigParams{"CPU(4)", "CPU", 4, false}, diff --git a/src/plugins/hetero/executable_network.cpp b/src/plugins/hetero/executable_network.cpp index 989cf8ad3d4..08bc69bc041 100644 
--- a/src/plugins/hetero/executable_network.cpp +++ b/src/plugins/hetero/executable_network.cpp @@ -92,16 +92,7 @@ HeteroExecutableNetwork::HeteroExecutableNetwork(const InferenceEngine::CNNNetwo } if (queryNetworkResult.supportedLayersMap.empty()) { - auto it = _config.find("TARGET_FALLBACK"); - if (it == _config.end()) { - it = _config.find(ov::device::priorities.name()); - } - if (it != _config.end()) { - queryNetworkResult = _heteroPlugin->QueryNetwork(network, _config); - } else { - IE_THROW() << "The '" << ov::device::priorities.name() - << "' option was not defined for heterogeneous plugin"; - } + queryNetworkResult = _heteroPlugin->QueryNetwork(network, _config); } using Input = ngraph::Input; @@ -771,15 +762,7 @@ IInferRequestInternal::Ptr HeteroExecutableNetwork::CreateInferRequest() { InferenceEngine::Parameter HeteroExecutableNetwork::GetConfig(const std::string& name) const { InferenceEngine::Parameter result; if (name == "TARGET_FALLBACK" || name == ov::device::priorities.name()) { - auto it = _config.find("TARGET_FALLBACK"); - if (it == _config.end()) { - it = _config.find(ov::device::priorities.name()); - } - if (it != _config.end()) { - result = it->second; - } else { - result = std::string{}; - } + result = _heteroPlugin->GetTargetFallback(_config, false); } else if (name == HETERO_CONFIG_KEY(DUMP_GRAPH_DOT) || name == CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)) { auto it = _config.find(name); IE_ASSERT(it != _config.end()); @@ -792,7 +775,15 @@ InferenceEngine::Parameter HeteroExecutableNetwork::GetConfig(const std::string& } InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string& name) const { - if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { + if (ov::supported_properties == name) { + return decltype(ov::supported_properties)::value_type{ + ov::PropertyName{ov::supported_properties.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::model_name.name(), ov::PropertyMutability::RO}, + 
ov::PropertyName{ov::optimal_number_of_infer_requests.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::execution_devices.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::properties.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::priorities.name(), ov::PropertyMutability::RO}}; + } else if (EXEC_NETWORK_METRIC_KEY(SUPPORTED_METRICS) == name) { std::vector heteroMetrics = {ov::model_name.name(), METRIC_KEY(SUPPORTED_METRICS), METRIC_KEY(SUPPORTED_CONFIG_KEYS), @@ -805,6 +796,23 @@ InferenceEngine::Parameter HeteroExecutableNetwork::GetMetric(const std::string& HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS)}; IE_SET_METRIC_RETURN(SUPPORTED_CONFIG_KEYS, heteroConfigKeys); + } else if (ov::device::properties == name) { + ov::AnyMap all_devices = {}; + for (auto&& subnetwork : _networks) { + ov::AnyMap device_properties = {}; + if (all_devices.count(subnetwork._device) == 0) { + auto device_supported_metrics = subnetwork._network->GetMetric(METRIC_KEY(SUPPORTED_METRICS)); + for (auto&& property_name : device_supported_metrics.as>()) { + device_properties[property_name] = subnetwork._network->GetMetric(property_name); + } + auto device_supported_configs = subnetwork._network->GetMetric(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + for (auto&& property_name : device_supported_configs.as>()) { + device_properties[property_name] = subnetwork._network->GetConfig(property_name); + } + all_devices[subnetwork._device] = device_properties; + } + } + return all_devices; } else if (ov::model_name == name) { return decltype(ov::model_name)::value_type{_name}; } else if (ov::optimal_number_of_infer_requests == name) { diff --git a/src/plugins/hetero/plugin.cpp b/src/plugins/hetero/plugin.cpp index 55776fc4c6e..9152f7d8161 100644 --- a/src/plugins/hetero/plugin.cpp +++ b/src/plugins/hetero/plugin.cpp @@ -14,8 +14,8 @@ #include #include "ie_plugin_config.hpp" #include "executable_network.hpp" -#include -#include 
+#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" +#include "openvino/runtime/properties.hpp" // clang-format on using namespace InferenceEngine; @@ -38,6 +38,13 @@ Engine::Configs mergeConfigs(Engine::Configs config, const Engine::Configs& loca return config; } +Engine::Configs mergeConfigs(Engine::Configs config, const ov::AnyMap& local) { + for (auto&& kvp : local) { + config[kvp.first] = kvp.second.as(); + } + return config; +} + const std::vector& getSupportedConfigKeys() { static const std::vector supported_configKeys = {HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), "TARGET_FALLBACK", @@ -49,20 +56,28 @@ const std::vector& getSupportedConfigKeys() { } // namespace +std::string Engine::GetTargetFallback(const Engine::Configs& config, bool raise_exception) const { + auto it = config.find("TARGET_FALLBACK"); + if (it == config.end()) { + it = config.find(ov::device::priorities.name()); + } + if (it == config.end()) { + if (raise_exception) + IE_THROW() << "The '" << ov::device::priorities.name() + << "' option was not defined for heterogeneous plugin"; + return std::string(""); + } + return it->second; +} + InferenceEngine::IExecutableNetworkInternal::Ptr Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork& network, const Configs& config) { if (GetCore() == nullptr) { IE_THROW() << "Please, work with HETERO device via InferencEngine::Core object"; } auto tconfig = mergeConfigs(_config, config); - auto it = tconfig.find("TARGET_FALLBACK"); - if (it == tconfig.end()) { - it = tconfig.find(ov::device::priorities.name()); - } - if (it == tconfig.end()) { - IE_THROW() << "The '" << ov::device::priorities.name() << "' option was not defined for heterogeneous plugin"; - } - DeviceMetaInformationMap metaDevices = GetDevicePlugins(it->second, tconfig); + std::string fallbackDevicesStr = GetTargetFallback(tconfig); + DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig); auto function = network.getFunction(); if (function == 
nullptr) { @@ -80,26 +95,12 @@ InferenceEngine::IExecutableNetworkInternal::Ptr Engine::ImportNetwork( Engine::DeviceMetaInformationMap Engine::GetDevicePlugins(const std::string& targetFallback, const Configs& localConfig) const { - auto getDeviceConfig = [&](const std::string& deviceWithID) { - DeviceIDParser deviceParser(deviceWithID); - std::string deviceName = deviceParser.getDeviceName(); - Configs tconfig = mergeConfigs(_config, localConfig); - - // set device ID if any - std::string deviceIDLocal = deviceParser.getDeviceID(); - if (!deviceIDLocal.empty()) { - tconfig[KEY_DEVICE_ID] = deviceIDLocal; - } - - return GetCore()->GetSupportedConfig(deviceName, tconfig); - }; - auto fallbackDevices = InferenceEngine::DeviceIDParser::getHeteroDevices(targetFallback); Engine::DeviceMetaInformationMap metaDevices; for (auto&& deviceName : fallbackDevices) { auto itPlugin = metaDevices.find(deviceName); if (metaDevices.end() == itPlugin) { - metaDevices[deviceName] = getDeviceConfig(deviceName); + metaDevices[deviceName] = GetCore()->GetSupportedConfig(deviceName, mergeConfigs(_config, localConfig)); } } return metaDevices; @@ -124,15 +125,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const Configs } auto tconfig = mergeConfigs(_config, config); - auto it = tconfig.find("TARGET_FALLBACK"); - if (it == tconfig.end()) { - it = tconfig.find(ov::device::priorities.name()); - } - if (it == tconfig.end()) { - IE_THROW() << "The '" << ov::device::priorities.name() << "' option was not defined for heterogeneous plugin"; - } - - std::string fallbackDevicesStr = it->second; + std::string fallbackDevicesStr = GetTargetFallback(tconfig); DeviceMetaInformationMap metaDevices = GetDevicePlugins(fallbackDevicesStr, tconfig); auto function = network.getFunction(); @@ -162,7 +155,14 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const Configs } Parameter Engine::GetMetric(const std::string& name, const std::map& options) const { - if 
(METRIC_KEY(SUPPORTED_METRICS) == name) { + if (ov::supported_properties == name) { + return decltype(ov::supported_properties)::value_type{ + ov::PropertyName{ov::supported_properties.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::full_name.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::architecture.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::capabilities.name(), ov::PropertyMutability::RO}, + ov::PropertyName{ov::device::priorities.name(), ov::PropertyMutability::RW}}; + } else if (METRIC_KEY(SUPPORTED_METRICS) == name) { IE_SET_METRIC_RETURN(SUPPORTED_METRICS, std::vector{METRIC_KEY(SUPPORTED_METRICS), ov::device::full_name.name(), @@ -179,18 +179,8 @@ Parameter Engine::GetMetric(const std::string& name, const std::mapsecond.as(); - } else { - deviceIt = options.find(ov::device::priorities.name()); - if (deviceIt != options.end()) { - targetFallback = deviceIt->second.as(); - } else { - targetFallback = GetConfig(ov::device::priorities.name(), {}).as(); - } - } + auto tconfig = mergeConfigs(_config, options); + std::string targetFallback = GetTargetFallback(tconfig); return decltype(ov::device::architecture)::value_type{DeviceArchitecture(targetFallback)}; } else { IE_THROW() << "Unsupported metric key: " << name; @@ -219,16 +209,12 @@ Parameter Engine::GetConfig(const std::string& name, const std::mapsecond == YES; return {dump}; - } else if (name == "TARGET_FALLBACK" || name == ov::device::priorities.name()) { - auto it = _config.find("TARGET_FALLBACK"); - if (it == _config.end()) { - it = _config.find(ov::device::priorities.name()); - } - if (it == _config.end()) { - IE_THROW() << "Value for" << name << " is not set"; - } else { - return {it->second}; - } + } else if (name == ov::device::priorities) { + std::string targetFallback = GetTargetFallback(_config); + auto priorities = ov::util::from_string(targetFallback, ov::device::priorities); + return 
decltype(ov::device::priorities)::value_type{priorities}; + } else if (name == "TARGET_FALLBACK") { + return GetTargetFallback(_config); } else { IE_THROW() << "Unsupported config key: " << name; } diff --git a/src/plugins/hetero/plugin.hpp b/src/plugins/hetero/plugin.hpp index 6b955268b20..296793c243b 100644 --- a/src/plugins/hetero/plugin.hpp +++ b/src/plugins/hetero/plugin.hpp @@ -46,8 +46,9 @@ public: DeviceMetaInformationMap GetDevicePlugins(const std::string& targetFallback, const Configs& localConfig) const; + std::string GetTargetFallback(const Engine::Configs& config, bool raise_exception = true) const; + private: - Configs GetSupportedConfig(const Configs& config, const std::string& deviceName) const; std::string DeviceArchitecture(const std::string& targetFallback) const; }; } // namespace HeteroPlugin diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index 071a9be9ba8..39a3429c67b 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -150,6 +150,11 @@ void Config::readProperties(const std::map &prop) { IE_THROW() << "Wrong value for property key " << PluginConfigParams::KEY_ENFORCE_BF16 << ". 
Expected only YES/NO"; } + } else if (key == ov::device::id.name()) { + device_id = val; + if (!device_id.empty()) { + IE_THROW() << "CPU plugin supports only '' as device id"; + } } else if (key == ov::inference_precision.name()) { if (val == "bf16") { if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) { @@ -248,6 +253,8 @@ void Config::updateProperties() { _config.insert({ PluginConfigParams::KEY_CPU_THREADS_NUM, std::to_string(streamExecutorConfig._threads) }); + _config.insert({ PluginConfigParams::KEY_DEVICE_ID, device_id }); + IE_SUPPRESS_DEPRECATED_START _config.insert({ PluginConfigParams::KEY_DUMP_EXEC_GRAPH_AS_DOT, dumpToDot }); IE_SUPPRESS_DEPRECATED_END; diff --git a/src/plugins/intel_cpu/src/config.h b/src/plugins/intel_cpu/src/config.h index be4cd264a8b..9f2680fbe88 100644 --- a/src/plugins/intel_cpu/src/config.h +++ b/src/plugins/intel_cpu/src/config.h @@ -42,7 +42,8 @@ struct Config { bool exclusiveAsyncRequests = false; bool enableDynamicBatch = false; SnippetsMode snippetsMode = SnippetsMode::Enable; - std::string dumpToDot = ""; + std::string dumpToDot = {}; + std::string device_id = {}; int batchLimit = 0; float fcSparseWeiDecompressionRate = 1.0f; size_t rtCacheCapacity = 5000ul; diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index e86e2e38b88..757537bcdec 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -499,6 +499,8 @@ Parameter Engine::GetConfig(const std::string& name, const std::map rwProperties {RW_property(ov::num_streams.name()), RW_property(ov::affinity.name()), RW_property(ov::inference_num_threads.name()), @@ -596,6 +598,7 @@ Parameter Engine::GetMetric(const std::string& name, const std::map supportedProperties; @@ -641,11 +644,8 @@ void Engine::AddExtension(const InferenceEngine::IExtensionPtr& extension) { } QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::map& config) const { - 
QueryNetworkResult res; - WeightsSharing::Ptr fake_w_cache; - // TODO: Clarify the behavior of SetConfig method. Skip eng_config or not? Config conf = engConfig; conf.readProperties(config); @@ -693,6 +693,7 @@ QueryNetworkResult Engine::QueryNetwork(const CNNNetwork& network, const std::ma return true; }); + QueryNetworkResult res; for (auto&& layerName : supported) { res.supportedLayersMap.emplace(layerName, GetName()); } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index c40ef333d90..397a183ef3c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -87,7 +87,19 @@ INSTANTIATE_TEST_SUITE_P( const std::vector multiConfigs = { {ov::device::priorities(CommonTestUtils::DEVICE_CPU)} }; -const std::vector configsDeviceProperties = {{ov::device::properties("CPU", ov::num_streams(3))}}; +const std::vector configsDeviceProperties = {{ov::device::properties("CPU", ov::num_streams(3))}, + {ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{ov::num_streams(3)}}})}}; +const std::vector configsDevicePropertiesDouble = {{ov::device::properties("CPU", ov::num_streams(5)), + ov::num_streams(3)}, + {ov::device::properties("CPU", ov::num_streams(5)), + ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{ov::num_streams(7)}}}), + ov::num_streams(3)}, + {ov::device::properties("CPU", ov::num_streams(3)), + ov::device::properties("CPU", ov::num_streams(5))}, + {ov::device::properties("CPU", ov::num_streams(3)), + ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{ov::num_streams(5)}}})}, + {ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{ov::num_streams(3)}}}), + ov::device::properties(ov::AnyMap{{"CPU", 
ov::AnyMap{ov::num_streams(5)}}})}}; const std::vector configsWithSecondaryProperties = { {ov::device::properties("CPU", ov::num_streams(4))}, {ov::device::properties("CPU", @@ -130,10 +142,41 @@ const std::vector autoConfigsWithSecondaryProperties = { ov::device::properties("CPU", ov::num_streams(4), ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, - {ov::device::priorities(CommonTestUtils::DEVICE_CPU), + {ov::device::priorities(CommonTestUtils::DEVICE_GPU), ov::device::properties("AUTO", ov::enable_profiling(false), - ov::device::priorities(CommonTestUtils::DEVICE_GPU), + ov::device::priorities(CommonTestUtils::DEVICE_CPU), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), + ov::device::properties("CPU", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), + ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}}; + +const std::vector heteroConfigsWithSecondaryProperties = { + {ov::device::priorities(CommonTestUtils::DEVICE_CPU), + ov::device::properties("HETERO", + ov::enable_profiling(false), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + {ov::device::priorities(CommonTestUtils::DEVICE_CPU), + ov::device::properties("CPU", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + {ov::device::priorities(CommonTestUtils::DEVICE_CPU), + ov::device::properties("CPU", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)), + ov::device::properties("GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY))}, + {ov::device::priorities(CommonTestUtils::DEVICE_CPU), + ov::device::properties("HETERO", + ov::enable_profiling(false), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), + ov::device::properties("CPU", + ov::num_streams(4), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT))}, + 
{ov::device::priorities(CommonTestUtils::DEVICE_GPU), + ov::device::properties("HETERO", + ov::enable_profiling(false), + ov::device::priorities(CommonTestUtils::DEVICE_CPU), ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)), ov::device::properties("CPU", ov::num_streams(4), @@ -142,7 +185,7 @@ const std::vector autoConfigsWithSecondaryProperties = { INSTANTIATE_TEST_SUITE_P( smoke_OVClassSetDevicePriorityConfigTest, OVClassSetDevicePriorityConfigTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::Combine(::testing::Values("MULTI", "AUTO", "HETERO"), ::testing::ValuesIn(multiConfigs))); // // IE Class GetConfig @@ -265,7 +308,7 @@ INSTANTIATE_TEST_SUITE_P( // IE Class Load network INSTANTIATE_TEST_SUITE_P(smoke_CPU_OVClassLoadNetworkWithCorrectSecondaryPropertiesTest, OVClassLoadNetworkWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("CPU", "AUTO:CPU", "MULTI:CPU"), + ::testing::Combine(::testing::Values("CPU", "AUTO:CPU", "MULTI:CPU", "HETERO:CPU"), ::testing::ValuesIn(configsWithSecondaryProperties))); INSTANTIATE_TEST_SUITE_P(smoke_Multi_OVClassLoadNetworkWithSecondaryPropertiesTest, @@ -278,12 +321,21 @@ INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassLoadNetworkWithSecondaryPropertiesTes ::testing::Combine(::testing::Values("AUTO"), ::testing::ValuesIn(autoConfigsWithSecondaryProperties))); +INSTANTIATE_TEST_SUITE_P(smoke_HETERO_OVClassLoadNetworkWithSecondaryPropertiesTest, + OVClassLoadNetworkWithCorrectPropertiesTest, + ::testing::Combine(::testing::Values("HETERO"), + ::testing::ValuesIn(heteroConfigsWithSecondaryProperties))); + // IE Class load and check network with ov::device::properties INSTANTIATE_TEST_SUITE_P(smoke_CPU_OVClassLoadNetworkAndCheckWithSecondaryPropertiesTest, OVClassLoadNetworkAndCheckSecondaryPropertiesTest, ::testing::Combine(::testing::Values("CPU", "MULTI:CPU"), ::testing::ValuesIn(configsDeviceProperties))); 
+INSTANTIATE_TEST_SUITE_P(smoke_CPU_OVClassLoadNetworkAndCheckWithSecondaryPropertiesDoubleTest, + OVClassLoadNetworkAndCheckSecondaryPropertiesTest, + ::testing::Combine(::testing::Values("CPU", "MULTI:CPU"), + ::testing::ValuesIn(configsDevicePropertiesDouble))); INSTANTIATE_TEST_SUITE_P( smoke_OVClassLoadNetworkTest, OVClassLoadNetworkTest, ::testing::Values("CPU")); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 6d365255cd6..c9c87035310 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -152,14 +152,18 @@ const std::vector> automultiExeDeviceConfigs INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiCompileModelBehaviorTests, OVCompileModelGetExecutionDeviceTests, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_AUTO, - CommonTestUtils::DEVICE_MULTI), + CommonTestUtils::DEVICE_MULTI, + CommonTestUtils::DEVICE_HETERO), ::testing::ValuesIn(automultiExeDeviceConfigs)), OVCompileModelGetExecutionDeviceTests::getTestCaseName); const std::vector auto_multi_device_properties = { {ov::device::priorities(CommonTestUtils::DEVICE_CPU), ov::device::properties("CPU", ov::num_streams(4))}, {ov::device::priorities(CommonTestUtils::DEVICE_CPU), - ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}}; + ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}, + {ov::device::priorities(CommonTestUtils::DEVICE_CPU), + ov::device::properties(ov::AnyMap{{"CPU", ov::AnyMap{{ov::num_streams(4), ov::enable_profiling(true)}}}})} +}; const std::vector auto_multi_incorrect_device_properties = { {ov::device::priorities(CommonTestUtils::DEVICE_CPU), @@ -170,16 +174,18 @@ const std::vector 
auto_multi_incorrect_device_properties = { ov::device::properties("CPU", ov::num_streams(4), ov::enable_profiling(true))}}; INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsNoThrow, - OVSetSupportPropComplieModleWithoutConfigTests, + OVSetSupportPropCompileModelWithoutConfigTests, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_AUTO, - CommonTestUtils::DEVICE_MULTI), + CommonTestUtils::DEVICE_MULTI, + CommonTestUtils::DEVICE_HETERO), ::testing::ValuesIn(auto_multi_device_properties)), - OVSetSupportPropComplieModleWithoutConfigTests::getTestCaseName); + OVSetSupportPropCompileModelWithoutConfigTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_AutoMultiSetAndCompileModelBehaviorTestsThrow, - OVSetUnsupportPropComplieModleWithoutConfigTests, + OVSetUnsupportPropCompileModelWithoutConfigTests, ::testing::Combine(::testing::Values(CommonTestUtils::DEVICE_AUTO, - CommonTestUtils::DEVICE_MULTI), + CommonTestUtils::DEVICE_MULTI, + CommonTestUtils::DEVICE_HETERO), ::testing::ValuesIn(auto_multi_incorrect_device_properties)), - OVSetUnsupportPropComplieModleWithoutConfigTests::getTestCaseName); + OVSetUnsupportPropCompileModelWithoutConfigTests::getTestCaseName); } // namespace diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 602e2f75cee..3b1aaa55cd1 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -144,9 +144,6 @@ std::vector disabledTestPatterns() { R"(.*OVInferRequestCheckTensorPrecision.*type=i4.*)", R"(.*OVInferRequestCheckTensorPrecision.*type=u1.*)", R"(.*OVInferRequestCheckTensorPrecision.*type=u4.*)", - // Issue: 75022 - R"(.*OVExecutableNetworkBaseTest.*LoadNetworkToDefaultDeviceNoThrow.*)", - 
R"(.*IEClassBasicTest.*LoadNetworkToDefaultDeviceNoThrow.*)", // Issue: 77390 R"(.*LoopLayerCPUTest.*exec_cond=0.*)", R"(.*LoopLayerCPUTest.*trip_count=0.*)", @@ -168,15 +165,8 @@ std::vector disabledTestPatterns() { // Issue: 95590 R"(.*CachingSupportCase.*CompileModelCacheTestBase.*(TIwithLSTMcell1|MatMulBias|2InputSubtract)_(u|i).*)", // Issue: 95607 - R"(.*OVClass.*LoadNetwork.*LoadNetwork(HETEROAndDeviceIDThrows|MULTIwithAUTONoThrow|HETEROwithMULTINoThrow|MULTIwithHETERONoThrow).*)", - R"(.*OVClass.*LoadNetwork.*LoadNetwork(HETEROWithDeviceIDNoThrow|WithDeviceID|WithBigDeviceIDThrows|WithInvalidDeviceIDThrows|HETEROWithBigDeviceIDThrows).*)", - R"(.*OVClass.*QueryNetwork.*QueryNetwork(HETEROWithDeviceIDNoThrow|WithDeviceID|WithBigDeviceIDThrows|WithInvalidDeviceIDThrows|HETEROWithBigDeviceIDThrows).*)", - R"(.*OVClass.*LoadNetwork.*(DeviceID|MultiWithoutSettingDevicePrioritiesThrows).*)", - R"(.*OVClassLoadNetworkTest.*QueryNetwork(MULTIWithHETERO|HETEROWithMULTI)NoThrow_V10.*)", R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*(TIwithLSTMcell1|MatMulBias|2InputSubtract)_(i|u).*)", R"(.*CachingSupportCase.*ReadConcatSplitAssign.*)", - R"(.*IEClassQueryNetworkTest.*QueryNetwork.*)", - R"(.*IEClassLoadNetworkTest.*(Load|Query)Network.*)", // Issue: 95239 // HETERO plugin lacks caching_properties definition R"(smoke_Hetero_CachingSupportCase.*)", diff --git a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index 2d93a6fa5c9..f57484850d3 100644 --- a/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++ b/src/plugins/intel_gna/tests/functional/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -28,6 +28,7 @@ public: ngraph::Shape{4}, ngraph::Shape{1, 1, 1, 1024}), false); + param0->set_friendly_name("input"); auto 
conv1 = ngraph::builder::makeConvolution(reshape, ngraph::element::Type_t::f32, {1, 7}, @@ -72,50 +73,54 @@ public: } }; -// TODO: Convolution with 3D input is not supported on GNA -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkGetMetricTest, - OVClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::Values("GNA" /*, "MULTI:GNA", "HETERO:GNA" */)); +using OVGNAClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS = OVClassBaseTestGNAP; +using OVGNAClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS = OVClassBaseTestGNAP; +using OVGNAClassExecutableNetworkGetMetricTest_NETWORK_NAME = OVClassBaseTestGNAP; +using OVGNAClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS = OVClassBaseTestGNAP; +using OVGNAClassExecutableNetworkGetMetricTest_ThrowsUnsupported = OVClassBaseTestGNAP; -// TODO: Convolution with 3D input is not supported on GNA -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkGetMetricTest, - OVClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::Values("GNA" /*, "MULTI:GNA", "HETERO:GNA" */)); +INSTANTIATE_TEST_SUITE_P(smoke_OVClassExecutableNetworkGetMetricTest, + OVGNAClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, + ::testing::Values("GNA" /*, "MULTI:GNA"*/, "HETERO:GNA")); -// TODO: this metric is not supported by the plugin -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkGetMetricTest, - OVClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::Values("GNA", /* "MULTI:GNA", */ "HETERO:GNA")); +INSTANTIATE_TEST_SUITE_P(smoke_OVClassExecutableNetworkGetMetricTest, + OVGNAClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, + ::testing::Values("GNA" /*, "MULTI:GNA"*/, "HETERO:GNA")); -// TODO: Convolution with 3D input is not supported on GNA -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkGetMetricTest, - OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::Values("GNA" /*, 
"MULTI:GNA", "HETERO:GNA" */)); +INSTANTIATE_TEST_SUITE_P(smoke_OVClassExecutableNetworkGetMetricTest, + OVGNAClassExecutableNetworkGetMetricTest_NETWORK_NAME, + ::testing::Values("GNA" /*, "MULTI:GNA"*/, "HETERO:GNA")); -// TODO: Convolution with 3D input is not supported on GNA -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkGetMetricTest, - OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::Values("GNA", /* "MULTI:GNA", */ "HETERO:GNA")); +INSTANTIATE_TEST_SUITE_P(smoke_OVClassExecutableNetworkGetMetricTest, + OVGNAClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, + ::testing::Values("GNA" /*, "MULTI:GNA"*/, "HETERO:GNA")); + +INSTANTIATE_TEST_SUITE_P(smoke_OVClassExecutableNetworkGetMetricTest, + OVGNAClassExecutableNetworkGetMetricTest_ThrowsUnsupported, + ::testing::Values("GNA" /*, "MULTI:GNA"*/, "HETERO:GNA")); // // Executable Network GetConfig / SetConfig // -// TODO: Convolution with 3D input is not supported on GNA -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkGetConfigTest, - OVClassExecutableNetworkGetConfigTest, +using OVGNAClassExecutableNetworkGetConfigTest = OVClassBaseTestGNAP; +using OVGNAClassExecutableNetworkSetConfigTest = OVClassBaseTestGNAP; + +INSTANTIATE_TEST_SUITE_P(moke_OVClassExecutableNetworkGetConfigTest, + OVGNAClassExecutableNetworkGetConfigTest, ::testing::Values("GNA")); -// TODO: Convolution with 3D input is not supported on GNA -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassExecutableNetworkSetConfigTest, - OVClassExecutableNetworkSetConfigTest, +INSTANTIATE_TEST_SUITE_P(smoke_OVClassExecutableNetworkSetConfigTest, + OVGNAClassExecutableNetworkSetConfigTest, ::testing::Values("GNA")); +using OVGNAClassExecutableNetworkSupportedConfigTest = OVClassExecutableNetworkGetMetricTestForSpecificConfigGNA; +using OVGNAClassExecutableNetworkUnsupportedConfigTest = OVClassExecutableNetworkGetMetricTestForSpecificConfigGNA; + IE_SUPPRESS_DEPRECATED_START -// TODO: 
Convolution with 3D input is not supported on GNA INSTANTIATE_TEST_SUITE_P( - DISABLED_smoke_OVClassExecutableNetworkSupportedConfigTest, - OVClassExecutableNetworkSupportedConfigTest, + smoke_OVClassExecutableNetworkSupportedConfigTest, + OVGNAClassExecutableNetworkSupportedConfigTest, ::testing::Combine( ::testing::Values("GNA"), ::testing::Values(std::make_pair(GNA_CONFIG_KEY(DEVICE_MODE), InferenceEngine::GNAConfigParams::GNA_HW), @@ -124,10 +129,9 @@ INSTANTIATE_TEST_SUITE_P( std::make_pair(GNA_CONFIG_KEY(DEVICE_MODE), InferenceEngine::GNAConfigParams::GNA_AUTO)))); IE_SUPPRESS_DEPRECATED_END -// TODO: Convolution with 3D input is not supported on GNA INSTANTIATE_TEST_SUITE_P( - DISABLED_smoke_OVClassExecutableNetworkUnsupportedConfigTest, - OVClassExecutableNetworkUnsupportedConfigTest, + smoke_OVClassExecutableNetworkUnsupportedConfigTest, + OVGNAClassExecutableNetworkUnsupportedConfigTest, ::testing::Combine(::testing::Values("GNA"), ::testing::Values(std::make_pair(GNA_CONFIG_KEY(DEVICE_MODE), InferenceEngine::GNAConfigParams::GNA_SW_FP32), @@ -197,26 +201,59 @@ INSTANTIATE_TEST_SUITE_P( ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_0), ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0)))); +using OVClassExecutableNetworkDevicePropertiesTest = OVClassExecutableNetworkGetMetricTestForSpecificConfigGNA; +TEST_P(OVClassExecutableNetworkDevicePropertiesTest, DevicePropertiesNoThrow) { + ov::Core ie; + ASSERT_NO_THROW(auto compiled_model = + ie.compile_model(gnaSimpleNetwork, + deviceName, + ov::device::properties("GNA", ov::AnyMap{{configKey, configValue}}))); +} + +INSTANTIATE_TEST_SUITE_P( + smoke_OVClassExecutableNetworkDevicePropertiesTest, + OVClassExecutableNetworkDevicePropertiesTest, + ::testing::Combine( + ::testing::Values("HETERO:GNA"), + ::testing::Values(ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW), + ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::HW_WITH_SW_FBACK), + 
ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_EXACT), + ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::SW_FP32), + ov::intel_gna::execution_mode(ov::intel_gna::ExecutionMode::AUTO), + ov::intel_gna::scale_factors_per_input(std::map{{"input", 1.0f}}), + ov::inference_precision(ov::element::i8), + ov::inference_precision(ov::element::i16), + ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), + ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), + ov::hint::performance_mode(ov::hint::PerformanceMode::UNDEFINED), + ov::hint::num_requests(1), + ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_2_0), + ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::GNA_3_0), + ov::intel_gna::execution_target(ov::intel_gna::HWGeneration::UNDEFINED), + ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_2_0), + ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::GNA_3_0), + ov::intel_gna::compile_target(ov::intel_gna::HWGeneration::UNDEFINED), + ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::RECURSIVE_DESCENT), + ov::intel_gna::pwl_design_algorithm(ov::intel_gna::PWLDesignAlgorithm::UNIFORM_DISTRIBUTION), + ov::intel_gna::pwl_max_error_percent(0.05), + ov::log::level(ov::log::Level::NO)))); + // // Hetero Executable Network GetMetric // - -// TODO: verify hetero interop -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassHeteroExecutableNetworlGetMetricTest, +INSTANTIATE_TEST_SUITE_P(smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, ::testing::Values("GNA")); -// TODO: verify hetero interop -INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassHeteroExecutableNetworlGetMetricTest, +INSTANTIATE_TEST_SUITE_P(smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, ::testing::Values("GNA")); -// TODO: verify hetero interop 
-INSTANTIATE_TEST_SUITE_P(DISABLED_smoke_OVClassHeteroExecutableNetworlGetMetricTest, +INSTANTIATE_TEST_SUITE_P(smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, ::testing::Values("GNA")); -INSTANTIATE_TEST_SUITE_P(smoke_OVClassHeteroExecutableNetworlGetMetricTest, +INSTANTIATE_TEST_SUITE_P(smoke_OVClassHeteroExecutableNetworkGetMetricTest, OVClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, ::testing::Values("GNA")); } // namespace diff --git a/src/plugins/template/src/config.cpp b/src/plugins/template/src/config.cpp index cc229f8cab9..90842ead1df 100644 --- a/src/plugins/template/src/config.cpp +++ b/src/plugins/template/src/config.cpp @@ -27,7 +27,7 @@ Configuration::Configuration(const ov::AnyMap& config, const Configuration& defa } else if (streamExecutorConfigKeys.end() != std::find(std::begin(streamExecutorConfigKeys), std::end(streamExecutorConfigKeys), key)) { streams_executor_config.set_property(key, value); - } else if (CONFIG_KEY(DEVICE_ID) == key) { + } else if (ov::device::id.name() == key) { device_id = std::stoi(value.as()); OPENVINO_ASSERT(device_id <= 0, "Device ID ", device_id, " is not supported"); } else if (CONFIG_KEY(PERF_COUNT) == key) { diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index 3671cc03a52..58c7ec69b02 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -112,10 +112,8 @@ std::vector disabledTestPatterns() { // CVS-71891 R"(.*ReferenceTileTest.*rType=i4.*)", R"(.*ReferenceTileTest.*rType=u4.*)", - R"(.*DeviceID.*)", // CVS-95608 R"(.*CachingSupportCase.*CompileModelCacheTestBase.*)", - R"(.*OVClassLoadNetworkTest.*QueryNetworkMULTIWithHETERONoThrow_V10.*)", // New plugin API doesn't support legacy NV12 I420 preprocessing R"(.*ConvertNV12WithLegacyTest.*)", 
R"(.*ConvertI420WithLegacyTest.*)", diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 7c2f70edf55..7c223fc92b8 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -108,7 +108,7 @@ auto multiConfigs = []() { INSTANTIATE_TEST_SUITE_P( smoke_OVClassSetDevicePriorityConfigTest, OVClassSetDevicePriorityConfigTest, - ::testing::Combine(::testing::Values("MULTI", "AUTO"), + ::testing::Combine(::testing::Values("MULTI", "AUTO", "HETERO"), ::testing::ValuesIn(multiConfigs()))); // // GPU specific metrics @@ -752,7 +752,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_OVClassLoadNetworkWithCorrectSecondaryPropertiesT INSTANTIATE_TEST_SUITE_P(smoke_AUTO_OVClassLoadNetworkWithCorrectSecondaryPropertiesTest, OVClassLoadNetworkWithCorrectPropertiesTest, - ::testing::Combine(::testing::Values("AUTO:GPU", "MULTI:GPU"), + ::testing::Combine(::testing::Values("AUTO:GPU", "MULTI:GPU", "HETERO:GPU"), ::testing::ValuesIn(gpuCorrectConfigsWithSecondaryProperties()))); auto autoCorrectConfigs = []() { diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index f700c67064e..623388b36d8 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -53,14 +53,12 @@ const std::vector auto_properties = { }; -const std::vector auto_Multi_compiled_empty_properties = { +const std::vector compiled_empty_properties = { {} }; -const std::vector multi_plugin_Incorrect_properties = { - 
{ov::device::priorities("NONE")} -}; -const std::vector auto_plugin_Incorrect_properties = { +const std::vector incorrect_device_priorities_properties = { + {ov::device::priorities("NONE")}, {ov::device::priorities("NONE", "GPU")}, {ov::device::priorities("-", "GPU")}, {ov::device::priorities("-NONE", "CPU")}, @@ -86,19 +84,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorTests, OVPropertiesTests, ::testing::ValuesIn(multi_properties)), OVPropertiesTests::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_AutoBehaviorIncorrectPropertiesTests, OVSetPropComplieModleWihtIncorrectPropTests, +INSTANTIATE_TEST_SUITE_P(smoke_BehaviorIncorrectPropertiesTests, OVSetPropCompileModelWithIncorrectPropTests, ::testing::Combine( - ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(auto_plugin_Incorrect_properties), - ::testing::ValuesIn(auto_Multi_compiled_empty_properties)), - OVSetPropComplieModleWihtIncorrectPropTests::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_MultiBehaviorIncorrectPropertiesTests, OVSetPropComplieModleWihtIncorrectPropTests, - ::testing::Combine( - ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(multi_plugin_Incorrect_properties), - ::testing::ValuesIn(auto_Multi_compiled_empty_properties)), - OVSetPropComplieModleWihtIncorrectPropTests::getTestCaseName); + ::testing::Values(CommonTestUtils::DEVICE_AUTO, + CommonTestUtils::DEVICE_MULTI, + CommonTestUtils::DEVICE_HETERO), + ::testing::ValuesIn(incorrect_device_priorities_properties), + ::testing::ValuesIn(compiled_empty_properties)), + OVSetPropCompileModelWithIncorrectPropTests::getTestCaseName); const std::vector gpu_setcore_properties = { {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT), diff --git a/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp index 573dab8aef2..ba2233499f7 100644 --- 
a/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/executable_network/get_metric.hpp @@ -331,15 +331,10 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN } ASSERT_LT(0, heteroMetricValues.size()); - const std::vector heteroSpecificMetrics = { - METRIC_KEY(SUPPORTED_METRICS), - METRIC_KEY(SUPPORTED_CONFIG_KEYS) - }; - - // check that all device metric values are present in hetero case + // check that all device metric values are unavailable in hetero case for (auto &&deviceMetricName : deviceMetricValues) { auto it = std::find(heteroMetricValues.begin(), heteroMetricValues.end(), deviceMetricName); - if (it == heteroMetricValues.end()) { + if ((it == heteroMetricValues.end()) && (deviceMetricName != "SUPPORTED_PROPERTIES")) { ASSERT_THROW(InferenceEngine::Parameter heteroMetricValue = heteroExeNetwork.GetMetric(deviceMetricName), InferenceEngine::Exception); } } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index 82b89b56d92..96b09fe68f8 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -909,9 +909,9 @@ TEST_P(OVClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) { auto deviceIDs = ie.get_property(target_device, ov::available_devices); if (deviceIDs.empty()) GTEST_FAIL(); - OV_ASSERT_NO_THROW(ie.query_model(actualNetwork, - CommonTestUtils::DEVICE_HETERO, - ov::device::priorities(target_device + "." + deviceIDs[0], target_device))); + ie.query_model(actualNetwork, + CommonTestUtils::DEVICE_HETERO, + ov::device::priorities(target_device + "." 
+ deviceIDs[0], target_device)); } else { GTEST_FAIL() << "Device does not support DeviceID property" << std::endl; } @@ -921,7 +921,10 @@ TEST_P(OVClassQueryNetworkTest, QueryNetworkWithDeviceID) { ov::Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, target_device)) { - ie.query_model(simpleNetwork, target_device + ".0"); + auto deviceIDs = ie.get_property(target_device, ov::available_devices); + if (deviceIDs.empty()) + GTEST_FAIL(); + ie.query_model(simpleNetwork, target_device + "." + deviceIDs[0]); } else { GTEST_FAIL() << "Device does not support DeviceID property" << std::endl; } @@ -1112,8 +1115,29 @@ TEST_P(OVClassLoadNetworkAndCheckSecondaryPropertiesTest, LoadNetworkAndCheckSec ov::Core ie = createCoreWithTemplate(); ov::CompiledModel model; OV_ASSERT_NO_THROW(model = ie.compile_model(actualNetwork, target_device, configuration)); - auto property = configuration.begin()->second.as(); - auto actual = property.begin()->second.as(); + ov::AnyMap property = configuration; + ov::AnyMap::iterator it = configuration.end(); + // device properties in form ov::device::properties(DEVICE, ...) 
has the first priority + for (it = configuration.begin(); it != configuration.end(); it++) { + if ((it->first.find(ov::device::properties.name()) != std::string::npos) && + (it->first != ov::device::properties.name())) { + break; + } + } + if (it != configuration.end()) { + // DEVICE_PROPERTIES_ found + property = it->second.as(); + } else { + // search for DEVICE_PROPERTIES + it = configuration.find(ov::device::properties.name()); + ASSERT_TRUE(it != configuration.end()); + property = it->second.as().begin()->second.as(); + if (it == configuration.end()) { + it = configuration.find(ov::num_streams.name()); + } + } + ASSERT_TRUE(property.count(ov::num_streams.name())); + auto actual = property.at(ov::num_streams.name()).as(); ov::Any value; OV_ASSERT_NO_THROW(value = model.get_property(ov::num_streams.name())); int32_t expect = value.as(); @@ -1198,13 +1222,12 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) { devices += ','; } } - OV_ASSERT_NO_THROW( - ie.compile_model(actualNetwork, - CommonTestUtils::DEVICE_HETERO, - ov::device::properties(CommonTestUtils::DEVICE_MULTI, - ov::device::priorities(devices)), - ov::device::properties(CommonTestUtils::DEVICE_HETERO, - ov::device::priorities(CommonTestUtils::DEVICE_MULTI, target_device)))); + ie.compile_model(actualNetwork, + CommonTestUtils::DEVICE_HETERO, + ov::device::properties(CommonTestUtils::DEVICE_MULTI, + ov::device::priorities(devices)), + ov::device::properties(CommonTestUtils::DEVICE_HETERO, + ov::device::priorities(CommonTestUtils::DEVICE_MULTI, target_device))); } else { GTEST_FAIL() << "Device does not support DeviceID property" << std::endl; } @@ -1214,19 +1237,18 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { ov::Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, target_device) && supportsAvailableDevices(ie, target_device)) { - std::string devices; - auto availableDevices = ie.get_property(target_device, ov::available_devices); - for (auto&& 
device : availableDevices) { - devices += CommonTestUtils::DEVICE_HETERO + std::string(".") + device; - if (&device != &(availableDevices.back())) { - devices += ','; - } + std::string hetero_devices; + auto device_ids = ie.get_property(target_device, ov::available_devices); + for (auto&& device_id : device_ids) { + hetero_devices += target_device + std::string(".") + device_id; + if (&device_id != &device_ids.back()) + hetero_devices += ','; } OV_ASSERT_NO_THROW(ie.compile_model( actualNetwork, CommonTestUtils::DEVICE_MULTI, - ov::device::properties(CommonTestUtils::DEVICE_MULTI, ov::device::priorities(devices)), - ov::device::properties(CommonTestUtils::DEVICE_HETERO, ov::device::priorities(target_device, target_device)))); + ov::device::properties(CommonTestUtils::DEVICE_MULTI, ov::device::priorities(CommonTestUtils::DEVICE_HETERO)), + ov::device::properties(CommonTestUtils::DEVICE_HETERO, ov::device::priorities(hetero_devices)))); } else { GTEST_FAIL() << "Device does not support DeviceID property" << std::endl; } @@ -1285,10 +1307,10 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { GTEST_FAIL() << "Device does not support AvailableDevices property" << std::endl; } std::string devices; - auto availableDevices = ie.get_property(target_device, ov::available_devices); - for (auto&& device : availableDevices) { - devices += std::string(CommonTestUtils::DEVICE_HETERO) + "." + device; - if (&device != &(availableDevices.back())) { + auto device_ids = ie.get_property(target_device, ov::available_devices); + for (auto&& device_id : device_ids) { + devices += target_device + "." 
+ device_id; + if (&device_id != &(device_ids.back())) { devices += ','; } } @@ -1302,9 +1324,9 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { OV_ASSERT_NO_THROW(result = ie.query_model(multinputNetwork, CommonTestUtils::DEVICE_MULTI, ov::device::properties(CommonTestUtils::DEVICE_MULTI, - ov::device::priorities(devices)), + ov::device::priorities(CommonTestUtils::DEVICE_HETERO)), ov::device::properties(CommonTestUtils::DEVICE_HETERO, - ov::device::priorities(target_device, target_device)))); + ov::device::priorities(devices)))); std::unordered_set actualLayers; for (auto&& layer : result) { @@ -1313,26 +1335,6 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { ASSERT_EQ(expectedLayers, actualLayers); } -// TODO: Enable this test with pre-processing -TEST_P(OVClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins) { - ov::Core ie = createCoreWithTemplate(); - { - auto versions = ie.get_versions(std::string(CommonTestUtils::DEVICE_MULTI) + ":" + target_device + "," + - CommonTestUtils::DEVICE_CPU); - ASSERT_EQ(3, versions.size()); - } - ov::AnyMap config; - if (target_device == CommonTestUtils::DEVICE_CPU) { - config.insert(ov::enable_profiling(true)); - } - // OV_ASSERT_NO_THROW({ - // ov::Core ie = createCoreWithTemplate(); - // std::string name = actualNetwork.getInputsInfo().begin()->first; - // actualNetwork.getInputsInfo().at(name)->setPrecision(Precision::U8); - // auto executableNetwork = ie.compile_model(actualNetwork, target_device, config); - // }); -}; - TEST_P(OVClassSetDefaultDeviceIDTest, SetDefaultDeviceIDNoThrow) { ov::Core ie = createCoreWithTemplate(); diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp index a1278388b0a..bd97d5dd9c5 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp +++ 
b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/properties_tests.hpp @@ -45,8 +45,8 @@ public: using OVPropertiesIncorrectTests = OVPropertiesTests; using OVPropertiesDefaultTests = OVPropertiesTests; -using OVSetSupportPropComplieModleWithoutConfigTests = OVPropertiesTests; -using OVSetUnsupportPropComplieModleWithoutConfigTests = OVPropertiesTests; +using OVSetSupportPropCompileModelWithoutConfigTests = OVPropertiesTests; +using OVSetUnsupportPropCompileModelWithoutConfigTests = OVPropertiesTests; using CompileModelPropertiesParams = std::tuple; class OVSetPropComplieModleGetPropTests : public testing::WithParamInterface, @@ -59,7 +59,7 @@ public: AnyMap compileModelProperties; }; -using OVSetPropComplieModleWihtIncorrectPropTests = OVSetPropComplieModleGetPropTests; +using OVSetPropCompileModelWithIncorrectPropTests = OVSetPropComplieModleGetPropTests; class OVPropertiesTestsWithComplieModelProps : public testing::WithParamInterface, public OVPropertiesBase { diff --git a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp index eec46d15859..a519fc11e3f 100644 --- a/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/plugin/core_integration.hpp @@ -866,7 +866,10 @@ TEST_P(IEClassQueryNetworkTest, QueryNetworkWithDeviceID) { if (!supportsDeviceID(ie, target_device)) { GTEST_FAIL() << "Device does not support DeviceID" << std::endl; } - ASSERT_NO_THROW(ie.QueryNetwork(simpleCnnNetwork, target_device + ".0")); + auto deviceIDs = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + if (deviceIDs.empty()) + GTEST_FAIL() << "Incorrect DeviceID number" << std::endl; + ASSERT_NO_THROW(ie.QueryNetwork(simpleCnnNetwork, target_device + "." 
+ deviceIDs[0])); } TEST_P(IEClassQueryNetworkTest, QueryNetworkWithBigDeviceIDThrows) { @@ -1105,14 +1108,14 @@ TEST_P(IEClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { std::string devices; auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); for (auto &&device : availableDevices) { - devices += CommonTestUtils::DEVICE_HETERO + std::string(".") + device; + devices += target_device + std::string(".") + device; if (&device != &(availableDevices.back())) { devices += ','; } } ASSERT_NO_THROW(ie.LoadNetwork(actualCnnNetwork, CommonTestUtils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", target_device + "," + target_device}})); + {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), CommonTestUtils::DEVICE_HETERO}, + {"TARGET_FALLBACK", devices}})); } // @@ -1167,7 +1170,7 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { std::string devices; auto availableDevices = ie.GetMetric(target_device, METRIC_KEY(AVAILABLE_DEVICES)).as>(); for (auto &&device : availableDevices) { - devices += "HETERO." + device; + devices += target_device + "." 
+ device; if (&device != &(availableDevices.back())) { devices += ','; } @@ -1180,8 +1183,8 @@ TEST_P(IEClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { } InferenceEngine::QueryNetworkResult result; ASSERT_NO_THROW(result = ie.QueryNetwork(multinputCnnNetwork, CommonTestUtils::DEVICE_MULTI, { - {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", target_device + "," + target_device}})); + {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), CommonTestUtils::DEVICE_HETERO}, + {"TARGET_FALLBACK", devices}})); std::unordered_set actualLayers; for (auto &&layer : result.supportedLayersMap) { diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp index 5611d449d30..f6c7912a0ff 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/caching_tests.cpp @@ -321,7 +321,7 @@ void CompileModelLoadFromFileTestBase::run() { } } -TEST_P(CompileModelLoadFromFileTestBase, CanLoadFromFileWithoutExecption) { +TEST_P(CompileModelLoadFromFileTestBase, CanLoadFromFileWithoutException) { run(); } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index 9b619d41d99..fb43dc1b4f0 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -195,11 +195,11 @@ TEST_P(OVSetPropComplieModleGetPropTests, SetPropertyComplieModelGetProperty) { } } -TEST_P(OVSetPropComplieModleWihtIncorrectPropTests, CanNotCompileModelWithIncorrectProperties) { +TEST_P(OVSetPropCompileModelWithIncorrectPropTests, CanNotCompileModelWithIncorrectProperties) { ASSERT_THROW(core->compile_model(model, target_device, properties), ov::Exception); } 
-TEST_P(OVSetSupportPropComplieModleWithoutConfigTests, SetPropertyCompiledModelWithCorrectProperty) { +TEST_P(OVSetSupportPropCompileModelWithoutConfigTests, SetPropertyCompiledModelWithCorrectProperty) { ASSERT_NO_THROW(core->compile_model(model, target_device, properties)); } diff --git a/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp b/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp index c75e9888c04..b59e56a850e 100644 --- a/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp +++ b/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp @@ -62,6 +62,8 @@ public: MOCK_CONST_METHOD1(DeviceSupportsImportExport, bool(const std::string&)); // NOLINT not a cast to bool MOCK_METHOD2(GetSupportedConfig, std::map(const std::string&, const std::map&)); + MOCK_CONST_METHOD2(get_supported_property, + ov::AnyMap(const std::string&, const ov::AnyMap&)); MOCK_CONST_METHOD0(isNewAPI, bool()); MOCK_METHOD1(GetDefaultContext, InferenceEngine::RemoteContext::Ptr(const std::string&)); diff --git a/src/tests/unit/auto/auto_default_perf_hint_test.cpp b/src/tests/unit/auto/auto_default_perf_hint_test.cpp index 5d98eb1ddb9..a213d31df9f 100644 --- a/src/tests/unit/auto/auto_default_perf_hint_test.cpp +++ b/src/tests/unit/auto/auto_default_perf_hint_test.cpp @@ -81,17 +81,12 @@ public: for (auto& device : targetDevices) { result << device << "_"; } - auto cpuConfig = deviceConfigs.find("CPU"); - auto gpuConfig = deviceConfigs.find("GPU"); - auto priority = deviceConfigs.find("MULTI_DEVICE_PRIORITIES"); - result << "properties_"; - if (cpuConfig != deviceConfigs.end()) - result << "CPU_" << cpuConfig->second << "_"; - if (gpuConfig != deviceConfigs.end()) - result << "GPU_" << gpuConfig->second << "_"; - if (priority != deviceConfigs.end()) - result << "priority_" << priority->second; - return result.str(); + for (auto& item : deviceConfigs) { + 
result << item.first << "_" << item.second << "_"; + } + auto name = result.str(); + name.pop_back(); + return name; } static std::vector CreateNumStreamsAndDefaultPerfHintTestConfigs() { @@ -101,7 +96,7 @@ public: testConfigs.push_back( ConfigParams{"AUTO", {"CPU"}, - {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, @@ -110,23 +105,23 @@ public: testConfigs.push_back(ConfigParams{ "AUTO", {"CPU", "GPU"}, - {{"CPU", "NUM_STREAMS 3"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:get default_hint:lantency testConfigs.push_back(ConfigParams{ "AUTO", {"CPU", "GPU"}, - {{"GPU", "NUM_STREAMS 3"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get default_hint:lantency GPU:no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"CPU"}, - {{"CPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"GPU"}, {{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get default_hint:lantency testConfigs.push_back( ConfigParams{"AUTO", {"GPU"}, - {{"GPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", @@ -135,18 +130,17 @@ public: testConfigs.push_back( ConfigParams{"MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"CPU", "NUM_STREAMS 3"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint 
GPU: get default_hint:tput testConfigs.push_back( ConfigParams{"MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"GPU", "NUM_STREAMS 3"}, + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get default_hint:tput GPU: no perf_hint testConfigs.push_back( ConfigParams{"MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"CPU", "NUM_STREAMS 3"}, - {"GPU", "NUM_STREAMS 3"}, + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3},GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: no perf_hint return testConfigs; } @@ -156,44 +150,42 @@ public: testConfigs.push_back(ConfigParams{ "AUTO", {"CPU"}, - {{"CPU", "PERFORMANCE_HINT THROUGHPUT"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: get perf_hint:tput testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, - {{"CPU", "PERFORMANCE_HINT THROUGHPUT"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, - {{"CPU", "PERFORMANCE_HINT THROUGHPUT"}, - {"GPU", "PERFORMANCE_HINT THROUGHPUT"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT},GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get perf_hint:tput testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, - {{"CPU", "PERFORMANCE_HINT THROUGHPUT"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get perf_hint:tput testConfigs.push_back(ConfigParams{ "AUTO", {"GPU"}, - {{"GPU", "PERFORMANCE_HINT THROUGHPUT"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput + {{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:THROUGHPUT}}"}, 
{"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: get perf_hint:tput testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"CPU", "PERFORMANCE_HINT LATENCY"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:LATENCY}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get perf_hint:latency GPU: get default_hint:tput testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"GPU", "PERFORMANCE_HINT LATENCY"}, + {{"DEVICE_PROPERTIES", "{GPU:{PERFORMANCE_HINT:LATENCY}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get default_hint:tput GPU: get perf_hint:latency testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"CPU", "PERFORMANCE_HINT LATENCY"}, - {"GPU", "PERFORMANCE_HINT LATENCY"}, + {{"DEVICE_PROPERTIES", "{CPU:{PERFORMANCE_HINT:LATENCY},GPU:{PERFORMANCE_HINT:LATENCY}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get perf_hint:lantency GPU: get perf_hint:lantency return testConfigs; } @@ -203,44 +195,42 @@ public: testConfigs.push_back(ConfigParams{ "AUTO", {"CPU"}, - {{"CPU", "ALLOW_AUTO_BATCHING TRUE"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); // CPU: no perf_hint testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, - {{"CPU", "ALLOW_AUTO_BATCHING TRUE"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:get default_hint:lantency testConfigs.push_back( ConfigParams{"AUTO", {"CPU", "GPU"}, - {{"CPU", "ALLOW_AUTO_BATCHING TRUE"}, - {"GPU", "ALLOW_AUTO_BATCHING TRUE"}, + {{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:TRUE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); // CPU: as helper, get perf_hint:lantency GPU:no perf_hint testConfigs.push_back(ConfigParams{"AUTO", {"CPU"}, - {{"CPU", "ALLOW_AUTO_BATCHING FALSE"}, + 
{{"DEVICE_PROPERTIES", "{CPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint testConfigs.push_back(ConfigParams{ "AUTO", {"GPU"}, - {{"GPU", "ALLOW_AUTO_BATCHING FALSE"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint + {{"DEVICE_PROPERTIES", "GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); // GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"CPU", "ALLOW_AUTO_BATCHING FALSE"}, + {{"CPU", "{ALLOW_AUTO_BATCHING:FALSE}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: get default_hint:tput testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"GPU", "ALLOW_AUTO_BATCHING FALSE"}, + {{"DEVICE_PROPERTIES", "GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: get default_hint:tput GPU: no perf_hint testConfigs.push_back(ConfigParams{ "MULTI:CPU,GPU", {"CPU", "GPU"}, - {{"CPU", "ALLOW_AUTO_BATCHING TRUE"}, - {"GPU", "ALLOW_AUTO_BATCHING FALSE"}, + {{"DEVICE_PROPERTIES", "CPU:{ALLOW_AUTO_BATCHING:TRUE},GPU:{ALLOW_AUTO_BATCHING:FALSE}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); // CPU: no perf_hint GPU: no perf_hint return testConfigs; } diff --git a/src/tests/unit/auto/auto_load_network_properties_test.cpp b/src/tests/unit/auto/auto_load_network_properties_test.cpp index 7859d5408ba..776a160cd42 100644 --- a/src/tests/unit/auto/auto_load_network_properties_test.cpp +++ b/src/tests/unit/auto/auto_load_network_properties_test.cpp @@ -85,42 +85,40 @@ public: for (auto& device : targetDevices) { result << device << "_"; } - auto cpuConfig = deviceConfigs.find("CPU"); - auto gpuConfig = deviceConfigs.find("GPU"); - result << "device_properties_"; - if (cpuConfig != deviceConfigs.end()) - result << "CPU_" << cpuConfig->second << "_"; - if (gpuConfig != deviceConfigs.end()) - result << "GPU_" << gpuConfig->second; - return result.str(); + for (auto& item : deviceConfigs) { + 
result << item.first << "_" << item.second << "_"; + } + auto name = result.str(); + name.pop_back(); + return name; } static std::vector CreateConfigs() { testConfigs.clear(); testConfigs.push_back( - ConfigParams{"AUTO", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"AUTO", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO", {"CPU", "GPU"}, {{"GPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + ConfigParams{"AUTO", {"CPU", "GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:CPU", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); + ConfigParams{"AUTO:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:CPU,GPU", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + ConfigParams{"AUTO:CPU,GPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back( - ConfigParams{"AUTO:GPU", {"GPU"}, {{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); + ConfigParams{"AUTO:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); testConfigs.push_back(ConfigParams{"AUTO:GPU,CPU", {"CPU", "GPU"}, - {{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); testConfigs.push_back( - ConfigParams{"MULTI:CPU", {"CPU"}, {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); + ConfigParams{"MULTI:CPU", {"CPU"}, {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU"}}}); testConfigs.push_back(ConfigParams{"MULTI:CPU,GPU", {"CPU", 
"GPU"}, - {{"CPU", "NUM_STREAMS 3"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); + {{"DEVICE_PROPERTIES", "{CPU:{NUM_STREAMS:3}}"}, {"MULTI_DEVICE_PRIORITIES", "CPU,GPU"}}}); testConfigs.push_back( - ConfigParams{"MULTI:GPU", {"GPU"}, {{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); + ConfigParams{"MULTI:GPU", {"GPU"}, {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU"}}}); testConfigs.push_back(ConfigParams{"MULTI:GPU,CPU", {"CPU", "GPU"}, - {{"GPU", "NUM_STREAMS 5"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); + {{"DEVICE_PROPERTIES", "{GPU:{NUM_STREAMS:5}}"}, {"MULTI_DEVICE_PRIORITIES", "GPU,CPU"}}}); return testConfigs; } diff --git a/tools/benchmark_tool/openvino/tools/benchmark/main.py b/tools/benchmark_tool/openvino/tools/benchmark/main.py index f99bb3d58c2..3fe5a80907f 100644 --- a/tools/benchmark_tool/openvino/tools/benchmark/main.py +++ b/tools/benchmark_tool/openvino/tools/benchmark/main.py @@ -5,7 +5,7 @@ import os import sys from datetime import datetime -from openvino.runtime import Dimension +from openvino.runtime import Dimension,properties from openvino.tools.benchmark.benchmark import Benchmark from openvino.tools.benchmark.parameters import parse_args @@ -18,7 +18,7 @@ from openvino.tools.benchmark.utils.utils import next_step, get_number_iteration process_help_inference_string, print_perf_counters, print_perf_counters_sort, dump_exec_graph, get_duration_in_milliseconds, \ get_command_line_arguments, parse_value_per_device, parse_devices, get_inputs_info, \ print_inputs_and_outputs_info, get_network_batch_size, load_config, dump_config, get_latency_groups, \ - check_for_static, can_measure_as_static, parse_value_for_virtual_device + check_for_static, can_measure_as_static, parse_value_for_virtual_device, is_virtual_device, is_virtual_device_found from openvino.tools.benchmark.utils.statistics_report import StatisticsReport, JsonStatisticsReport, CsvStatisticsReport, \ averageCntReport, 
detailedCntReport @@ -108,19 +108,23 @@ def main(): # --------------------- 3. Setting device configuration -------------------------------------------------------- next_step() - for device in devices: - supported_properties = benchmark.core.get_property(device, 'SUPPORTED_PROPERTIES') - if 'PERFORMANCE_HINT' in supported_properties: + def get_performance_hint(device) -> properties.hint.PerformanceMode: + perf_hint = properties.hint.PerformanceMode.UNDEFINED + supported_properties = benchmark.core.get_property(device, properties.supported_properties()) + if properties.hint.performance_mode() in supported_properties: if is_flag_set_in_command_line('hint'): if args.perf_hint=='none': logger.warning(f"No device {device} performance hint is set.") - args.perf_hint = 'UNDEFINED' + perf_hint = properties.hint.PerformanceMode.UNDEFINED + else: + perf_hint = properties.hint.PerformanceMode(args.perf_hint.upper()) else: - args.perf_hint = "THROUGHPUT" if benchmark.api_type == "async" else "LATENCY" + perf_hint = properties.hint.PerformanceMode.THROUGHPUT if benchmark.api_type == "async" else properties.hint.PerformanceMode.LATENCY logger.warning(f"Performance hint was not explicitly specified in command line. " + - f"Device({device}) performance hint will be set to " + args.perf_hint + ".") + f"Device({device}) performance hint will be set to {perf_hint}.") else: logger.warning(f"Device {device} does not support performance hint property(-hint).") + return perf_hint def get_device_type_from_name(name) : new_name = str(name) @@ -142,10 +146,9 @@ def main(): perf_counts = False # check if using the virtual device hw_devices_list = devices.copy() - if_auto = AUTO_DEVICE_NAME in devices - if_multi = MULTI_DEVICE_NAME in devices - # Remove the hardware devices if AUTO/MULTI appears in the devices list. - if if_auto or if_multi: + # Remove the hardware devices if AUTO/MULTI/HETERO appears in the devices list. 
+ is_virtual = is_virtual_device_found(devices) + if is_virtual: devices.clear() # Parse out the currect virtual device as the target device. virtual_device = device_name.partition(":")[0] @@ -155,72 +158,89 @@ def main(): parse_value_for_virtual_device(virtual_device, device_infer_precision) for device in devices: - supported_properties = benchmark.core.get_property(device, 'SUPPORTED_PROPERTIES') + supported_properties = benchmark.core.get_property(device, properties.supported_properties()) if device not in config.keys(): config[device] = {} + ## high-level performance modes + if properties.hint.performance_mode() not in config[device].keys(): + config[device][properties.hint.performance_mode()] = get_performance_hint(device) + + perf_hint = config[device][properties.hint.performance_mode()] + + if is_flag_set_in_command_line('nireq'): + config[device][properties.hint.num_requests()] = str(args.number_infer_requests) + ## Set performance counter if is_flag_set_in_command_line('pc'): ## set to user defined value - config[device]['PERF_COUNT'] = 'YES' if args.perf_counts else 'NO' - elif 'PERF_COUNT' in config[device].keys() and config[device]['PERF_COUNT'] == 'YES': + config[device][properties.enable_profiling()] = True if args.perf_counts else False + elif properties.enable_profiling() in config[device].keys() and config[device][properties.enable_profiling()] == True: logger.warning(f"Performance counters for {device} device is turned on. 
" + "To print results use -pc option.") elif args.report_type in [ averageCntReport, detailedCntReport ]: logger.warning(f"Turn on performance counters for {device} device " + f"since report type is {args.report_type}.") - config[device]['PERF_COUNT'] = 'YES' + config[device][properties.enable_profiling()] = True elif args.exec_graph_path is not None: logger.warning(f"Turn on performance counters for {device} device " + "due to execution graph dumping.") - config[device]['PERF_COUNT'] = 'YES' + config[device][properties.enable_profiling()] = True elif is_flag_set_in_command_line('pcsort'): ## set to default value logger.warning(f"Turn on performance counters for {device} device " + f"since pcsort value is {args.perf_counts_sort}.") - config[device]['PERF_COUNT'] = 'YES' if args.perf_counts_sort else 'NO' + config[device][properties.enable_profiling()] = True if args.perf_counts_sort else False else: ## set to default value - config[device]['PERF_COUNT'] = 'YES' if args.perf_counts else 'NO' - perf_counts = True if config[device]['PERF_COUNT'] == 'YES' else perf_counts - - ## high-level performance hints - config[device]['PERFORMANCE_HINT'] = args.perf_hint.upper() - if is_flag_set_in_command_line('nireq'): - config[device]['PERFORMANCE_HINT_NUM_REQUESTS'] = str(args.number_infer_requests) + config[device][properties.enable_profiling()] = args.perf_counts + perf_counts = True if config[device][properties.enable_profiling()] == True else perf_counts ## insert or append property into hw device properties list def update_configs(hw_device, property_name, property_value): - is_set_streams_auto = property_name == 'NUM_STREAMS' and property_value == 'AUTO' + (key, value) = properties.device.properties({hw_device:{property_name:property_value}}) + is_set_streams_auto = property_name == properties.num_streams() and property_value == properties.streams.Num.AUTO if not is_set_streams_auto and is_load_config and is_dev_set_property[hw_device] and hw_device in 
config[device].keys(): # overwrite the device properties loaded from configuration file if # 1. not setting 'NUM_STREAMS' to default value 'AUTO', # 2. enable loading device properties from configuration file, # 3. device properties in config[device] is loaded from configuration file, and never setting device properties before is_dev_set_property[hw_device] = False - del config[device][hw_device] + del config[device][key] # add property into hw device properties list. - if hw_device not in config[device].keys(): - config[device][hw_device] = ' '.join([property_name, property_value]) + if key not in config[device].keys(): + config[device][key] = value else: - config[device][hw_device] += " " + property_name + " " + property_value + current_config = config[device][key].get() + if hw_device not in current_config.keys(): + current_config.update(value.get()) + else: + current_device_config = current_config[hw_device].get() + for prop in value.get().items(): + current_device_config.update(prop[1].get()) + current_config[hw_device].set(current_device_config) + config[device][key].set(current_config) + + def update_device_config_for_virtual_device(value, config, key): + # check if the element contains the hardware device property + if len(value.split(':')) == 1: + config[device][key] = device_infer_precision[device] + else: + # set device nstreams properties in the AUTO/MULTI plugin + value_vec = value[value.find('{') + 1:value.rfind('}')].split(',') + device_properties = {value_vec[i].split(':')[0] : value_vec[i].split(':')[1] for i in range(0, len(value_vec))} + for hw_device in device_properties.keys(): + update_configs(hw_device, key, device_properties[hw_device]) ## infer precision def set_infer_precision(): + key = properties.hint.inference_precision() if device in device_infer_precision.keys(): ## set to user defined value - if 'INFERENCE_PRECISION_HINT' in supported_properties: - config[device]['INFERENCE_PRECISION_HINT'] = device_infer_precision[device] - elif 
device in [MULTI_DEVICE_NAME, AUTO_DEVICE_NAME]: - # check if the element contains the hardware device property - value_vec = device_infer_precision[device].split(' ') - if len(value_vec) == 1: - config[device]['INFERENCE_PRECISION_HINT'] = device_infer_precision[device] - else: - # set device nstreams properties in the AUTO/MULTI plugin - device_properties = {value_vec[i]: value_vec[i + 1] for i in range(0, len(value_vec), 2)} - for hw_device in device_properties.keys(): - update_configs(hw_device, "INFERENCE_PRECISION_HINT", device_properties[hw_device]) + if key in supported_properties: + config[device][key] = device_infer_precision[device] + elif is_virtual_device(device): + update_device_config_for_virtual_device(device_infer_precision[device], config, key) else: raise Exception(f"Device {device} doesn't support config key INFERENCE_PRECISION_HINT!" \ " Please specify -infer_precision for correct devices in format" \ @@ -234,24 +254,16 @@ def main(): ## set to user defined value if key in supported_properties: config[device][key] = device_number_streams[device] - elif "NUM_STREAMS" in supported_properties: - key = "NUM_STREAMS" + elif properties.streams.num() in supported_properties: + key = properties.streams.num() config[device][key] = device_number_streams[device] - elif device in [MULTI_DEVICE_NAME, AUTO_DEVICE_NAME]: - # check if the element contains the hardware device property - value_vec = device_number_streams[device].split(' ') - if len(value_vec) == 1: - key = "NUM_STREAMS" - config[device][key] = device_number_streams[key] - else: - # set device nstreams properties in the AUTO/MULTI plugin - device_properties = {value_vec[i]: value_vec[i + 1] for i in range(0, len(value_vec), 2)} - for hw_device in device_properties.keys(): - update_configs(hw_device, "NUM_STREAMS", device_properties[hw_device]) + elif is_virtual_device(device): + key = properties.streams.num() + update_device_config_for_virtual_device(device_number_streams[device], config, key) 
else: raise Exception(f"Device {device} doesn't support config key '{key}'! " + "Please specify -nstreams for correct devices in format :,:") - elif key not in config[device].keys() and args.api_type == "async" \ + elif key not in config[device].keys() and args.api_type == "async" and key not in config[device].keys() \ and 'PERFORMANCE_HINT' in config[device].keys() and config[device]['PERFORMANCE_HINT'] == '': ## set the _AUTO value for the #streams logger.warning(f"-nstreams default value is determined automatically for {device} device. " + @@ -259,18 +271,18 @@ def main(): "but it still may be non-optimal for some cases, for more information look at README.") if key in supported_properties: config[device][key] = get_device_type_from_name(device) + "_THROUGHPUT_AUTO" - elif "NUM_STREAMS" in supported_properties: - key = "NUM_STREAMS" + elif properties.streams.Num() in supported_properties: + key = properties.streams.Num() config[device][key] = "-1" # Set AUTO mode for streams number - elif device in [MULTI_DEVICE_NAME, AUTO_DEVICE_NAME]: + elif is_virtual_device(device): # Set nstreams to default value auto if no nstreams specified from cmd line. 
for hw_device in hw_devices_list: - hw_supported_properties = benchmark.core.get_property(hw_device, 'SUPPORTED_PROPERTIES') + hw_supported_properties = benchmark.core.get_property(hw_device, properties.supported_properties()) key = get_device_type_from_name(hw_device) + "_THROUGHPUT_STREAMS" value = get_device_type_from_name(hw_device) + "_THROUGHPUT_AUTO" if key not in hw_supported_properties: - key = "NUM_STREAMS" - value = "AUTO" + key = properties.streams.Num() + value = properties.streams.Num.AUTO if key in hw_supported_properties: update_configs(hw_device, key, value) if key in config[device].keys(): @@ -278,15 +290,15 @@ def main(): return def set_nthreads_pin(property_name, property_value): - if property_name == "AFFINITY": + if property_name == properties.affinity(): if property_value == "YES": - property_value = "CORE" + property_value = properties.Affinity.CORE elif property_value == "NO": - property_value = "NONE" + property_value = properties.Affinity.NONE if property_name in supported_properties or device_name == AUTO_DEVICE_NAME: # create nthreads/pin primary property for HW device or AUTO if -d is AUTO directly. config[device][property_name] = property_value - elif if_auto or if_multi: + elif is_virtual: # Create secondary property of -nthreads/-pin only for CPU if CPU device appears in the devices # list specified by -d. 
if CPU_DEVICE_NAME in hw_devices_list: @@ -295,36 +307,16 @@ def main(): if args.number_threads and is_flag_set_in_command_line("nthreads"): # limit threading for CPU portion of inference - set_nthreads_pin('INFERENCE_NUM_THREADS', str(args.number_threads)) + set_nthreads_pin(properties.inference_num_threads(), str(args.number_threads)) + if is_flag_set_in_command_line('pin'): ## set for CPU to user defined value - set_nthreads_pin('AFFINITY', args.infer_threads_pinning) - if CPU_DEVICE_NAME in device: # CPU supports few special performance-oriented keys - ## for CPU execution, more throughput-oriented execution via streams - set_throughput_streams() - set_infer_precision() - elif GPU_DEVICE_NAME in device: - ## for GPU execution, more throughput-oriented execution via streams - set_throughput_streams() - set_infer_precision() - elif AUTO_DEVICE_NAME in device: - set_throughput_streams() - set_infer_precision() - if device in device_number_streams.keys(): - del device_number_streams[device] - elif MULTI_DEVICE_NAME in device: - set_throughput_streams() - set_infer_precision() - if CPU_DEVICE_NAME in device and GPU_DEVICE_NAME in device: - logger.warning("Turn on GPU throttling. 
Multi-device execution with the CPU + GPU performs best with GPU throttling hint, " + - "which releases another CPU thread (that is otherwise used by the GPU driver for active polling)") - update_configs(GPU_DEVICE_NAME, 'GPU_PLUGIN_THROTTLE', '1') - # limit threading for CPU portion of inference - if not is_flag_set_in_command_line('pin'): - if CPU_DEVICE_NAME in config[device].keys() and 'CPU_BIND_THREAD' in config[device][CPU_DEVICE_NAME]: - logger.warning(f"Turn off threads pinning for {device} " + - "device since multi-scenario with GPU device is used.") - update_configs(CPU_DEVICE_NAME, 'CPU_BIND_THREAD', 'NO') + set_nthreads_pin(properties.affinity(), args.infer_threads_pinning) + + set_throughput_streams() + set_infer_precision() + + if is_virtual_device(device): if device in device_number_streams.keys(): del device_number_streams[device] @@ -419,9 +411,9 @@ def main(): # --------------------- 7. Loading the model to the device ------------------------------------------------- next_step() - start_time = datetime.utcnow() compiled_model = benchmark.core.compile_model(model, benchmark.device, device_config) + duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}" logger.info(f"Compile model took {duration_ms} ms") if statistics: @@ -459,11 +451,20 @@ def main(): next_step() ## actual device-deduced settings - keys = compiled_model.get_property('SUPPORTED_PROPERTIES') + keys = compiled_model.get_property(properties.supported_properties()) logger.info("Model:") for k in keys: - if k not in ('SUPPORTED_METRICS', 'SUPPORTED_CONFIG_KEYS', 'SUPPORTED_PROPERTIES'): - logger.info(f' {k}: {compiled_model.get_property(k)}') + skip_keys = ('SUPPORTED_METRICS', 'SUPPORTED_CONFIG_KEYS', properties.supported_properties()) + if k not in skip_keys: + value = compiled_model.get_property(k) + if k == properties.device.properties(): + for device_key in value.keys(): + logger.info(f' {device_key}:') + for k2, value2 in value.get(device_key).get().items(): 
+ if k2 not in skip_keys: + logger.info(f' {k2}: {value2}') + else: + logger.info(f' {k}: {value}') # Update number of streams for device in device_number_streams.keys(): diff --git a/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py b/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py index 0ec27995ea7..8cdbfcaae86 100644 --- a/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py +++ b/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py @@ -3,11 +3,12 @@ from collections import defaultdict from datetime import timedelta -from openvino.runtime import Core, Model, PartialShape, Dimension, Layout, Type, serialize +import enum +from openvino.runtime import Core, Model, PartialShape, Dimension, Layout, Type, serialize, properties from openvino.preprocess import PrePostProcessor from .constants import DEVICE_DURATION_IN_SECS, UNKNOWN_DEVICE_TYPE, \ - AUTO_DEVICE_NAME, MULTI_DEVICE_NAME + AUTO_DEVICE_NAME, MULTI_DEVICE_NAME, HETERO_DEVICE_NAME from .logging import logger import json @@ -275,6 +276,12 @@ def can_measure_as_static(app_input_info): return False return True +meta_plugins = [ MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, AUTO_DEVICE_NAME ] +def is_virtual_device(device_name) -> bool: + return device_name in meta_plugins + +def is_virtual_device_found(device_names) -> bool: + return any(is_virtual_device(device_name) for device_name in device_names) def parse_devices(device_string): result = [] @@ -323,8 +330,8 @@ def parse_value_for_virtual_device(device, values_string): # Remove the element that the key is virtual device MULTI # e.g. MULTI:xxx -nstreams 2 will set nstreams 2 to xxx. values_string.pop(device) - elif device == AUTO_DEVICE_NAME: - # Just keep the element that the key is virtual device AUTO + elif device == AUTO_DEVICE_NAME or device == HETERO_DEVICE_NAME: + # Just keep the element that the key is virtual device AUTO/HETERO # e.g. 
AUTO:xxx,xxx -nstreams 2 will trigger exception that AUTO plugin didn't support nstream property. value = values_string.get(device) values_string.clear() @@ -332,11 +339,15 @@ def parse_value_for_virtual_device(device, values_string): keys = values_string.keys() for key in list(values_string): if device not in list(values_string): - values_string[device] = '' - values_string[device] += key + " " + values_string.get(key) + " " + values_string[device] = '{' + else: + values_string[device] += ',' + values_string[device] += key + ":" + values_string.get(key) del values_string[key] if device in values_string.keys(): values_string[device] = values_string[device].strip() + if values_string[device] != '': + values_string[device] += '}' return def process_help_inference_string(benchmark_app, device_number_streams): @@ -402,14 +413,14 @@ def print_perf_counters_sort(perf_counts_list,sort_flag="sort"): elif sort_flag=="simple_sort": total_detail_data = sorted(total_detail_data,key=lambda tmp_data:tmp_data[-4],reverse=True) total_detail_data = [tmp_data for tmp_data in total_detail_data if str(tmp_data[1])!="Status.NOT_RUN"] - print_detail_result(total_detail_data) + print_detail_result(total_detail_data) print(f'Total time: {total_time / 1000:.3f} milliseconds') print(f'Total CPU time: {total_time_cpu / 1000:.3f} milliseconds') print(f'Total proportion: {"%.2f"%(round(total_real_time_proportion)*100)} % \n') return total_detail_data def print_detail_result(result_list): - """ Print_perf_counters_sort result + """ Print_perf_counters_sort result """ max_print_length = 20 for tmp_result in result_list: @@ -748,28 +759,15 @@ def show_available_devices(): def dump_config(filename, config): - properties = {} - for device in config: - properties[device] = {} - supported_properties = Core().get_property(device, 'SUPPORTED_PROPERTIES') - # check if ov::device::properties exists in the config - if device not in (AUTO_DEVICE_NAME, MULTI_DEVICE_NAME): - properties[device] = 
config[device] - continue - for property_name in config[device]: - property_value = config[device][property_name] - if property_name in supported_properties: - properties[device][property_name] = property_value - else: - properties[device].setdefault('DEVICE_PROPERTIES', {}) - properties[device]['DEVICE_PROPERTIES'].setdefault(property_name, {}) - array = property_value.split(' ') - properties_dict = {array[i]: array[i + 1] for i in range(0, len(array), 2)} - for key in properties_dict: - properties[device]['DEVICE_PROPERTIES'][property_name][key] = properties_dict[key] + json_config = {} + for device_name, device_config in config.items(): + json_config[device_name] = {} + for key, value in device_config.items(): + value_string = value.name if isinstance(value, properties.hint.PerformanceMode) else str(value) + json_config[device_name][key] = value_string with open(filename, 'w') as f: - json.dump(properties, f, indent=4) + json.dump(json_config, f, indent=4) def load_config(filename, config): @@ -778,14 +776,4 @@ def load_config(filename, config): for device in original_config: config[device] = {} for property_name in original_config[device]: - property_value = original_config[device][property_name] - if property_name != 'DEVICE_PROPERTIES': - config[device][property_name] = property_value - continue - for hw_device in property_value: - hw_device_config = property_value[hw_device] - array = "" - for key in hw_device_config: - value = hw_device_config[key] - array += key + ' ' + value + ' ' - config[device][hw_device] = array.strip() + config[device][property_name] = original_config[device][property_name] \ No newline at end of file