diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp index 14414c37331..7c519bc35d9 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp @@ -20,13 +20,13 @@ const std::vector netPrecisions = { ov::element::f16, ov::element::f32, }; -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_TEMPLATE}}}; -const std::vector> heteroConfigs = { +const std::vector heteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 09d5131fbcc..643464dbc76 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -9,7 +9,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index 8fe367d2619..93a979523e7 100644 --- 
a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -7,7 +7,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index ec547842e24..6423833d678 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -10,11 +10,11 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> HeteroConfigs = { +const std::vector HeteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestDynamicTests, diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index 846840f3a78..dc12f8e1f60 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -9,11 +9,11 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> HeteroConfigs = { +const std::vector HeteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}}; 
INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChaining, diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 160e19c6869..02e02231a7c 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -9,7 +9,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; @@ -38,12 +38,12 @@ std::vector prcs = { ov::element::u64, }; -const std::vector> emptyConfigs = {{}}; +const std::vector emptyConfigs = {{}}; -const std::vector> HeteroConfigs = { +const std::vector HeteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_TEMPLATE}}}; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_TEMPLATE}} }; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index 88b55b4fa5d..ccaf590ca30 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -10,7 +10,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp index b4a67559c44..583d6e440fd 100644 --- 
a/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -9,7 +9,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp index b05c660ef5a..3558aae7131 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -110,7 +110,7 @@ std::vector eltwiseOpTypesSingleThread = { ngraph::helpers::EltwiseTypes::POWER, }; -std::map additional_config_single_thread = { +ov::AnyMap additional_config_single_thread = { {"CPU_THREADS_NUM", "1"} }; diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp index 6c15e50e9cb..2950d6bc02a 100644 --- a/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp @@ -40,7 +40,7 @@ const auto params2D_static = testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape2D)), testing::ValuesIn(axis2D), testing::Values(CommonTestUtils::DEVICE_TEMPLATE), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); const auto params2D_dynamic = testing::Combine( @@ -50,7 +50,7 @@ const auto params2D_dynamic = testing::Combine( testing::ValuesIn(inputDynamicShape2D), testing::ValuesIn(axis2D), testing::Values(CommonTestUtils::DEVICE_TEMPLATE), - testing::Values(std::map()) + 
testing::Values(ov::AnyMap()) ); INSTANTIATE_TEST_SUITE_P( @@ -88,7 +88,7 @@ const auto params4Dstatic = testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape4D)), testing::ValuesIn(axis4D), testing::Values(CommonTestUtils::DEVICE_TEMPLATE), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); const auto params4Ddynamic = testing::Combine( @@ -98,7 +98,7 @@ const auto params4Ddynamic = testing::Combine( testing::ValuesIn(inputDynamicShape4D), testing::ValuesIn(axis4D), testing::Values(CommonTestUtils::DEVICE_TEMPLATE), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); INSTANTIATE_TEST_SUITE_P( diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index b953ce90150..024a3f05d48 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ b/samples/cpp/benchmark_app/main.cpp @@ -158,8 +158,7 @@ int main(int argc, char* argv[]) { std::map device_nstreams = parse_nstreams_value_per_device(devices, FLAGS_nstreams); // Load device config file if specified - std::map> config; - + std::map config; if (!FLAGS_load_config.empty()) { load_config(FLAGS_load_config, config); } @@ -187,15 +186,15 @@ int main(int argc, char* argv[]) { config["GPU"][CONFIG_KEY(CONFIG_FILE)] = FLAGS_c; } if (config.count("GPU") && config.at("GPU").count(CONFIG_KEY(CONFIG_FILE))) { - auto ext = config.at("GPU").at(CONFIG_KEY(CONFIG_FILE)); - core.set_config({{CONFIG_KEY(CONFIG_FILE), ext}}, "GPU"); + auto ext = config.at("GPU").at(CONFIG_KEY(CONFIG_FILE)).as(); + core.set_property("GPU", {{CONFIG_KEY(CONFIG_FILE), ext}}); slog::info << "GPU extensions is loaded " << ext << slog::endl; } if (FLAGS_hint.empty()) { for (auto& device : devices) { std::vector supported_config_keys = - core.get_metric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + core.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); if (std::find(supported_config_keys.begin(), supported_config_keys.end(), CONFIG_KEY(PERFORMANCE_HINT)) != 
supported_config_keys.end()) { @@ -244,7 +243,7 @@ int main(int argc, char* argv[]) { for (auto& device : devices) { if (!config.count(device)) config[device] = {}; - std::map& device_config = config.at(device); + auto& device_config = config.at(device); // high-level performance modes if (!ov_perf_hint.empty()) { @@ -258,7 +257,7 @@ int main(int argc, char* argv[]) { // set to user defined value device_config[CONFIG_KEY(PERF_COUNT)] = FLAGS_pc ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO); } else if (device_config.count(CONFIG_KEY(PERF_COUNT)) && - (device_config.at(CONFIG_KEY(PERF_COUNT)) == "YES")) { + (device_config.at(CONFIG_KEY(PERF_COUNT)).as() == "YES")) { slog::warn << "Performance counters for " << device << " device is turned on. To print results use -pc option." << slog::endl; } else if (FLAGS_report_type == detailedCntReport || FLAGS_report_type == averageCntReport) { @@ -273,7 +272,8 @@ int main(int argc, char* argv[]) { // set to default value device_config[CONFIG_KEY(PERF_COUNT)] = FLAGS_pc ? CONFIG_VALUE(YES) : CONFIG_VALUE(NO); } - perf_counts = (device_config.at(CONFIG_KEY(PERF_COUNT)) == CONFIG_VALUE(YES)) ? true : perf_counts; + perf_counts = + (device_config.at(CONFIG_KEY(PERF_COUNT)).as() == CONFIG_VALUE(YES)) ? true : perf_counts; // the rest are individual per-device settings (overriding the values set with perf modes) auto setThroughputStreams = [&]() { @@ -281,7 +281,7 @@ int main(int argc, char* argv[]) { if (device_nstreams.count(device)) { // set to user defined value std::vector supported_config_keys = - core.get_metric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + core.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); if (std::find(supported_config_keys.begin(), supported_config_keys.end(), key) == supported_config_keys.end()) { throw std::logic_error("Device " + device + " doesn't support config key '" + key + "'! 
" + @@ -303,7 +303,7 @@ int main(int argc, char* argv[]) { device_config[key] = std::string(getDeviceTypeFromName(device) + "_THROUGHPUT_AUTO"); } if (device_config.count(key)) - device_nstreams[device] = device_config.at(key); + device_nstreams[device] = device_config.at(key).as(); }; if (device.find("CPU") != std::string::npos) { // CPU supports few special performance-oriented keys @@ -351,7 +351,7 @@ int main(int argc, char* argv[]) { device_config[GNA_CONFIG_KEY(PRECISION)] = "I16"; } else { std::vector supported_config_keys = - core.get_metric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + core.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); auto supported = [&](const std::string& key) { return std::find(std::begin(supported_config_keys), std::end(supported_config_keys), key) != std::end(supported_config_keys); @@ -369,7 +369,7 @@ int main(int argc, char* argv[]) { } for (auto&& item : config) { - core.set_config(item.second, item.first); + core.set_property(item.first, item.second); } size_t batchSize = FLAGS_b; @@ -380,7 +380,7 @@ int main(int argc, char* argv[]) { // Takes priority over config from file if (!FLAGS_cache_dir.empty()) { - core.set_config({{CONFIG_KEY(CACHE_DIR), FLAGS_cache_dir}}); + core.set_property({{CONFIG_KEY(CACHE_DIR), FLAGS_cache_dir}}); } bool isDynamicNetwork = false; @@ -464,7 +464,7 @@ int main(int argc, char* argv[]) { next_step(); auto preproc = ov::preprocess::PrePostProcessor(model); - ov::ConfigMap user_precisions_map; + std::map user_precisions_map; if (!FLAGS_iop.empty()) { user_precisions_map = parseArgMap(FLAGS_iop); } @@ -618,11 +618,12 @@ int main(int argc, char* argv[]) { next_step(); // output of the actual settings that the device selected for (const auto& device : devices) { - std::vector supported_config_keys = core.get_metric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + std::vector supported_config_keys = + core.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); slog::info << "Device: " << device 
<< slog::endl; for (const auto& cfg : supported_config_keys) { try { - slog::info << " {" << cfg << " , " << compiledModel.get_config(cfg).as(); + slog::info << " {" << cfg << " , " << compiledModel.get_property(cfg).as(); slog::info << " }" << slog::endl; } catch (...) { }; @@ -632,7 +633,7 @@ int main(int argc, char* argv[]) { // Update number of streams for (auto&& ds : device_nstreams) { const std::string key = getDeviceTypeFromName(ds.first) + "_THROUGHPUT_STREAMS"; - device_nstreams[ds.first] = core.get_config(ds.first, key).as(); + device_nstreams[ds.first] = core.get_property(ds.first, key).as(); } // Number of requests @@ -643,7 +644,7 @@ int main(int argc, char* argv[]) { } else { std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS); try { - nireq = compiledModel.get_metric(key).as(); + nireq = compiledModel.get_property(key).as(); } catch (const std::exception& ex) { IE_THROW() << "Every device used with the benchmark_app should " << "support OPTIMAL_NUMBER_OF_INFER_REQUESTS metric. " diff --git a/samples/cpp/benchmark_app/utils.cpp b/samples/cpp/benchmark_app/utils.cpp index fde8f0d261e..65dbed8e5e7 100644 --- a/samples/cpp/benchmark_app/utils.cpp +++ b/samples/cpp/benchmark_app/utils.cpp @@ -647,7 +647,7 @@ std::vector get_inputs_info(const std::string& shape_ } #ifdef USE_OPENCV -void dump_config(const std::string& filename, const std::map>& config) { +void dump_config(const std::string& filename, const std::map& config) { slog::warn << "YAML and XML formats for config file won't be supported soon." 
<< slog::endl; auto plugin_to_opencv_format = [](const std::string& str) -> std::string { if (str.find("_") != std::string::npos) { @@ -668,14 +668,18 @@ void dump_config(const std::string& filename, const std::mapfirst) << "{:"; - for (auto param_it = device_it->second.begin(); param_it != device_it->second.end(); ++param_it) - fs << param_it->first << param_it->second; + std::stringstream strm; + for (auto param_it = device_it->second.begin(); param_it != device_it->second.end(); ++param_it) { + strm << param_it->first; + param_it->second.print(strm); + } + fs << strm.str(); fs << "}"; } fs.release(); } -void load_config(const std::string& filename, std::map>& config) { +void load_config(const std::string& filename, std::map& config) { slog::warn << "YAML and XML formats for config file won't be supported soon." << slog::endl; auto opencv_to_plugin_format = [](const std::string& str) -> std::string { std::string new_str(str); @@ -701,12 +705,14 @@ void load_config(const std::string& filename, std::map>& config) { +void dump_config(const std::string& filename, const std::map& config) { nlohmann::json jsonConfig; for (const auto& item : config) { std::string deviceName = item.first; for (const auto& option : item.second) { - jsonConfig[deviceName][option.first] = option.second; + std::stringstream strm; + option.second.print(strm); + jsonConfig[deviceName][option.first] = strm.str(); } } @@ -718,7 +724,7 @@ void dump_config(const std::string& filename, const std::map>& config) { +void load_config(const std::string& filename, std::map& config) { std::ifstream ifs(filename); if (!ifs.is_open()) { throw std::runtime_error("Can't load config file \"" + filename + "\"."); diff --git a/samples/cpp/benchmark_app/utils.hpp b/samples/cpp/benchmark_app/utils.hpp index d649bbfed17..a331d591a89 100644 --- a/samples/cpp/benchmark_app/utils.hpp +++ b/samples/cpp/benchmark_app/utils.hpp @@ -122,8 +122,8 @@ std::vector get_inputs_info(const std::string& shape_ const std::string& 
mean_string, const std::vector>& input_info); -void dump_config(const std::string& filename, const std::map>& config); -void load_config(const std::string& filename, std::map>& config); +void dump_config(const std::string& filename, const std::map& config); +void load_config(const std::string& filename, std::map& config); extern const std::vector supported_image_extensions; extern const std::vector supported_binary_extensions; diff --git a/samples/cpp/common/utils/include/samples/common.hpp b/samples/cpp/common/utils/include/samples/common.hpp index 64d3bd23b39..16d6889f82e 100644 --- a/samples/cpp/common/utils/include/samples/common.hpp +++ b/samples/cpp/common/utils/include/samples/common.hpp @@ -979,7 +979,7 @@ std::map parseConfig(const std::string& configName, ch inline std::string getFullDeviceName(ov::Core& core, std::string device) { ov::Any p; try { - p = core.get_metric(device, METRIC_KEY(FULL_DEVICE_NAME)); + p = core.get_property(device, METRIC_KEY(FULL_DEVICE_NAME)); return p.as(); } catch (ov::Exception&) { return ""; diff --git a/samples/cpp/hello_query_device/main.cpp b/samples/cpp/hello_query_device/main.cpp index 31673560a25..0ec17053d69 100644 --- a/samples/cpp/hello_query_device/main.cpp +++ b/samples/cpp/hello_query_device/main.cpp @@ -107,11 +107,11 @@ int main(int argc, char* argv[]) { // Query supported metrics and print all of them slog::info << "\tSUPPORTED_METRICS: " << slog::endl; - std::vector supportedMetrics = core.get_metric(device, METRIC_KEY(SUPPORTED_METRICS)); + std::vector supportedMetrics = core.get_property(device, METRIC_KEY(SUPPORTED_METRICS)); for (auto&& metricName : supportedMetrics) { if (metricName != METRIC_KEY(SUPPORTED_METRICS) && metricName != METRIC_KEY(SUPPORTED_CONFIG_KEYS)) { slog::info << "\t\t" << metricName << " : " << slog::flush; - print_any_value(core.get_metric(device, metricName)); + print_any_value(core.get_property(device, metricName)); } } @@ -120,10 +120,10 @@ int main(int argc, char* argv[]) { 
supportedMetrics.end()) { slog::info << "\tSUPPORTED_CONFIG_KEYS (default values): " << slog::endl; std::vector supportedConfigKeys = - core.get_metric(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + core.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); for (auto&& configKey : supportedConfigKeys) { slog::info << "\t\t" << configKey << " : " << slog::flush; - print_any_value(core.get_config(device, configKey)); + print_any_value(core.get_property(device, configKey)); } } diff --git a/samples/cpp/speech_sample/main.cpp b/samples/cpp/speech_sample/main.cpp index 504e284947e..01cd3fac734 100644 --- a/samples/cpp/speech_sample/main.cpp +++ b/samples/cpp/speech_sample/main.cpp @@ -106,8 +106,8 @@ int main(int argc, char* argv[]) { // ----------------------------------------------------------------------------------------------------- // --------------------------- Set parameters and scale factors ------------------------------------- /** Setting parameter for per layer metrics **/ - std::map gnaPluginConfig; - std::map genericPluginConfig; + ov::AnyMap gnaPluginConfig; + ov::AnyMap genericPluginConfig; if (useGna) { std::string gnaDevice = useHetero ? 
FLAGS_d.substr(FLAGS_d.find("GNA"), FLAGS_d.find(",") - FLAGS_d.find("GNA")) : FLAGS_d; diff --git a/src/bindings/python/src/pyopenvino/core/common.cpp b/src/bindings/python/src/pyopenvino/core/common.cpp index a43ff60dc8e..85b1e34319e 100644 --- a/src/bindings/python/src/pyopenvino/core/common.cpp +++ b/src/bindings/python/src/pyopenvino/core/common.cpp @@ -296,11 +296,11 @@ PyAny from_ov_any(const ov::Any& any) { uint32_t get_optimal_number_of_requests(const ov::CompiledModel& actual) { try { - auto parameter_value = actual.get_metric(METRIC_KEY(SUPPORTED_METRICS)); + auto parameter_value = actual.get_property(METRIC_KEY(SUPPORTED_METRICS)); auto supported_metrics = parameter_value.as>(); const std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS); if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) { - parameter_value = actual.get_metric(key); + parameter_value = actual.get_property(key); if (parameter_value.is()) return parameter_value.as(); else diff --git a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp index 2f36abd63ef..0787980efbd 100644 --- a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp +++ b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp @@ -43,14 +43,14 @@ void regclass_CompiledModel(py::module m) { cls.def( "get_config", [](ov::CompiledModel& self, const std::string& name) -> py::object { - return Common::from_ov_any(self.get_config(name)).as(); + return Common::from_ov_any(self.get_property(name)).as(); }, py::arg("name")); cls.def( "get_metric", [](ov::CompiledModel& self, const std::string& name) -> py::object { - return Common::from_ov_any(self.get_metric(name)).as(); + return Common::from_ov_any(self.get_property(name)).as(); }, py::arg("name")); diff --git a/src/bindings/python/src/pyopenvino/core/core.cpp b/src/bindings/python/src/pyopenvino/core/core.cpp index 684d9a082d6..db61876f10a 100644 
--- a/src/bindings/python/src/pyopenvino/core/core.cpp +++ b/src/bindings/python/src/pyopenvino/core/core.cpp @@ -27,36 +27,55 @@ void regclass_Core(py::module m) { cls.def(py::init(), py::arg("xml_config_file") = ""); - cls.def("set_config", - (void (ov::Core::*)(const ConfigMap&, const std::string&)) & ov::Core::set_config, - py::arg("config"), - py::arg("device_name") = ""); + cls.def( + "set_config", + [](ov::Core& self, const std::map& config, const std::string& device_name) { + self.set_property(device_name, {config.begin(), config.end()}); + }, + py::arg("config"), + py::arg("device_name") = ""); - cls.def("compile_model", - (ov::CompiledModel( - ov::Core::*)(const std::shared_ptr&, const std::string&, const ConfigMap&)) & - ov::Core::compile_model, - py::arg("model"), - py::arg("device_name"), - py::arg("config") = py::dict()); + cls.def( + "compile_model", + [](ov::Core& self, + const std::shared_ptr& model, + const std::string& device_name, + const std::map& config) { + return self.compile_model(model, device_name, {config.begin(), config.end()}); + }, + py::arg("model"), + py::arg("device_name"), + py::arg("config") = py::dict()); - cls.def("compile_model", - (ov::CompiledModel(ov::Core::*)(const std::shared_ptr&, const ConfigMap&)) & - ov::Core::compile_model, - py::arg("model"), - py::arg("config") = py::dict()); + cls.def( + "compile_model", + [](ov::Core& self, + const std::shared_ptr& model, + const std::map& config) { + return self.compile_model(model, ov::AnyMap{config.begin(), config.end()}); + }, + py::arg("model"), + py::arg("config") = py::dict()); - cls.def("compile_model", - (ov::CompiledModel(ov::Core::*)(const std::string&, const std::string&, const ConfigMap&)) & - ov::Core::compile_model, - py::arg("model_path"), - py::arg("device_name"), - py::arg("config") = py::dict()); + cls.def( + "compile_model", + [](ov::Core& self, + const std::string& model_path, + const std::string& device_name, + const std::map& config) { + return 
self.compile_model(model_path, device_name, {config.begin(), config.end()}); + }, + py::arg("model_path"), + py::arg("device_name"), + py::arg("config") = py::dict()); - cls.def("compile_model", - (ov::CompiledModel(ov::Core::*)(const std::string&, const ConfigMap&)) & ov::Core::compile_model, - py::arg("model_path"), - py::arg("config") = py::dict()); + cls.def( + "compile_model", + [](ov::Core& self, const std::string& model_path, const std::map& config) { + return self.compile_model(model_path, ov::AnyMap{config.begin(), config.end()}); + }, + py::arg("model_path"), + py::arg("config") = py::dict()); cls.def("get_versions", &ov::Core::get_versions, py::arg("device_name")); @@ -102,7 +121,12 @@ void regclass_Core(py::module m) { cls.def( "import_model", - (ov::CompiledModel(ov::Core::*)(std::istream&, const std::string&, const ConfigMap&)) & ov::Core::import_model, + [](ov::Core& self, + std::istream& model_file, + const std::string& device_name, + const std::map& config) { + return self.import_model(model_file, device_name, {config.begin(), config.end()}); + }, py::arg("model_file"), py::arg("device_name"), py::arg("config") = py::none()); @@ -110,7 +134,7 @@ void regclass_Core(py::module m) { cls.def( "get_config", [](ov::Core& self, const std::string& device_name, const std::string& name) -> py::object { - return Common::from_ov_any(self.get_config(device_name, name)).as(); + return Common::from_ov_any(self.get_property(device_name, name)).as(); }, py::arg("device_name"), py::arg("name")); @@ -118,7 +142,7 @@ void regclass_Core(py::module m) { cls.def( "get_metric", [](ov::Core& self, const std::string device_name, const std::string name) -> py::object { - return Common::from_ov_any(self.get_metric(device_name, name)).as(); + return Common::from_ov_any(self.get_property(device_name, name)).as(); }, py::arg("device_name"), py::arg("name")); @@ -129,13 +153,17 @@ void regclass_Core(py::module m) { cls.def("unload_plugin", &ov::Core::unload_plugin, 
py::arg("device_name")); - cls.def("query_model", - (ov::SupportedOpsMap( - ov::Core::*)(const std::shared_ptr&, const std::string&, const ConfigMap&)) & - ov::Core::query_model, - py::arg("model"), - py::arg("device_name"), - py::arg("config") = py::dict()); + cls.def( + "query_model", + [](ov::Core& self, + const std::shared_ptr& model, + const std::string& device_name, + const std::map& config) { + return self.query_model(model, device_name, {config.begin(), config.end()}); + }, + py::arg("model"), + py::arg("device_name"), + py::arg("config") = py::dict()); cls.def("add_extension", static_cast(&ov::Core::add_extension), diff --git a/src/core/include/openvino/core/any.hpp b/src/core/include/openvino/core/any.hpp index 2743dcfbd02..74b805f1b31 100644 --- a/src/core/include/openvino/core/any.hpp +++ b/src/core/include/openvino/core/any.hpp @@ -29,6 +29,7 @@ namespace ov { class Node; class RuntimeAttribute; + class CompiledModel; class RemoteContext; class RemoteTensor; @@ -59,6 +60,19 @@ class OPENVINO_API Any { constexpr static const auto value = std::is_same(nullptr))>::value; }; + template + struct Istreamable { + template + static auto test(U*) -> decltype(std::declval() >> std::declval(), std::true_type()) { + return {}; + } + template + static auto test(...) 
-> std::false_type { + return {}; + } + constexpr static const auto value = std::is_same(nullptr))>::value; + }; + template struct EqualityComparable { static void* conv(bool); @@ -160,6 +174,7 @@ class OPENVINO_API Any { virtual Base::Ptr copy() const = 0; virtual bool equal(const Base& rhs) const = 0; virtual void print(std::ostream& os) const = 0; + virtual void read(std::istream& os) = 0; virtual const DiscreteTypeInfo& get_type_info() const = 0; virtual std::shared_ptr as_runtime_attribute() const; @@ -260,6 +275,10 @@ class OPENVINO_API Any { os << runtime_attribute->to_string(); } + void read(std::istream&) override { + throw ov::Exception{"Pointer to runtime attribute is not readable from std::istream"}; + } + T runtime_attribute; }; @@ -328,6 +347,20 @@ class OPENVINO_API Any { print_impl(os, value); } + template + static typename std::enable_if::value>::type read_impl(std::istream& is, U& value) { + is >> value; + } + + template + static typename std::enable_if::value>::type read_impl(std::istream&, U&) { + throw ov::Exception{"Could print type without std::istream& operator>>(std::istream&, T) defined"}; + } + + void read(std::istream& is) override { + read_impl(is, value); + } + T value; }; @@ -602,6 +635,14 @@ public: throw ov::Exception{std::string{"Bad cast from: "} + _impl->type_info().name() + " to: " + typeid(T).name()}; } + operator bool&() & = delete; + + operator const bool&() const& = delete; + + operator const bool() const& = delete; + + operator const bool &&() && = delete; + /** * @brief Converts to specified type * @tparam T type @@ -689,6 +730,15 @@ public: */ void print(std::ostream& stream) const; + /** + * @brief Read into underlying object from the given input stream. + * Uses operator>> if it is defined, leaves stream unchanged otherwise. + * In case of empty any or nullptr stream immediately returns. + * + * @param stream Output stream object will be printed to. 
+ */ + void read(std::istream& stream); + /** * @brief Return pointer to underlined interface * @return underlined interface @@ -725,7 +775,9 @@ struct AsTypePtr { }; } // namespace util -using RTMap = std::map; +using AnyMap = std::map; + +using RTMap = AnyMap; using AnyVector = std::vector; diff --git a/src/core/src/any.cpp b/src/core/src/any.cpp index 9177170c743..6dfb4fa22f5 100644 --- a/src/core/src/any.cpp +++ b/src/core/src/any.cpp @@ -70,6 +70,12 @@ void Any::print(std::ostream& ostream) const { } } +void Any::read(std::istream& istream) { + if (_impl != nullptr) { + _impl->read(istream); + } +} + bool Any::operator==(const Any& other) const { if (_impl == nullptr && other._impl == nullptr) { return false; diff --git a/src/core/tests/any.cpp b/src/core/tests/any.cpp index 6b03c13a97c..88490cdfffd 100644 --- a/src/core/tests/any.cpp +++ b/src/core/tests/any.cpp @@ -336,6 +336,16 @@ TEST_F(AnyTests, PrintToIntAny) { ASSERT_EQ(stream.str(), std::to_string(value)); } +TEST_F(AnyTests, ReadToIntAny) { + int value = -5; + std::stringstream strm; + strm << value; + Any p = int{}; + ASSERT_NO_THROW(p.read(strm)); + ASSERT_FALSE(strm.fail()); + ASSERT_EQ(value, p.as()); +} + TEST_F(AnyTests, PrintToUIntAny) { unsigned int value = 5; Any p = value; diff --git a/src/inference/dev_api/ie_icore.hpp b/src/inference/dev_api/ie_icore.hpp index ea4dbc15739..5a0a6bd9241 100644 --- a/src/inference/dev_api/ie_icore.hpp +++ b/src/inference/dev_api/ie_icore.hpp @@ -169,6 +169,15 @@ public: virtual InferenceEngine::RemoteContext::Ptr CreateContext(const std::string& deviceName, const InferenceEngine::ParamMap&) = 0; + /** + * @brief Get only configs that are suppored by device + * @param deviceName Name of a device + * @param config Map of configs that can contains configs that are not supported by device + * @return map of configs that are supported by device + */ + virtual std::map GetSupportedConfig(const std::string& deviceName, + const std::map& config) = 0; + virtual bool 
isNewAPI() const = 0; /** diff --git a/src/inference/include/ie/ie_parameter.hpp b/src/inference/include/ie/ie_parameter.hpp index c63c8407a6a..1856739ba61 100644 --- a/src/inference/include/ie/ie_parameter.hpp +++ b/src/inference/include/ie/ie_parameter.hpp @@ -20,7 +20,8 @@ #include #include "ie_blob.h" -#include "openvino/runtime/parameter.hpp" +#include "openvino/core/any.hpp" +#include "openvino/core/except.hpp" namespace InferenceEngine { @@ -28,6 +29,6 @@ namespace InferenceEngine { * @brief Alias for type that can store any value */ using Parameter = ov::Any; -using ov::ParamMap; +using ParamMap = ov::AnyMap; } // namespace InferenceEngine diff --git a/src/inference/include/openvino/runtime/common.hpp b/src/inference/include/openvino/runtime/common.hpp index 8aa8bbf280f..8e4ae6178f1 100644 --- a/src/inference/include/openvino/runtime/common.hpp +++ b/src/inference/include/openvino/runtime/common.hpp @@ -9,10 +9,10 @@ */ #pragma once -#include #include #include +#include "openvino/core/any.hpp" #include "openvino/core/visibility.hpp" #if defined(OPENVINO_STATIC_LIBRARY) || defined(USE_STATIC_IE) @@ -43,10 +43,6 @@ namespace InferenceEngine {} namespace ov { namespace ie = InferenceEngine; -/** - * @brief This type of map is commonly used to pass set of device configuration parameters - */ -using ConfigMap = std::map; /** * @brief This type of map is used for result of Core::query_model @@ -56,7 +52,6 @@ using ConfigMap = std::map; using SupportedOpsMap = std::map; namespace runtime { -using ov::ConfigMap; using ov::SupportedOpsMap; } // namespace runtime diff --git a/src/inference/include/openvino/runtime/compiled_model.hpp b/src/inference/include/openvino/runtime/compiled_model.hpp index 0b715f3435c..d7df1ac7e9b 100644 --- a/src/inference/include/openvino/runtime/compiled_model.hpp +++ b/src/inference/include/openvino/runtime/compiled_model.hpp @@ -18,7 +18,7 @@ #include "openvino/core/model.hpp" #include "openvino/runtime/infer_request.hpp" -#include 
"openvino/runtime/parameter.hpp" +#include "openvino/runtime/properties.hpp" #include "openvino/runtime/remote_context.hpp" namespace InferenceEngine { @@ -50,6 +50,8 @@ class OPENVINO_RUNTIME_API CompiledModel { friend class ov::Core; friend class ov::InferRequest; + void get_property(const std::string& name, ov::Any& to) const; + public: /** * @brief A default constructor. @@ -62,7 +64,7 @@ public: ~CompiledModel(); /** - * @brief Get executable model information from a device + * @brief Get runtime model information from a device * This object represents the internal device specific model which is optimized for particular * accelerator. It contains device specific nodes, runtime information and can be used only * to understand how the source model is optimized and which kernels, element types and layouts @@ -159,34 +161,52 @@ public: void export_model(std::ostream& model_stream); /** - * @brief Sets configuration for current compiled model - * @param config Map of pairs: (config parameter name, config parameter value) + * @brief Sets properties for current compiled model + * + * @param properties Map of pairs: (property name, property value) */ - void set_config(const ParamMap& config); + void set_property(const AnyMap& properties); - /** @brief Gets configuration for a compiled model. + /** + * @brief Sets properties for current compiled model + * + * @tparam Properties Should be the pack of `std::pair` types + * @param properties Optional pack of pairs: (property name, property value) + * @return nothing + */ + template + util::EnableIfAllProperties set_property(Properties&&... properties) { + set_property(AnyMap{std::forward(properties)...}); + } + + /** @brief Gets properties for current compiled model * * The method is responsible to extract information * which affects compiled model inference. 
The list of supported configuration values can be extracted via - * CompiledModel::get_metric with the SUPPORTED_CONFIG_KEYS key, but some of these keys cannot be changed - * dynamically, e.g. DEVICE_ID cannot changed if a compiled model has already been compiled for particular + * CompiledModel::get_property with the ov::supported_properties key, but some of these keys cannot be changed + * dynamically, e.g. ov::device::id cannot changed if a compiled model has already been compiled for particular * device. * - * @param key_name config key, can be found in ie_plugin_config.hpp - * @return Configuration parameter value + * @param name property key, can be found in openvino/runtime/properties.hpp + * @return Property value */ - Any get_config(const std::string& key_name) const; + Any get_property(const std::string& name) const; /** - * @brief Gets general runtime metric for a compiled model. + * @brief Gets properties dedicated to device behaviour. * - * It can be model name, actual device ID on - * which compiled model is running or all other properties which cannot be changed dynamically. + * The method is targeted to extract information which can be set via set_property method. * - * @param metric_name metric name to request - * @return Metric parameter value + * @tparam T - type of returned value + * @param property - property object. + * @return Value of property. 
*/ - Any get_metric(const std::string& metric_name) const; + template + T get_property(const ov::Property& property) const { + auto to = Any::make(); + get_property(property.name(), to); + return to.template as(); + } /** * @brief Returns pointer to device-specific shared context diff --git a/src/inference/include/openvino/runtime/core.hpp b/src/inference/include/openvino/runtime/core.hpp index cd8960b671f..22c1f96603c 100644 --- a/src/inference/include/openvino/runtime/core.hpp +++ b/src/inference/include/openvino/runtime/core.hpp @@ -42,6 +42,8 @@ class OPENVINO_RUNTIME_API Core { class Impl; std::shared_ptr _impl; + void get_property(const std::string& device_name, const std::string& name, ov::Any& to) const; + public: /** @brief Constructs OpenVINO Core instance using XML configuration file with * devices and their plugins description. @@ -116,11 +118,32 @@ public: * them simultaneously (up to the limitation of the hardware resources) * * @param model Model object acquired from Core::read_model - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation * @return A compiled model */ - CompiledModel compile_model(const std::shared_ptr& model, const ConfigMap& config = {}); + CompiledModel compile_model(const std::shared_ptr& model, const AnyMap& properties = {}); + + /** + * @brief Creates and loads a compiled model from a source model to the default OpenVINO device selected by AUTO + * plugin. 
+ * + * Users can create as many compiled models as they need and use + * them simultaneously (up to the limitation of the hardware resources) + * + * @tparam Properties Should be the pack of `std::pair` types + * @param model Model object acquired from Core::read_model + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * + * @return A compiled model + */ + template + util::EnableIfAllProperties compile_model( + const std::shared_ptr& model, + Properties&&... properties) { + return compile_model(model, AnyMap{std::forward(properties)...}); + } /** * @brief Creates a compiled model from a source model object. @@ -130,13 +153,33 @@ public: * * @param model Model object acquired from Core::read_model * @param device_name Name of device to load model to - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation * @return A compiled model */ CompiledModel compile_model(const std::shared_ptr& model, const std::string& device_name, - const ConfigMap& config = {}); + const AnyMap& properties = {}); + + /** + * @brief Creates a compiled model from a source model object. + * + * Users can create as many compiled models as they need and use + * them simultaneously (up to the limitation of the hardware resources) + * @tparam Properties Should be the pack of `std::pair` types + * @param model Model object acquired from Core::read_model + * @param device_name Name of device to load model to + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * @return A compiled model + */ + template + util::EnableIfAllProperties compile_model( + const std::shared_ptr& model, + const std::string& device_name, + Properties&&... 
properties) { + return compile_model(model, device_name, AnyMap{std::forward(properties)...}); + } /** * @brief Reads and loads a compiled model from IR / ONNX / PDPD file to the default OpenVINI device selected by @@ -146,12 +189,32 @@ public: * especially for cases when caching is enabled and cached model is available * * @param model_path path to model - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation/ * * @return A compiled model */ - CompiledModel compile_model(const std::string& model_path, const ConfigMap& config = {}); + CompiledModel compile_model(const std::string& model_path, const AnyMap& properties = {}); + + /** + * @brief Reads and loads a compiled model from IR / ONNX / PDPD file to the default OpenVINI device selected by + * AUTO plugin. + * + * This can be more efficient than using read_model + compile_model(Model) flow + * especially for cases when caching is enabled and cached model is available + * + * @tparam Properties Should be the pack of `std::pair` types + * @param model_path path to model + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * + * @return A compiled model + */ + template + util::EnableIfAllProperties compile_model(const std::string& model_path, + Properties&&... 
properties) { + return compile_model(model_path, AnyMap{std::forward(properties)...}); + } /** * @brief Reads model and creates a compiled model from IR / ONNX / PDPD file @@ -161,26 +224,64 @@ public: * * @param model_path Path to a model * @param device_name Name of device to load a model to - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation/ * * @return A compiled model */ CompiledModel compile_model(const std::string& model_path, const std::string& device_name, - const ConfigMap& config = {}); + const AnyMap& properties = {}); + + /** + * @brief Reads model and creates a compiled model from IR / ONNX / PDPD file + * + * This can be more efficient than using read_model + compile_model(Model) flow + * especially for cases when caching is enabled and cached model is available + * + * @tparam Properties Should be the pack of `std::pair` types + * @param model_path path to model + * @param device_name Name of device to load model to + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * + * @return A compiled model + */ + template + util::EnableIfAllProperties compile_model(const std::string& model_path, + const std::string& device_name, + Properties&&... properties) { + return compile_model(model_path, device_name, AnyMap{std::forward(properties)...}); + } /** * @brief Creates a compiled model from a source model within a specified remote context. 
* @param model Model object acquired from Core::read_model * @param context A reference to a RemoteContext object - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation * @return A compiled model object */ CompiledModel compile_model(const std::shared_ptr& model, const RemoteContext& context, - const ConfigMap& config = {}); + const AnyMap& properties = {}); + + /** + * @brief Creates a compiled model from a source model within a specified remote context. + * @tparam Properties Should be the pack of `std::pair` types + * @param model Model object acquired from Core::read_model + * @param context Pointer to RemoteContext object + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * @return A compiled model object + */ + template + util::EnableIfAllProperties compile_model( + const std::shared_ptr& model, + const RemoteContext& context, + Properties&&... properties) { + return compile_model(model, context, AnyMap{std::forward(properties)...}); + } /** * @deprecated This method is deprecated. Please use other Core::add_extension methods @@ -268,13 +369,30 @@ public: * ov::CompiledModel::export_model method * @param device_name Name of device to import compiled model for. 
Note, if @p device_name device was not used to * compile the original mode, an exception is thrown - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation* * @return A compiled model */ CompiledModel import_model(std::istream& model_stream, const std::string& device_name, - const ConfigMap& config = {}); + const AnyMap& properties = {}); + + /** + * @brief Imports a compiled model from a previously exported one + * @tparam Properties Should be the pack of `std::pair` types + * @param model_stream Model stream + * @param device_name Name of device to import compiled model for. Note, if @p device_name device was not used to + * compile the original mode, an exception is thrown + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * @return A compiled model + */ + template + util::EnableIfAllProperties import_model(std::istream& model_stream, + const std::string& device_name, + Properties&&... properties) { + return import_model(model_stream, device_name, AnyMap{std::forward(properties)...}); + } /** * @brief Imports a compiled model from a previously exported one with a specified remote context. @@ -282,55 +400,130 @@ public: * ov::CompiledModel::export_model * @param context A reference to a RemoteContext object. 
Note, if the device from @p context was not used to compile * the original mode, an exception is thrown - * @param config Optional map of pairs: (config parameter name, config parameter value) relevant only for this load + * @param properties Optional map of pairs: (property name, property value) relevant only for this load * operation * @return A compiled model */ - CompiledModel import_model(std::istream& model_stream, const RemoteContext& context, const ConfigMap& config = {}); + CompiledModel import_model(std::istream& model_stream, const RemoteContext& context, const AnyMap& properties = {}); /** - * @brief Query device if it supports specified model with specified configuration + * @brief Imports a compiled model from a previously exported one with a specified remote context. + * @tparam Properties Should be the pack of `std::pair` types + * @param model_stream Model stream + * @param context Pointer to RemoteContext object + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * load operation + * @return A compiled model + */ + template + util::EnableIfAllProperties import_model(std::istream& model_stream, + const RemoteContext& context, + Properties&&... properties) { + return import_model(model_stream, context, AnyMap{std::forward(properties)...}); + } + + /** + * @brief Query device if it supports specified model with specified properties * * @param device_name A name of a device to query * @param model Model object to query - * @param config Optional map of pairs: (config parameter name, config parameter value) + * @param properties Optional map of pairs: (property name, property value) * @return An object containing a map of pairs a operation name -> a device name supporting this operation. 
*/ SupportedOpsMap query_model(const std::shared_ptr& model, const std::string& device_name, - const ConfigMap& config = {}) const; + const AnyMap& properties = {}) const; /** - * @brief Sets configuration for device, acceptable keys can be found in ie_plugin_config.hpp + * @brief Query device if it supports specified model with specified properties * - * @param device_name An optional name of a device. If device name is not specified, the config is set for all the - * registered devices. - * - * @param config Map of pairs: (config parameter name, config parameter value) + * @tparam Properties Should be the pack of `std::pair` types + * @param device_name A name of a device to query + * @param model Model object to query + * @param properties Optional pack of pairs: (property name, property value) relevant only for this + * query operation + * @return An object containing a map of pairs a operation name -> a device name supporting this operation. */ - void set_config(const ConfigMap& config, const std::string& device_name = {}); + template + util::EnableIfAllProperties query_model( + const std::shared_ptr& model, + const std::string& device_name, + Properties&&... properties) const { + return query_model(model, device_name, AnyMap{std::forward(properties)...}); + } /** - * @brief Gets configuration dedicated to device behaviour. - * The method is targeted to extract information which can be set via Core::set_config method. + * @brief Sets properties for all the + * registered devices, acceptable keys can be found in openvino/runtime/properties.hpp * - * @param device_name A name of a device to get a configuration value. - * @param config_key_name A config key name. - * @return Value of config corresponding to config key. 
+ * @param properties Map of pairs: (property name, property value) */ - Any get_config(const std::string& device_name, const std::string& config_key_name) const; + void set_property(const AnyMap& properties); /** - * @brief Gets general runtime metric for dedicated hardware. + * @brief Sets properties for all the + * registered devices, acceptable keys can be found in openvino/runtime/properties.hpp + * + * @tparam Properties Should be the pack of `std::pair` types + * @param properties Optional pack of pairs: (property name, property value) + * @return nothing + */ + template + util::EnableIfAllProperties set_property(Properties&&... properties) { + set_property(AnyMap{std::forward(properties)...}); + } + + /** + * @brief Sets properties for device, acceptable keys can be found in openvino/runtime/properties.hpp + * + * @param device_name An name of a device. + * + * @param properties Map of pairs: (property name, property value) + */ + void set_property(const std::string& device_name, const AnyMap& properties); + + /** + * @brief Sets properties for device, acceptable keys can be found in openvino/runtime/properties.hpp + * + * @tparam Properties Should be the pack of `std::pair` types + * @param device_name An name of a device. + * @param properties Optional pack of pairs: (property name, property value) + * @return nothing + */ + template + util::EnableIfAllProperties set_property(const std::string& device_name, + Properties&&... properties) { + set_property(device_name, AnyMap{std::forward(properties)...}); + } + + /** + * @brief Gets properties dedicated to device behaviour. + * + * The method is targeted to extract information which can be set via set_property method. + * + * @param device_name - A name of a device to get a properties value. + * @param name - property name. + * @return Value of property corresponding to property name. 
+ */ + Any get_property(const std::string& device_name, const std::string& name) const; + + /** + * @brief Gets properties dedicated to device behaviour. * * The method is needed to request common device or system properties. * It can be device name, temperature, other devices-specific values. * - * @param device_name A name of a device to get a metric value. - * @param metric_key_name A metric name to request. - * @return Metric value corresponding to metric key. + * @tparam T - type of returned value + * @param deviceName - A name of a device to get a properties value. + * @param property - property object. + * @return Property value. */ - Any get_metric(const std::string& device_name, const std::string& metric_key_name) const; + template + T get_property(const std::string& deviceName, const ov::Property& property) const { + auto to = Any::make(); + get_property(deviceName, property.name(), to); + return to.template as(); + } /** * @brief Returns devices available for inference @@ -339,7 +532,7 @@ public: * @return A vector of devices. The devices are returned as { CPU, GPU.0, GPU.1, MYRIAD } * If there more than one device of specific type, they are enumerated with .# suffix. * Such enumerated device can later be used as a device name in all Core methods like Core::compile_model, - * Core::query_model, Core::set_config and so on. + * Core::query_model, Core::set_property and so on. */ std::vector get_available_devices() const; @@ -391,7 +584,7 @@ public: * - `location` specifies absolute path to dynamic library with a plugin. * A path can also be relative to inference engine shared library. It allows to have common config * for different systems with different configurations. - * - `properties` are set to a plugin via the ov::Core::set_config method. + * - `properties` are set to a plugin via the ov::Core::set_property method. * - `extensions` are set to a plugin via the ov::Core::add_extension method. 
* * @param xml_config_file A path to .xml file with plugins to register. @@ -402,10 +595,24 @@ public: * @brief Create a new remote shared context object on specified accelerator device * using specified plugin-specific low level device API parameters (device handle, pointer, context, etc.) * @param device_name A name of a device to create a new shared context on. - * @param params A map of device-specific shared context parameters. + * @param properties Map of device-specific shared context properties. * @return A reference to a created remote context. */ - RemoteContext create_context(const std::string& device_name, const ParamMap& params); + RemoteContext create_context(const std::string& device_name, const AnyMap& properties); + + /** + * @brief Create a new shared context object on specified accelerator device + * using specified plugin-specific low level device API properties (device handle, pointer, etc.) + * @tparam Properties Should be the pack of `std::pair` types + * @param device_name Name of a device to create new shared context on. + * @param properties Pack of device-specific shared context properties. + * @return A shared pointer to a created remote context. + */ + template + util::EnableIfAllProperties create_context(const std::string& device_name, + Properties&&... properties) { + return create_context(device_name, AnyMap{std::forward(properties)...}); + } /** * @brief Get a pointer to default (plugin-supplied) shared context object for specified accelerator device. 
diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp index b4e1db1c9c5..11aaab765c5 100644 --- a/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp +++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/dx.hpp @@ -132,7 +132,7 @@ public: */ D3DContext(Core& core, ID3D11Device* device, int target_tile_id = -1) : ClContext(core, (cl_context) nullptr) { // clang-format off - ParamMap context_params = { + AnyMap context_params = { {GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)}, {GPU_PARAM_KEY(VA_DEVICE), static_cast(device)}, {GPU_PARAM_KEY(TILE_ID), target_tile_id} @@ -149,7 +149,7 @@ public: * @return A pair of remote tensors for each plane */ std::pair create_tensor_nv12(const size_t height, const size_t width, ID3D11Texture2D* nv12_surf) { - ParamMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, + AnyMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast(nv12_surf)}, {GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}}; auto y_tensor = create_tensor(element::u8, {1, 1, height, width}, tensor_params); @@ -167,7 +167,7 @@ public: * @return A remote tensor instance */ D3DBufferTensor create_tensor(const element::Type type, const Shape& shape, ID3D11Buffer* buffer) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(DX_BUFFER)}, + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(DX_BUFFER)}, {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast(buffer)}}; create_tensor(type, shape, params).as(); } @@ -185,7 +185,7 @@ public: const Shape& shape, ID3D11Texture2D* surface, uint32_t plane = 0) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), static_cast(surface)}, {GPU_PARAM_KEY(VA_PLANE), 
plane}}; return create_tensor(type, shape, params).as(); diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp index 8d1ed73e204..6faff0955f8 100644 --- a/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp +++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/ocl.hpp @@ -179,9 +179,9 @@ public: * @param ctx_device_id An ID of device to be used from ctx */ ClContext(Core& core, cl_context ctx, int ctx_device_id = 0) { - ParamMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)}, - {GPU_PARAM_KEY(OCL_CONTEXT), static_cast(ctx)}, - {GPU_PARAM_KEY(OCL_CONTEXT_DEVICE_ID), ctx_device_id}}; + AnyMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)}, + {GPU_PARAM_KEY(OCL_CONTEXT), static_cast(ctx)}, + {GPU_PARAM_KEY(OCL_CONTEXT_DEVICE_ID), ctx_device_id}}; *this = core.create_context(device_name, context_params).as(); } @@ -196,9 +196,9 @@ public: auto res = clGetCommandQueueInfo(queue, CL_QUEUE_CONTEXT, sizeof(cl_context), &ctx, nullptr); if (res != CL_SUCCESS) IE_THROW() << "Can't get context from given opencl queue"; - ParamMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)}, - {GPU_PARAM_KEY(OCL_CONTEXT), static_cast(ctx)}, - {GPU_PARAM_KEY(OCL_QUEUE), static_cast(queue)}}; + AnyMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(OCL)}, + {GPU_PARAM_KEY(OCL_CONTEXT), static_cast(ctx)}, + {GPU_PARAM_KEY(OCL_QUEUE), static_cast(queue)}}; *this = core.create_context(device_name, context_params).as(); } @@ -237,8 +237,8 @@ public: const cl::Image2D& nv12_image_plane_uv) { size_t width = nv12_image_plane_y.getImageInfo(); size_t height = nv12_image_plane_y.getImageInfo(); - ParamMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_IMAGE2D)}, - {GPU_PARAM_KEY(MEM_HANDLE), static_cast(nv12_image_plane_y.get())}}; + AnyMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), 
GPU_PARAM_VALUE(OCL_IMAGE2D)}, + {GPU_PARAM_KEY(MEM_HANDLE), static_cast(nv12_image_plane_y.get())}}; auto y_tensor = create_tensor(element::u8, {1, 1, height, width}, tensor_params); tensor_params[GPU_PARAM_KEY(MEM_HANDLE)] = static_cast(nv12_image_plane_uv.get()); auto uv_tensor = create_tensor(element::u8, {1, 2, height / 2, width / 2}, tensor_params); @@ -253,8 +253,8 @@ public: * @return A remote tensor instance */ ClBufferTensor create_tensor(const element::Type type, const Shape& shape, const cl_mem buffer) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_BUFFER)}, - {GPU_PARAM_KEY(MEM_HANDLE), static_cast(buffer)}}; + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_BUFFER)}, + {GPU_PARAM_KEY(MEM_HANDLE), static_cast(buffer)}}; return create_tensor(type, shape, params).as(); } @@ -277,8 +277,8 @@ public: * @return A remote tensor instance */ ClImage2DTensor create_tensor(const element::Type type, const Shape& shape, const cl::Image2D& image) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_IMAGE2D)}, - {GPU_PARAM_KEY(MEM_HANDLE), static_cast(image.get())}}; + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(OCL_IMAGE2D)}, + {GPU_PARAM_KEY(MEM_HANDLE), static_cast(image.get())}}; return create_tensor(type, shape, params).as(); } @@ -290,8 +290,8 @@ public: * @return A remote tensor instance */ USMTensor create_tensor(const element::Type type, const Shape& shape, void* usm_ptr) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_USER_BUFFER)}, - {GPU_PARAM_KEY(MEM_HANDLE), static_cast(usm_ptr)}}; + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_USER_BUFFER)}, + {GPU_PARAM_KEY(MEM_HANDLE), static_cast(usm_ptr)}}; return create_tensor(type, shape, params).as(); } @@ -302,7 +302,7 @@ public: * @return A remote tensor instance */ USMTensor create_usm_host_tensor(const element::Type type, const Shape& shape) { - ParamMap params 
= {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_HOST_BUFFER)}}; + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_HOST_BUFFER)}}; return create_tensor(type, shape, params).as(); } @@ -313,7 +313,7 @@ public: * @return A remote tensor instance */ USMTensor create_usm_device_tensor(const element::Type type, const Shape& shape) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_DEVICE_BUFFER)}}; + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(USM_DEVICE_BUFFER)}}; return create_tensor(type, shape, params).as(); } }; diff --git a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp index 2f5bdc09e06..7ab313d4d65 100644 --- a/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp +++ b/src/inference/include/openvino/runtime/intel_gpu/ocl/va.hpp @@ -101,9 +101,9 @@ public: * that root device should be used */ VAContext(Core& core, VADisplay device, int target_tile_id = -1) : ClContext(core, (cl_context) nullptr) { - ParamMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)}, - {GPU_PARAM_KEY(VA_DEVICE), static_cast(device)}, - {GPU_PARAM_KEY(TILE_ID), target_tile_id}}; + AnyMap context_params = {{GPU_PARAM_KEY(CONTEXT_TYPE), GPU_PARAM_VALUE(VA_SHARED)}, + {GPU_PARAM_KEY(VA_DEVICE), static_cast(device)}, + {GPU_PARAM_KEY(TILE_ID), target_tile_id}}; *this = core.create_context(device_name, context_params).as(); } @@ -118,9 +118,9 @@ public: std::pair create_tensor_nv12(const size_t height, const size_t width, const VASurfaceID nv12_surf) { - ParamMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, - {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), nv12_surf}, - {GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}}; + AnyMap tensor_params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, + {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), nv12_surf}, + {GPU_PARAM_KEY(VA_PLANE), uint32_t(0)}}; auto 
y_tensor = create_tensor(element::u8, {1, 1, height, width}, tensor_params); tensor_params[GPU_PARAM_KEY(VA_PLANE)] = uint32_t(1); auto uv_tensor = create_tensor(element::u8, {1, 2, height / 2, width / 2}, tensor_params); @@ -139,9 +139,9 @@ public: const Shape& shape, const VASurfaceID surface, const uint32_t plane = 0) { - ParamMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, - {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), surface}, - {GPU_PARAM_KEY(VA_PLANE), plane}}; + AnyMap params = {{GPU_PARAM_KEY(SHARED_MEM_TYPE), GPU_PARAM_VALUE(VA_SURFACE)}, + {GPU_PARAM_KEY(DEV_OBJECT_HANDLE), surface}, + {GPU_PARAM_KEY(VA_PLANE), plane}}; return create_tensor(type, shape, params).as(); } }; diff --git a/src/inference/include/openvino/runtime/parameter.hpp b/src/inference/include/openvino/runtime/parameter.hpp deleted file mode 100644 index 5793308d54e..00000000000 --- a/src/inference/include/openvino/runtime/parameter.hpp +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (C) 2018-2022 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -/** - * @brief A header file for the Parameter class - * @file openvino/runtime/parameter.hpp - */ -#pragma once - -#include - -#include "openvino/core/any.hpp" - -namespace ov { - -/** - * @brief An std::map object containing parameters - */ -using ParamMap = std::map; - -namespace runtime { -using ov::ParamMap; -} // namespace runtime - -} // namespace ov diff --git a/src/inference/include/openvino/runtime/properties.hpp b/src/inference/include/openvino/runtime/properties.hpp new file mode 100644 index 00000000000..462bb9f5bc8 --- /dev/null +++ b/src/inference/include/openvino/runtime/properties.hpp @@ -0,0 +1,657 @@ +// Copyright (C) 2018-2022 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +/** + * @brief A header for advanced hardware specific properties for OpenVINO runtime devices + * To use in set_property, compile_model, import_model, get_property methods + * + * @file 
openvino/runtime/properties.hpp + */ +#pragma once + +#include +#include +#include +#include +#include + +#include "ie_precision.hpp" +#include "openvino/core/any.hpp" +#include "openvino/runtime/common.hpp" + +namespace ov { + +/** + * @brief Enum to define property value mutability + */ +enum class PropertyMutability { + RO, //!< Read-only property values can not be passed as input parameter + RW, //!< Read/Write property key may change readability in runtime +}; + +/** + * @brief This class is used to return property name and its mutability attribute + */ +struct PropertyName : public std::string { + using std::string::string; + + /** + * @brief Constructs property name object + * @param str property name + * @param mutability property mutability + */ + PropertyName(const std::string& str, PropertyMutability mutability = PropertyMutability::RW) + : std::string{str}, + _mutability{mutability} {} + + /** + * @brief check property mutability + * @return true if property is mutable + */ + bool is_mutable() const { + return _mutability == PropertyMutability::RW; + } + +private: + PropertyMutability _mutability = PropertyMutability::RW; +}; + +/** @cond INTERNAL */ +namespace util { +template +struct AllProperties; + +template +struct AllProperties { + constexpr static const bool value = + std::is_convertible>::value && AllProperties::value; +}; + +template +struct AllProperties { + constexpr static const bool value = std::is_convertible>::value; +}; + +template +using EnableIfAllProperties = typename std::enable_if::value, T>::type; + +/** + * @brief This class is used to bind property name with property type + * @tparam T type of value used to pass or get property + */ +template +struct BaseProperty { + using value_type = T; //!< Property type + constexpr static const auto mutability = mutability_; //!< Property readability + + /** + * @brief Constructs property access variable + * @param str_ property name + */ + constexpr BaseProperty(const char* name_) : 
_name{name_} {} + + /** + * @brief return property name + * @return Pointer to const string key representation + */ + const char* name() const { + return _name; + } + + /** + * @brief compares property name + * @return true if string is the same + */ + bool operator==(const std::string& str) const { + return _name == str; + } + + /** + * @brief compares property name + * @return true if string is the same + */ + friend bool operator==(const std::string& str, const BaseProperty& property) { + return property == str; + } + +private: + const char* _name = nullptr; +}; +template +inline std::ostream& operator<<(std::ostream& os, const BaseProperty& property) { + return os << property.name(); +} +} // namespace util +/** @endcond */ + +/** + * @brief This class is used to bind property name with value type + * @tparam T type of value used to set or get property + */ +template +struct Property : public util::BaseProperty { + using util::BaseProperty::BaseProperty; + /** + * @brief Constructs property + * @tparam Args property constructor arguments types + * @param args property constructor arguments + * @return Pair of name and type erased value. + */ + template + inline std::pair operator()(Args&&... args) const { + return {this->name(), Any::make(std::forward(args)...)}; + } +}; + +/** + * @brief This class is used to bind read-only property name with value type + * @tparam T type of value used to pass or get property + */ +template +struct Property : public util::BaseProperty { + using util::BaseProperty::BaseProperty; +}; + +/** + * @brief Read-only property to get a std::vector of supported read-only properies. + * + * This can be used as a compiled model property as well. 
+ * + */ +static constexpr Property, PropertyMutability::RO> supported_properties{ + "SUPPORTED_PROPERTIES"}; + +/** + * @brief Read-only property to get a std::vector of available device IDs + */ +static constexpr Property, PropertyMutability::RO> available_devices{"AVAILABLE_DEVICES"}; + +/** + * @brief Read-only property to get the name of a model + */ +static constexpr Property model_name{"NETWORK_NAME"}; + +/** + * @brief Read-only property to get an unsigned integer value of optimal number of compiled model infer requests. + */ +static constexpr Property optimal_number_of_infer_requests{ + "OPTIMAL_NUMBER_OF_INFER_REQUESTS"}; + +namespace hint { + +/** + * @brief Hint for device to use specified precision for inference + */ +static constexpr Property inference_precision{"INFERENCE_PRECISION_HINT"}; + +/** + * @brief Enum to define possible model priorities hints + */ +enum class ModelPriority { + LOW = 0, + MEDIUM = 1, + HIGH = 2, +}; + +/** @cond INTERNAL */ +inline std::ostream& operator<<(std::ostream& os, const ModelPriority& model_priority) { + switch (model_priority) { + case ModelPriority::LOW: + return os << "LOW"; + case ModelPriority::MEDIUM: + return os << "MEDIUM"; + case ModelPriority::HIGH: + return os << "HIGH"; + default: + throw ov::Exception{"Unsupported performance measure hint"}; + } +} + +inline std::istream& operator>>(std::istream& is, ModelPriority& model_priority) { + std::string str; + is >> str; + if (str == "LOW") { + model_priority = ModelPriority::LOW; + } else if (str == "MEDIUM") { + model_priority = ModelPriority::MEDIUM; + } else if (str == "HIGH") { + model_priority = ModelPriority::HIGH; + } else { + throw ov::Exception{"Unsupported model priority: " + str}; + } + return is; +} +/** @endcond */ + +/** + * @brief High-level OpenVINO model priority hint + * Defines what model should be provided with more performant bounded resource first + */ +static constexpr Property model_priority{"MODEL_PRIORITY"}; + +/** + * @brief 
Enum to define possible performance mode hints + */ +enum class PerformanceMode { + LATENCY = 0, + THROUGHPUT = 1, +}; + +/** @cond INTERNAL */ +inline std::ostream& operator<<(std::ostream& os, const PerformanceMode& performance_mode) { + switch (performance_mode) { + case PerformanceMode::LATENCY: + return os << "LATENCY"; + case PerformanceMode::THROUGHPUT: + return os << "THROUGHPUT"; + default: + throw ov::Exception{"Unsupported performance mode hint"}; + } +} + +inline std::istream& operator>>(std::istream& is, PerformanceMode& performance_mode) { + std::string str; + is >> str; + if (str == "LATENCY") { + performance_mode = PerformanceMode::LATENCY; + } else if (str == "THROUGHPUT") { + performance_mode = PerformanceMode::THROUGHPUT; + } else { + throw ov::Exception{"Unsupported performance mode: " + str}; + } + return is; +} +/** @endcond */ + +/** + * @brief High-level OpenVINO Performance Hints + * unlike low-level properties that are individual (per-device), the hints are something that every device accepts + * and turns into device-specific settings + */ +static constexpr Property performance_mode{"PERFORMANCE_HINT"}; + +/** + * @brief (Optional) property that backs the (above) Performance Hints + * by giving additional information on how many inference requests the application will be keeping in flight + * usually this value comes from the actual use-case (e.g. number of video-cameras, or other sources of inputs) + */ +static constexpr Property num_requests{"PERFORMANCE_HINT_NUM_REQUESTS"}; +} // namespace hint + +/** + * @brief The name for setting performance counters option. 
+ * + * It is passed to Core::set_property() + */ +static constexpr Property enable_profiling{"PERF_COUNT"}; + +namespace log { + +/** + * @brief Enum to define possible log levels + */ +enum class Level { + NO = -1, //!< disable any loging + ERR = 0, //!< error events that might still allow the application to continue running + WARNING = 1, //!< potentially harmful situations which may further lead to ERROR + INFO = 2, //!< informational messages that display the progress of the application at coarse-grained level + DEBUG = 3, //!< fine-grained events that are most useful to debug an application. + TRACE = 4, //!< finer-grained informational events than the DEBUG +}; + +/** @cond INTERNAL */ +inline std::ostream& operator<<(std::ostream& os, const Level& level) { + switch (level) { + case Level::NO: + return os << "NO"; + case Level::ERR: + return os << "LOG_ERROR"; + case Level::WARNING: + return os << "LOG_WARNING"; + case Level::INFO: + return os << "LOG_INFO"; + case Level::DEBUG: + return os << "LOG_DEBUG"; + case Level::TRACE: + return os << "LOG_TRACE"; + default: + throw ov::Exception{"Unsupported log level"}; + } +} + +inline std::istream& operator>>(std::istream& is, Level& level) { + std::string str; + is >> str; + if (str == "NO") { + level = Level::NO; + } else if (str == "LOG_ERROR") { + level = Level::ERR; + } else if (str == "LOG_WARNING") { + level = Level::WARNING; + } else if (str == "LOG_INFO") { + level = Level::INFO; + } else if (str == "LOG_DEBUG") { + level = Level::DEBUG; + } else if (str == "LOG_TRACE") { + level = Level::TRACE; + } else { + throw ov::Exception{"Unsupported log level: " + str}; + } + return is; +} +/** @endcond */ + +/** + * @brief the property for setting desirable log level. + */ +static constexpr Property level{"LOG_LEVEL"}; +} // namespace log + +/** + * @brief This property defines the directory which will be used to store any data cached by plugins. 
+ * + * The underlying cache structure is not defined and might differ between OpenVINO releases + * Cached data might be platform / device specific and might be invalid after OpenVINO version change + * If this property is not specified or value is empty string, then caching is disabled. + * The property might enable caching for the plugin using the following code: + * + * @code + * ie.set_property("GPU", ov::cache_dir("cache/")); // enables cache for GPU plugin + * @endcode + * + * The following code enables caching of compiled network blobs for devices where import/export is supported + * + * @code + * ie.set_property(ov::cache_dir("cache/")); // enables models cache + * @endcode + */ +static constexpr Property cache_dir{"CACHE_DIR"}; + +/** + * @brief Read-only property to provide information about a range for streams on platforms where streams are supported. + * + * Property returns a value of std::tuple type, where: + * - First value is bottom bound. + * - Second value is upper bound. + */ +static constexpr Property, PropertyMutability::RO> range_for_streams{ + "RANGE_FOR_STREAMS"}; + +/** + * @brief Read-only property to query information optimal batch size for the given device and the network + * + * Property returns a value of unsigned int type, + * Returns optimal batch size for a given network on the given device. The returned value is aligned to power of 2. + * Also, MODEL_PTR is the required option for this metric since the optimal batch size depends on the model, + * so if the MODEL_PTR is not given, the result of the metric is always 1. + * For the GPU the metric is queried automatically whenever the OpenVINO performance hint for the throughput is used, + * so that the result (>1) governs the automatic batching (transparently to the application). 
+ * The automatic batching can be disabled with ALLOW_AUTO_BATCHING set to NO + */ +static constexpr Property optimal_batch_size{"OPTIMAL_BATCH_SIZE"}; + +/** + * @brief Read-only property to provide a hint for a range for number of async infer requests. If device supports + * streams, the metric provides range for number of IRs per stream. + * + * Property returns a value of std::tuple type, where: + * - First value is bottom bound. + * - Second value is upper bound. + * - Third value is step inside this range. + */ +static constexpr Property, PropertyMutability::RO> + range_for_async_infer_requests{"RANGE_FOR_ASYNC_INFER_REQUESTS"}; + +namespace device { + +/** + * @brief the property for setting of required device to execute on + * values: device id starts from "0" - first device, "1" - second device, etc + */ +static constexpr Property id{"DEVICE_ID"}; + +/** + * @brief Type for device Priorities config option, with comma-separated devices listed in the desired priority + */ +struct Priorities : public Property { +private: + template + static inline std::string concat(const H& head, T&&... tail) { + return head + std::string{','} + concat(std::forward(tail)...); + } + + template + static inline std::string concat(const H& head) { + return head; + } + +public: + using Property::Property; + + /** + * @brief Constructs device priorities + * @tparam Args property constructor arguments types + * @param args property constructor arguments + * @return Pair of name and type erased value. + */ + template + inline std::pair operator()(Args&&... 
args) const { + return {name(), Any{concat(std::forward(args)...)}}; + } +}; + +/** + * @brief Device Priorities config option, with comma-separated devices listed in the desired priority + */ +static constexpr Priorities priorities{"MULTI_DEVICE_PRIORITIES"}; + +/** + * @brief Type for property to pass set of properties to specified device + */ +struct Properties { + /** + * @brief Constructs property + * @param device_name device plugin alias + * @param config set of property values with names + * @return Pair of string key representation and type erased property value. + */ + inline std::pair operator()(const std::string& device_name, const AnyMap& config) const { + return {device_name, config}; + } + + /** + * @brief Constructs property + * @tparam Properties Should be the pack of `std::pair` types + * @param device_name device plugin alias + * @param configs Optional pack of pairs: (config parameter name, config parameter value) + * @return Pair of string key representation and type erased property value. + */ + template + inline util::EnableIfAllProperties, Properties...> operator()( + const std::string& device_name, + Properties&&... configs) const { + return {device_name, AnyMap{std::pair{configs}...}}; + } +}; + +/** + * @brief Property to pass set of property values to specified device + * Usage Example: + * @code + * core.compile_model("HETERO", + * ov::target_fallback("GPU", "CPU"), + * ov::device::properties("CPU", ov::enable_profiling(true)), + * ov::device::properties("GPU", ov::enable_profiling(false))); + * @endcode + */ +static constexpr Properties properties; + +/** + * @brief Read-only property to get a std::string value representing a full device name. + */ +static constexpr Property full_name{"FULL_DEVICE_NAME"}; + +/** + * @brief Read-only property which defines the device architecture. 
+ */ +static constexpr Property architecture{"DEVICE_ARCHITECTURE"}; + +/** + * @brief Enum to define possible device types + */ +enum class Type { + INTEGRATED = 0, //!< Device is integrated into host system + DISCRETE = 1, //!< Device is not integrated into host system +}; + +/** @cond INTERNAL */ +inline std::ostream& operator<<(std::ostream& os, const Type& device_type) { + switch (device_type) { + case Type::DISCRETE: + return os << "discrete"; + case Type::INTEGRATED: + return os << "integrated"; + default: + throw ov::Exception{"Unsupported device type"}; + } +} + +inline std::istream& operator>>(std::istream& is, Type& device_type) { + std::string str; + is >> str; + if (str == "discrete") { + device_type = Type::DISCRETE; + } else if (str == "integrated") { + device_type = Type::INTEGRATED; + } else { + throw ov::Exception{"Unsupported device type: " + str}; + } + return is; +} +/** @endcond */ + +/** + * @brief Read-only property to get a type of device. See Type enum definition for possible return values + */ +static constexpr Property type{"DEVICE_TYPE"}; + +/** + * @brief Read-only property which defines Giga OPS per second count (GFLOPS or GIOPS) for a set of precisions supported + * by specified device + */ +static constexpr Property, PropertyMutability::RO> gops{"DEVICE_GOPS"}; + +/** + * @brief Read-only property to get a float of device thermal + */ +static constexpr Property thermal{"DEVICE_THERMAL"}; + +/** + * @brief Read-only property to get a std::vector of capabilities options per device. 
+ */ +static constexpr Property, PropertyMutability::RO> capabilities{"OPTIMIZATION_CAPABILITIES"}; +namespace capability { +constexpr static const auto FP32 = "FP32"; //!< Device supports fp32 inference +constexpr static const auto BF16 = "BF16"; //!< Device supports bf16 inference +constexpr static const auto FP16 = "FP16"; //!< Device supports fp16 inference +constexpr static const auto INT8 = "INT8"; //!< Device supports int8 inference +constexpr static const auto BIN = "BIN"; //!< Device supports binary inference +constexpr static const auto WINOGRAD = "WINOGRAD"; //!< Device supports winograd optimization +constexpr static const auto EXPORT_IMPORT = "EXPORT_IMPORT"; //!< Device supports model export and import +} // namespace capability +} // namespace device + +/** + * @brief The key with the list of device targets used to fallback unsupported layers + * by HETERO plugin + */ +static constexpr device::Priorities target_fallback{"TARGET_FALLBACK"}; + +/** + * @brief The key for enabling of dumping the topology with details of layers and details how + * this network would be executed on different devices to the disk in GraphViz format. + */ +static constexpr Property dump_graph_dot{"HETERO_DUMP_GRAPH_DOT"}; + +namespace streams { +/** + * @brief Special value for ov::streams::num property. + * Creates bare minimum of streams to improve the performance + */ +static constexpr const int32_t AUTO = -1; +/** + * @brief Special value for ov::streams::num property. 
+ * Creates as many streams as needed to accommodate NUMA and avoid associated penalties + */ +static constexpr const int32_t NUMA = -2; + +/** + * @brief The number of executor logical partitions + */ +static constexpr Property num{"NUM_STREAMS"}; +} // namespace streams + +/** + * @brief Maximum number of threads that can be used for inference tasks + */ +static constexpr Property inference_num_threads{"INFERENCE_NUM_THREADS"}; + +/** + * @brief Maximum number of threads that can be used for compilation tasks + */ +static constexpr Property compilation_num_threads{"COMPILATION_NUM_THREADS"}; + +/** + * @brief Enum to define possible affinity patterns + */ +enum class Affinity { + NONE = -1, //!< Disable threads affinity pinning + CORE = 0, //!< Pin threads to cores, best for static benchmarks + NUMA = 1, //!< Pin threads to NUMA nodes, best for real-life, contented cases. On the Windows and MacOS* this + //!< option behaves as CORE + HYBRID_AWARE = 2, //!< Let the runtime to do pinning to the cores types, e.g. prefer the "big" cores for latency + //!< tasks. 
On the hybrid CPUs this option is default +}; + +/** @cond INTERNAL */ +inline std::ostream& operator<<(std::ostream& os, const Affinity& affinity) { + switch (affinity) { + case Affinity::NONE: + return os << "NONE"; + case Affinity::CORE: + return os << "CORE"; + case Affinity::NUMA: + return os << "NUMA"; + case Affinity::HYBRID_AWARE: + return os << "HYBRID_AWARE"; + default: + throw ov::Exception{"Unsupported affinity pattern"}; + } +} + +inline std::istream& operator>>(std::istream& is, Affinity& affinity) { + std::string str; + is >> str; + if (str == "NONE") { + affinity = Affinity::NONE; + } else if (str == "CORE") { + affinity = Affinity::CORE; + } else if (str == "NUMA") { + affinity = Affinity::NUMA; + } else if (str == "HYBRID_AWARE") { + affinity = Affinity::HYBRID_AWARE; + } else { + throw ov::Exception{"Unsupported affinity pattern: " + str}; + } + return is; +} +/** @endcond */ + +/** + * @brief The name for setting CPU affinity per thread option. + * @note The setting is ignored, if the OpenVINO compiled with OpenMP and any affinity-related OpenMP's + * environment variable is set (as affinity is configured explicitly) + */ +static constexpr Property affinity{"AFFINITY"}; +} // namespace ov diff --git a/src/inference/include/openvino/runtime/remote_context.hpp b/src/inference/include/openvino/runtime/remote_context.hpp index 4fc3234dbf7..6e23ae9efdb 100644 --- a/src/inference/include/openvino/runtime/remote_context.hpp +++ b/src/inference/include/openvino/runtime/remote_context.hpp @@ -15,7 +15,7 @@ #include "openvino/core/shape.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/runtime/common.hpp" -#include "openvino/runtime/parameter.hpp" +#include "openvino/runtime/properties.hpp" #include "openvino/runtime/remote_tensor.hpp" namespace InferenceEngine { @@ -142,7 +142,7 @@ public: * @param params Map of the low-level tensor object parameters. * @return A pointer to plugin object that implements RemoteTensor interface. 
*/ - RemoteTensor create_tensor(const element::Type& type, const Shape& shape, const ParamMap& params = {}); + RemoteTensor create_tensor(const element::Type& type, const Shape& shape, const AnyMap& params = {}); /** * @brief Returns a map of device-specific parameters required for low-level @@ -153,7 +153,7 @@ public: * Abstract method. * @return A map of name/parameter elements. */ - ParamMap get_params() const; + AnyMap get_params() const; /** * @brief This method is used to create host tensor object friendly for the device in current context diff --git a/src/inference/include/openvino/runtime/remote_tensor.hpp b/src/inference/include/openvino/runtime/remote_tensor.hpp index 70c344f1ed3..98a219f892d 100644 --- a/src/inference/include/openvino/runtime/remote_tensor.hpp +++ b/src/inference/include/openvino/runtime/remote_tensor.hpp @@ -10,7 +10,6 @@ #pragma once #include "openvino/runtime/common.hpp" -#include "openvino/runtime/parameter.hpp" #include "openvino/runtime/tensor.hpp" namespace ov { @@ -53,7 +52,7 @@ public: * Abstract method. * @return A map of name/parameter elements. */ - runtime::ParamMap get_params() const; + ov::AnyMap get_params() const; /** * @brief Returns name of the device on which underlying object is allocated. diff --git a/src/inference/src/any_copy.cpp b/src/inference/src/any_copy.cpp new file mode 100644 index 00000000000..64363e90687 --- /dev/null +++ b/src/inference/src/any_copy.cpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "any_copy.hpp" + +#include + +#include "openvino/runtime/properties.hpp" + +namespace ov { +std::map any_copy(const ov::AnyMap& params) { + std::function to_config_string = [&](const Any& any) -> std::string { + if (any.is()) { + return any.as() ? 
"YES" : "NO"; + } else if (any.is()) { + std::stringstream strm; + for (auto&& val : any.as()) { + strm << val.first << " " << to_config_string(val.second) << " "; + } + return strm.str(); + } else { + std::stringstream strm; + any.print(strm); + return strm.str(); + } + }; + std::map result; + for (auto&& value : params) { + result.emplace(value.first, to_config_string(value.second)); + } + return result; +} + +void any_lexical_cast(const ov::Any& from, ov::Any& to) { + if (!from.is()) { + to = from; + } else { + auto str = from.as(); + if (to.is()) { + to = from; + } else if (to.is()) { + if (str == "YES") { + to = true; + } else if (str == "NO") { + to = false; + } else { + OPENVINO_UNREACHABLE("Unsupported lexical cast to bool from: ", str); + } + } else { + std::stringstream strm(str); + to.read(strm); + if (strm.fail()) { + OPENVINO_UNREACHABLE("Unsupported lexical cast to ", to.type_info().name(), " from: ", str); + } + } + } +} +} // namespace ov diff --git a/src/inference/src/any_copy.hpp b/src/inference/src/any_copy.hpp new file mode 100644 index 00000000000..3b6c2d7042a --- /dev/null +++ b/src/inference/src/any_copy.hpp @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ov { +std::map any_copy(const ov::AnyMap& config_map); + +void any_lexical_cast(const Any& any, ov::Any& to); + +} // namespace ov diff --git a/src/inference/src/cpp/ie_executable_network.cpp b/src/inference/src/cpp/ie_executable_network.cpp index 08d37882222..ad4f5a5d472 100644 --- a/src/inference/src/cpp/ie_executable_network.cpp +++ b/src/inference/src/cpp/ie_executable_network.cpp @@ -4,10 +4,12 @@ #include "cpp/ie_executable_network.hpp" +#include "any_copy.hpp" #include "cpp/exception2status.hpp" #include "cpp_interfaces/interface/ie_iexecutable_network_internal.hpp" #include "ie_common.h" #include 
"ie_executable_network_base.hpp" +#include "ie_plugin_config.hpp" #include "ie_remote_context.hpp" #include "openvino/core/except.hpp" #include "openvino/runtime/compiled_model.hpp" @@ -206,16 +208,39 @@ void CompiledModel::export_model(std::ostream& networkModel) { OV_EXEC_NET_CALL_STATEMENT(_impl->Export(networkModel)); } -void CompiledModel::set_config(const ie::ParamMap& config) { +void CompiledModel::set_property(const AnyMap& config) { OV_EXEC_NET_CALL_STATEMENT(_impl->SetConfig(config)); } -ie::Parameter CompiledModel::get_config(const std::string& name) const { - OV_EXEC_NET_CALL_STATEMENT(return {_impl->GetConfig(name), _so}); +Any CompiledModel::get_property(const std::string& name) const { + OV_EXEC_NET_CALL_STATEMENT({ + if (ov::supported_properties == name) { + try { + return {_impl->GetMetric(name), _so}; + } catch (ie::Exception&) { + auto ro_properties = _impl->GetMetric(METRIC_KEY(SUPPORTED_METRICS)).as>(); + auto rw_properties = _impl->GetConfig(METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); + std::vector supported_properties; + for (auto&& ro_property : ro_properties) { + supported_properties.emplace_back(ro_property, PropertyMutability::RO); + } + for (auto&& rw_property : rw_properties) { + supported_properties.emplace_back(rw_property, PropertyMutability::RW); + } + supported_properties.emplace_back(ov::supported_properties.name(), PropertyMutability::RO); + return supported_properties; + } + } + try { + return {_impl->GetMetric(name), _so}; + } catch (ie::Exception&) { + return {_impl->GetConfig(name), _so}; + } + }); } -ie::Parameter CompiledModel::get_metric(const std::string& name) const { - OV_EXEC_NET_CALL_STATEMENT(return {_impl->GetMetric(name), _so}); +void CompiledModel::get_property(const std::string& name, Any& to) const { + any_lexical_cast(get_property(name), to); } RemoteContext CompiledModel::get_context() const { diff --git a/src/inference/src/cpp/ie_plugin.hpp b/src/inference/src/cpp/ie_plugin.hpp index ae0a40030e7..2f215f50dc1 
100644 --- a/src/inference/src/cpp/ie_plugin.hpp +++ b/src/inference/src/cpp/ie_plugin.hpp @@ -19,6 +19,7 @@ #include "cpp_interfaces/interface/ie_iplugin_internal.hpp" #include "so_ptr.hpp" #include "openvino/runtime/common.hpp" +#include "any_copy.hpp" #if defined __GNUC__ # pragma GCC diagnostic push @@ -174,26 +175,26 @@ public: OV_PLUGIN_CALL_STATEMENT(_ptr->AddExtension(extension)); } - void set_config(const ConfigMap& config) { + void set_config(const std::map& config) { OV_PLUGIN_CALL_STATEMENT(_ptr->SetConfig(config)); } - SoPtr compile_model(const ie::CNNNetwork& network, const ConfigMap& config) { + SoPtr compile_model(const ie::CNNNetwork& network, const std::map& config) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->LoadNetwork(network, config), _so}); } SoPtr compile_model(const ie::CNNNetwork& network, const std::shared_ptr& context, - const ConfigMap& config) { + const std::map& config) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->LoadNetwork(network, config, context), _so}); } - SoPtr compile_model(const std::string& modelPath, const ConfigMap& config) { + SoPtr compile_model(const std::string& modelPath, const std::map& config) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->LoadNetwork(modelPath, config), _so}); } ie::QueryNetworkResult query_model(const ie::CNNNetwork& network, - const ConfigMap& config) const { + const std::map& config) const { ie::QueryNetworkResult res; OV_PLUGIN_CALL_STATEMENT(res = _ptr->QueryNetwork(network, config)); OPENVINO_ASSERT(res.rc == ie::OK, res.resp.msg); @@ -201,34 +202,34 @@ public: } SoPtr import_model(const std::string& modelFileName, - const ConfigMap& config) { + const std::map& config) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->ImportNetwork(modelFileName, config), _so}); } SoPtr import_model(std::istream& networkModel, - const ConfigMap& config) { + const std::map& config) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->ImportNetwork(networkModel, config), _so}); } SoPtr import_model(std::istream& networkModel, const 
std::shared_ptr& context, - const ConfigMap& config) { + const std::map& config) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->ImportNetwork(networkModel, context, config), _so}); } - ie::Parameter get_metric(const std::string& name, const ie::ParamMap& options) const { + Any get_metric(const std::string& name, const AnyMap& options) const { OV_PLUGIN_CALL_STATEMENT(return {_ptr->GetMetric(name, options), _so}); } - SoPtr create_context(const ie::ParamMap& params) { + SoPtr create_context(const AnyMap& params) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->CreateContext(params), _so}); } - SoPtr get_default_context(const ie::ParamMap& params) { + SoPtr get_default_context(const AnyMap& params) { OV_PLUGIN_CALL_STATEMENT(return {_ptr->GetDefaultContext(params), _so}); } - ie::Parameter get_config(const std::string& name, const ie::ParamMap& options) const { + Any get_config(const std::string& name, const AnyMap& options) const { OV_PLUGIN_CALL_STATEMENT(return {_ptr->GetConfig(name, options), _so}); } }; diff --git a/src/inference/src/cpp/ie_remote_context.cpp b/src/inference/src/cpp/ie_remote_context.cpp index 87489d0ed41..fad5888bfc0 100644 --- a/src/inference/src/cpp/ie_remote_context.cpp +++ b/src/inference/src/cpp/ie_remote_context.cpp @@ -6,6 +6,7 @@ #include +#include "any_copy.hpp" #include "ie_ngraph_utils.hpp" #include "ie_remote_blob.hpp" #include "openvino/core/except.hpp" @@ -60,12 +61,10 @@ std::string RemoteContext::get_device_name() const { OV_REMOTE_CONTEXT_STATEMENT(return _impl->getDeviceName()); } -RemoteTensor RemoteContext::create_tensor(const element::Type& element_type, - const Shape& shape, - const ParamMap& params) { +RemoteTensor RemoteContext::create_tensor(const element::Type& type, const Shape& shape, const AnyMap& params) { OV_REMOTE_CONTEXT_STATEMENT({ auto blob = _impl->CreateBlob( - {ie::details::convertPrecision(element_type), shape, ie::TensorDesc::getLayoutByRank(shape.size())}, + {ie::details::convertPrecision(type), shape, 
ie::TensorDesc::getLayoutByRank(shape.size())}, params); blob->allocate(); return {blob, _so}; @@ -81,8 +80,8 @@ Tensor RemoteContext::create_host_tensor(const element::Type element_type, const }); } -ParamMap RemoteContext::get_params() const { - ParamMap paramMap; +AnyMap RemoteContext::get_params() const { + AnyMap paramMap; OV_REMOTE_CONTEXT_STATEMENT({ for (auto&& param : _impl->getParams()) { paramMap.emplace(param.first, Any{param.second, _so}); diff --git a/src/inference/src/ie_core.cpp b/src/inference/src/ie_core.cpp index 8f68bb41b4d..789a7a553aa 100644 --- a/src/inference/src/ie_core.cpp +++ b/src/inference/src/ie_core.cpp @@ -12,6 +12,7 @@ #include #include +#include "any_copy.hpp" #include "cnn_network_ngraph_impl.hpp" #include "compilation_context.hpp" #include "cpp/ie_cnn_network.h" @@ -54,6 +55,12 @@ namespace ov { // Specify the default device when no device name is provided. const std::string DEFAULT_DEVICE_NAME = "DEFAULT_DEVICE"; +template +struct Parsed { + std::string _deviceName; + std::map _config; +}; + namespace { #ifndef OPENVINO_STATIC_LIBRARY @@ -71,12 +78,6 @@ std::string parseXmlConfig(const std::string& xmlFile) { #endif -template -struct Parsed { - std::string _deviceName; - std::map _config; -}; - template Parsed parseDeviceNameIntoConfig(const std::string& deviceName, const std::map& config = {}) { auto config_ = config; @@ -116,6 +117,21 @@ void allowNotImplemented(F&& f) { } } +ov::AnyMap flatten_sub_properties(const std::string& device, const ov::AnyMap& properties) { + ov::AnyMap result = properties; + for (auto&& property : properties) { + auto parsed = parseDeviceNameIntoConfig(property.first); + if (device.find(parsed._deviceName) != std::string::npos) { + if (property.second.is()) { + for (auto&& sub_property : property.second.as()) { + result[sub_property.first] = sub_property.second; + } + } + } + } + return result; +} + } // namespace class CoreImpl : public ie::ICore, public std::enable_shared_from_this { @@ -203,7 
+219,7 @@ class CoreImpl : public ie::ICore, public std::enable_shared_from_this supportedMetricKeys = plugin.get_metric(METRIC_KEY(SUPPORTED_METRICS), {}); + auto supportedMetricKeys = plugin.get_metric(METRIC_KEY(SUPPORTED_METRICS), {}).as>(); auto it = std::find(supportedMetricKeys.begin(), supportedMetricKeys.end(), METRIC_KEY(IMPORT_EXPORT_SUPPORT)); auto supported = (it != supportedMetricKeys.end()) && plugin.get_metric(METRIC_KEY(IMPORT_EXPORT_SUPPORT), {}).as(); @@ -224,7 +240,7 @@ class CoreImpl : public ie::ICore, public std::enable_shared_from_this configKeys = plugin.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), {}); + auto configKeys = plugin.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), {})->as>(); supported = std::find(configKeys.begin(), configKeys.end(), key) != configKeys.end(); } return supported; @@ -330,13 +346,13 @@ class CoreImpl : public ie::ICore, public std::enable_shared_from_this supportedMetricKeys = - plugin.get_metric(METRIC_KEY(SUPPORTED_METRICS), getMetricConfig); + auto supportedMetricKeys = + plugin.get_metric(METRIC_KEY(SUPPORTED_METRICS), getMetricConfig)->as>(); auto archIt = std::find(supportedMetricKeys.begin(), supportedMetricKeys.end(), METRIC_KEY(DEVICE_ARCHITECTURE)); if (archIt != supportedMetricKeys.end()) { auto value = plugin.get_metric(METRIC_KEY(DEVICE_ARCHITECTURE), getMetricConfig); - compileConfig[METRIC_KEY(DEVICE_ARCHITECTURE)] = value.as(); + compileConfig[METRIC_KEY(DEVICE_ARCHITECTURE)] = value->as(); } else { // Take device name if device does not support DEVICE_ARCHITECTURE metric compileConfig[METRIC_KEY(DEVICE_ARCHITECTURE)] = deviceFamily; @@ -749,9 +765,7 @@ public: return res; } - ie::Parameter GetMetric(const std::string& deviceName, - const std::string& name, - const ie::ParamMap& options = {}) const override { + Any GetMetric(const std::string& deviceName, const std::string& name, const AnyMap& options = {}) const override { // HETERO case { if (deviceName.find("HETERO:") == 0) { @@ -787,7 +801,7 
@@ public: return GetCPPPluginByName(parsed._deviceName).get_metric(name, parsed._config); } - ie::Parameter GetConfig(const std::string& deviceName, const std::string& name) const override { + Any GetConfig(const std::string& deviceName, const std::string& name) const override { auto parsed = parseDeviceNameIntoConfig(deviceName); return GetCPPPluginByName(parsed._deviceName).get_config(name, parsed._config); } @@ -1092,6 +1106,55 @@ public: } } + /** + * @brief Get device config it is passed as pair of device_name and `AnyMap` + * @param configs All set of configs + * @note `device_name` is not allowed in form of MULTI:CPU, HETERO:GPU,CPU, AUTO:CPU + * just simple forms like CPU, GPU, MULTI, GPU.0, etc + */ + void ExtractAndSetDeviceConfig(const ov::AnyMap& configs) { + for (auto&& config : configs) { + auto parsed = parseDeviceNameIntoConfig(config.first); + auto devices = GetListOfDevicesInRegistry(); + auto config_is_device_name_in_regestry = + std::any_of(devices.begin(), devices.end(), [&](const std::string& device) { + return device == parsed._deviceName; + }); + if (config_is_device_name_in_regestry) { + SetConfigForPlugins(any_copy(config.second.as()), config.first); + } + } + } + + std::map GetSupportedConfig(const std::string& deviceName, + const std::map& configs) override { + std::vector supportedConfigKeys = GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + std::map supportedConfig; + for (auto&& key : supportedConfigKeys) { + auto itKey = configs.find(key); + if (configs.end() != itKey) { + supportedConfig[key] = itKey->second; + } + } + for (auto&& config : configs) { + auto parsed = parseDeviceNameIntoConfig(config.first); + if (deviceName.find(parsed._deviceName) != std::string::npos) { + std::string key, value; + std::stringstream strm(config.second); + while (strm >> key >> value) { + if (supportedConfigKeys.end() != + std::find(supportedConfigKeys.begin(), supportedConfigKeys.end(), key)) { + supportedConfig[key] = value; + } + } + 
for (auto&& config : parsed._config) { + supportedConfig[config.first] = config.second.as(); + } + } + } + return supportedConfig; + } + /** * @brief Registers the extension in a Core object * Such extensions can be used for both CNNNetwork readers and device plugins @@ -1593,37 +1656,37 @@ ie::CNNNetwork toCNN(const std::shared_ptr& model) { } // namespace -CompiledModel Core::compile_model(const std::shared_ptr& model, const ConfigMap& config) { +CompiledModel Core::compile_model(const std::shared_ptr& model, const AnyMap& config) { return compile_model(model, ov::DEFAULT_DEVICE_NAME, config); } CompiledModel Core::compile_model(const std::shared_ptr& model, const std::string& deviceName, - const ConfigMap& config) { + const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->LoadNetwork(toCNN(model), deviceName, config); + auto exec = _impl->LoadNetwork(toCNN(model), deviceName, any_copy(flatten_sub_properties(deviceName, config))); return {exec._ptr, exec._so}; }); } -CompiledModel Core::compile_model(const std::string& modelPath, const ConfigMap& config) { +CompiledModel Core::compile_model(const std::string& modelPath, const AnyMap& config) { return compile_model(modelPath, ov::DEFAULT_DEVICE_NAME, config); } -CompiledModel Core::compile_model(const std::string& modelPath, - const std::string& deviceName, - const ConfigMap& config) { +CompiledModel Core::compile_model(const std::string& modelPath, const std::string& deviceName, const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->LoadNetwork(modelPath, deviceName, config); + auto exec = _impl->LoadNetwork(modelPath, deviceName, any_copy(flatten_sub_properties(deviceName, config))); return {exec._ptr, exec._so}; }); } CompiledModel Core::compile_model(const std::shared_ptr& model, const RemoteContext& context, - const ConfigMap& config) { + const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - auto exec = _impl->LoadNetwork(toCNN(model), context._impl, config); + auto exec = 
_impl->LoadNetwork(toCNN(model), + context._impl, + any_copy(flatten_sub_properties(context.get_device_name(), config))); return {exec._ptr, exec._so}; }); } @@ -1648,15 +1711,15 @@ void Core::add_extension(const std::vector>& exte OV_CORE_CALL_STATEMENT({ _impl->AddOVExtensions(extensions); }); } -CompiledModel Core::import_model(std::istream& modelStream, const std::string& deviceName, const ConfigMap& config) { +CompiledModel Core::import_model(std::istream& modelStream, const std::string& deviceName, const AnyMap& config) { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model"); OV_CORE_CALL_STATEMENT({ - auto exec = _impl->ImportNetwork(modelStream, deviceName, config); + auto exec = _impl->ImportNetwork(modelStream, deviceName, any_copy(flatten_sub_properties(deviceName, config))); return {exec._ptr, exec._so}; }); } -CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext& context, const ConfigMap& config) { +CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext& context, const AnyMap& config) { OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model"); using ExportMagic = std::array; @@ -1683,34 +1746,39 @@ CompiledModel Core::import_model(std::istream& modelStream, const RemoteContext& SupportedOpsMap Core::query_model(const std::shared_ptr& model, const std::string& deviceName, - const ConfigMap& config) const { + const AnyMap& config) const { OV_CORE_CALL_STATEMENT({ - auto qnResult = _impl->QueryNetwork(toCNN(model), deviceName, config); + auto qnResult = + _impl->QueryNetwork(toCNN(model), deviceName, any_copy(flatten_sub_properties(deviceName, config))); return qnResult.supportedLayersMap; }); } -void Core::set_config(const ConfigMap& config, const std::string& deviceName) { - OPENVINO_ASSERT(deviceName.find("HETERO:") != 0, - "set_config is supported only for HETERO itself (without devices). 
" - "You can configure the devices with set_config before creating the HETERO on top."); - OPENVINO_ASSERT(deviceName.find("MULTI:") != 0, - "set_config is supported only for MULTI itself (without devices). " - "You can configure the devices with set_config before creating the MULTI on top."); - OPENVINO_ASSERT(deviceName.find("AUTO:") != 0, - "set_config is supported only for AUTO itself (without devices). " - "You can configure the devices with set_config before creating the AUTO on top."); - +void Core::set_property(const AnyMap& config) { OV_CORE_CALL_STATEMENT({ - if (deviceName.empty()) { - _impl->SetConfigForPlugins(config, std::string()); - } else { - _impl->SetConfigForPlugins(config, deviceName); - } + _impl->ExtractAndSetDeviceConfig(config); + _impl->SetConfigForPlugins(any_copy(config), {}); }); } -Any Core::get_config(const std::string& deviceName, const std::string& name) const { +void Core::set_property(const std::string& deviceName, const AnyMap& config) { + OPENVINO_ASSERT(deviceName.find("HETERO:") != 0, + "set_property is supported only for HETERO itself (without devices). " + "You can configure the devices with set_property before creating the HETERO on top."); + OPENVINO_ASSERT(deviceName.find("MULTI:") != 0, + "set_property is supported only for MULTI itself (without devices). " + "You can configure the devices with set_property before creating the MULTI on top."); + OPENVINO_ASSERT(deviceName.find("AUTO:") != 0, + "set_property is supported only for AUTO itself (without devices). " + "You can configure the devices with set_property before creating the AUTO on top."); + + OV_CORE_CALL_STATEMENT({ + _impl->ExtractAndSetDeviceConfig(config); + _impl->SetConfigForPlugins(any_copy(config), deviceName); + }); +} + +Any Core::get_property(const std::string& deviceName, const std::string& name) const { OPENVINO_ASSERT(deviceName.find("HETERO:") != 0, "You can only get_config of the HETERO itself (without devices). 
" "get_config is also possible for the individual devices before creating the HETERO on top."); @@ -1723,12 +1791,37 @@ Any Core::get_config(const std::string& deviceName, const std::string& name) con OV_CORE_CALL_STATEMENT({ auto parsed = parseDeviceNameIntoConfig(deviceName); - return _impl->GetCPPPluginByName(parsed._deviceName).get_config(name, parsed._config); + if (ov::supported_properties == name) { + try { + return _impl->GetCPPPluginByName(parsed._deviceName).get_metric(name, parsed._config); + } catch (ie::Exception&) { + auto ro_properties = _impl->GetCPPPluginByName(parsed._deviceName) + .get_metric(METRIC_KEY(SUPPORTED_METRICS), parsed._config) + .as>(); + auto rw_properties = _impl->GetCPPPluginByName(parsed._deviceName) + .get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS), parsed._config) + .as>(); + std::vector supported_properties; + for (auto&& ro_property : ro_properties) { + supported_properties.emplace_back(ro_property, PropertyMutability::RO); + } + for (auto&& rw_property : rw_properties) { + supported_properties.emplace_back(rw_property, PropertyMutability::RW); + } + supported_properties.emplace_back(ov::supported_properties.name(), PropertyMutability::RO); + return supported_properties; + } + } + try { + return _impl->GetCPPPluginByName(parsed._deviceName).get_metric(name, parsed._config); + } catch (ie::Exception&) { + return _impl->GetCPPPluginByName(parsed._deviceName).get_config(name, parsed._config); + } }); } -Any Core::get_metric(const std::string& deviceName, const std::string& name) const { - OV_CORE_CALL_STATEMENT(return _impl->GetMetric(deviceName, name);); +void Core::get_property(const std::string& deviceName, const std::string& name, ov::Any& to) const { + any_lexical_cast(get_property(deviceName, name), to); } std::vector Core::get_available_devices() const { @@ -1752,13 +1845,13 @@ void Core::register_plugins(const std::string& xmlConfigFile) { OV_CORE_CALL_STATEMENT(_impl->RegisterPluginsInRegistry(xmlConfigFile);); } 
-RemoteContext Core::create_context(const std::string& deviceName, const ParamMap& params) { +RemoteContext Core::create_context(const std::string& deviceName, const AnyMap& params) { OPENVINO_ASSERT(deviceName.find("HETERO") != 0, "HETERO device does not support remote context"); OPENVINO_ASSERT(deviceName.find("MULTI") != 0, "MULTI device does not support remote context"); OPENVINO_ASSERT(deviceName.find("AUTO") != 0, "AUTO device does not support remote context"); OV_CORE_CALL_STATEMENT({ - auto parsed = parseDeviceNameIntoConfig(deviceName, params); + auto parsed = parseDeviceNameIntoConfig(deviceName, flatten_sub_properties(deviceName, params)); auto remoteContext = _impl->GetCPPPluginByName(parsed._deviceName).create_context(parsed._config); return {remoteContext._ptr, remoteContext._so}; }); @@ -1770,7 +1863,7 @@ RemoteContext Core::get_default_context(const std::string& deviceName) { OPENVINO_ASSERT(deviceName.find("AUTO") != 0, "AUTO device does not support remote context"); OV_CORE_CALL_STATEMENT({ - auto parsed = parseDeviceNameIntoConfig(deviceName, ParamMap()); + auto parsed = parseDeviceNameIntoConfig(deviceName, AnyMap{}); auto remoteContext = _impl->GetCPPPluginByName(parsed._deviceName).get_default_context(parsed._config); return {remoteContext._ptr, remoteContext._so}; }); diff --git a/src/inference/src/remote_tensor.cpp b/src/inference/src/remote_tensor.cpp index bd7bb41ede3..1e9f6f73190 100644 --- a/src/inference/src/remote_tensor.cpp +++ b/src/inference/src/remote_tensor.cpp @@ -4,6 +4,7 @@ #include "openvino/runtime/remote_tensor.hpp" +#include "any_copy.hpp" #include "ie_ngraph_utils.hpp" #include "ie_remote_blob.hpp" @@ -32,12 +33,12 @@ void RemoteTensor::type_check(const Tensor& tensor, const std::map(_impl.get()); try { - ParamMap paramMap; + AnyMap paramMap; for (auto&& param : remote_impl->getParams()) { paramMap.emplace(param.first, Any{param.second, _so}); } @@ -51,8 +52,8 @@ ie::ParamMap RemoteTensor::get_params() const { std::string 
RemoteTensor::get_device_name() const { OPENVINO_ASSERT(_impl != nullptr, "Remote tensor was not initialized."); - type_check(*this); auto remote_impl = static_cast(_impl.get()); + type_check(*this); try { return remote_impl->getDeviceName(); } catch (const std::exception& ex) { diff --git a/src/plugins/auto/plugin.cpp b/src/plugins/auto/plugin.cpp index 3c66b3751ee..3fe949dea37 100644 --- a/src/plugins/auto/plugin.cpp +++ b/src/plugins/auto/plugin.cpp @@ -69,19 +69,6 @@ namespace { std::mutex MultiDeviceInferencePlugin::_mtx; std::map> MultiDeviceInferencePlugin::_priorityMap; -std::map MultiDeviceInferencePlugin::GetSupportedConfig( - const std::map & config, const std::string & deviceName) const { - std::vector supportedConfigKeys = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); - std::map supportedConfig; - for (auto&& key : supportedConfigKeys) { - auto itKey = config.find(key); - if (config.end() != itKey) { - supportedConfig[key] = itKey->second; - } - } - return supportedConfig; -} - std::vector MultiDeviceInferencePlugin::ParseMetaDevices(const std::string& priorities, const std::map & config) const { std::vector metaDevices; @@ -109,13 +96,13 @@ std::vector MultiDeviceInferencePlugin::ParseMetaDevices(cons tconfig[PluginConfigParams::KEY_DEVICE_ID] = deviceIDLocal; } - return GetSupportedConfig(tconfig, deviceName); + return GetCore()->GetSupportedConfig(deviceName, tconfig); }; auto getDefaultDeviceID = [this](std::string deviceName) -> std::string { - std::vector supportedMetrics = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS)); + auto supportedMetrics = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_METRICS)).as>(); if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) { - std::vector supportKeys = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + auto supportKeys = GetCore()->GetMetric(deviceName, 
METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); if (std::find(supportKeys.begin(), supportKeys.end(), CONFIG_KEY(DEVICE_ID)) != supportKeys.end()) { return GetCore()->GetConfig(deviceName, CONFIG_KEY(DEVICE_ID)).as(); @@ -622,9 +609,9 @@ std::vector MultiDeviceInferencePlugin::FilterDevice(const st std::vector filterDevice; for (auto&& item : metaDevices) { bool support = true; - std::vector supportedMetrics = GetCore()->GetMetric(item.deviceName, METRIC_KEY(SUPPORTED_METRICS)); + auto supportedMetrics = GetCore()->GetMetric(item.deviceName, METRIC_KEY(SUPPORTED_METRICS)).as>(); if (std::find(supportedMetrics.begin(), supportedMetrics.end(), METRIC_KEY(SUPPORTED_CONFIG_KEYS)) != supportedMetrics.end()) { - std::vector supportKeys = GetCore()->GetMetric(item.deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + auto supportKeys = GetCore()->GetMetric(item.deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); for (auto&& kvp : config) { auto targetKey = std::find(supportKeys.begin(), supportKeys.end(), kvp.first); // if device have the key, we think the device support it diff --git a/src/plugins/auto_batch/auto_batch.cpp b/src/plugins/auto_batch/auto_batch.cpp index a1809984a8a..9a9a54b6c43 100644 --- a/src/plugins/auto_batch/auto_batch.cpp +++ b/src/plugins/auto_batch/auto_batch.cpp @@ -516,20 +516,6 @@ std::map mergeConfigs(std::map AutoBatchInferencePlugin::GetSupportedConfig( - const std::map& config, - const std::string& deviceName) const { - std::vector supportedConfigKeys = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); - std::map supportedConfig; - for (auto&& key : supportedConfigKeys) { - auto itKey = config.find(key); - if (config.end() != itKey) { - supportedConfig[key] = itKey->second; - } - } - return supportedConfig; -} - DeviceInformation AutoBatchInferencePlugin::ParseBatchDevice(const std::string& deviceWithBatch) { auto&& d = deviceWithBatch; auto openingBracket = d.find_first_of('('); @@ -560,7 +546,7 @@ DeviceInformation 
AutoBatchInferencePlugin::ParseMetaDevice(const std::string& d tconfig[PluginConfigParams::KEY_DEVICE_ID] = deviceIDLocal; } - return GetSupportedConfig(tconfig, deviceName); + return GetCore()->GetSupportedConfig(deviceName, tconfig); }; auto metaDevice = ParseBatchDevice(devicesBatchCfg); diff --git a/src/plugins/hetero/plugin.cpp b/src/plugins/hetero/plugin.cpp index 39689300bbc..bfbf98447e1 100644 --- a/src/plugins/hetero/plugin.cpp +++ b/src/plugins/hetero/plugin.cpp @@ -73,18 +73,6 @@ InferenceEngine::IExecutableNetworkInternal::Ptr Engine::ImportNetwork( return std::make_shared(heteroModel, mergeConfigs(_config, config), this); } -Engine::Configs Engine::GetSupportedConfig(const Engine::Configs& config, const std::string& deviceName) const { - std::vector supportedConfigKeys = GetCore()->GetMetric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)); - Engine::Configs supportedConfig; - for (auto&& key : supportedConfigKeys) { - auto itKey = config.find(key); - if (config.end() != itKey) { - supportedConfig[key] = itKey->second; - } - } - return supportedConfig; -} - Engine::DeviceMetaInformationMap Engine::GetDevicePlugins(const std::string& targetFallback, const Configs& localConfig) const { auto getDeviceConfig = [&](const std::string& deviceWithID) { @@ -98,7 +86,7 @@ Engine::DeviceMetaInformationMap Engine::GetDevicePlugins(const std::string& tar tconfig[KEY_DEVICE_ID] = deviceIDLocal; } - return GetSupportedConfig(tconfig, deviceName); + return GetCore()->GetSupportedConfig(deviceName, tconfig); }; auto fallbackDevices = InferenceEngine::DeviceIDParser::getHeteroDevices(targetFallback); diff --git a/src/plugins/intel_gpu/src/plugin/plugin.cpp b/src/plugins/intel_gpu/src/plugin/plugin.cpp index c84b7ce9c1b..c8352b71a77 100644 --- a/src/plugins/intel_gpu/src/plugin/plugin.cpp +++ b/src/plugins/intel_gpu/src/plugin/plugin.cpp @@ -269,7 +269,7 @@ IExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine 
OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Plugin::LoadExeNetworkImpl::CreateContext"); std::lock_guard lock(engine_mutex); if (!canReuseDefaultContext()) { - m_defaultContext.reset(new RemoteCLContext(shared_from_this(), ParamMap(), conf)); + m_defaultContext.reset(new RemoteCLContext(shared_from_this(), AnyMap(), conf)); } } @@ -303,7 +303,7 @@ IExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine return std::make_shared(transformedNetwork, casted, conf); } -InferenceEngine::RemoteContext::Ptr Plugin::CreateContext(const ParamMap& params) { +InferenceEngine::RemoteContext::Ptr Plugin::CreateContext(const AnyMap& params) { // parameter map is non-empty std::string contextTypeStr = _StrFromParams(params, GPU_PARAM_KEY(CONTEXT_TYPE)); @@ -320,7 +320,7 @@ InferenceEngine::RemoteContext::Ptr Plugin::CreateContext(const ParamMap& params } } -InferenceEngine::RemoteContext::Ptr Plugin::GetDefaultContext(const ParamMap& params) { +InferenceEngine::RemoteContext::Ptr Plugin::GetDefaultContext(const AnyMap& params) { if (nullptr == m_defaultContext) { m_defaultContext.reset(new RemoteCLContext(shared_from_this(), params, _impl->m_configs.GetDefaultDeviceConfig())); } @@ -361,7 +361,7 @@ QueryNetworkResult Plugin::QueryNetwork(const CNNNetwork& network, if (m_defaultContext == nullptr) { m_defaultContext.reset(new RemoteCLContext( std::const_pointer_cast(shared_from_this()), - ParamMap(), conf)); + AnyMap(), conf)); } Program prog(m_defaultContext->getImpl()->GetEngine(), conf); auto function = network.getFunction(); diff --git a/src/plugins/intel_gpu/src/plugin/remote_context.cpp b/src/plugins/intel_gpu/src/plugin/remote_context.cpp index c8a2b0836f5..0720ec68eb4 100644 --- a/src/plugins/intel_gpu/src/plugin/remote_context.cpp +++ b/src/plugins/intel_gpu/src/plugin/remote_context.cpp @@ -43,7 +43,7 @@ RemoteBlobImpl::RemoteBlobImpl(ClContext::Ptr context, } } -ParamMap RemoteBlobImpl::getParams() const { +AnyMap 
RemoteBlobImpl::getParams() const { assert(m_memObject != nullptr); auto params = m_memObject->get_internal_params(); @@ -264,7 +264,7 @@ void RemoteAllocator::unlock(void* handle) noexcept { } ExecutionContextImpl::ExecutionContextImpl(const std::shared_ptr plugin, - const ParamMap& params, + const AnyMap& params, const Config& config) : m_plugin(plugin), m_type(ContextType::OCL), @@ -330,8 +330,8 @@ ExecutionContextImpl::ExecutionContextImpl(const std::shared_ptrget_user_context() } }; +AnyMap ExecutionContextImpl::getParams() const { + AnyMap ret = { { GPU_PARAM_KEY(OCL_CONTEXT), m_engine->get_user_context() } }; switch (m_type) { case OCL: diff --git a/src/tests/functional/inference_engine/ov_compiled_model_test.cpp b/src/tests/functional/inference_engine/ov_compiled_model_test.cpp index 63d31be2345..7230a989fd8 100644 --- a/src/tests/functional/inference_engine/ov_compiled_model_test.cpp +++ b/src/tests/functional/inference_engine/ov_compiled_model_test.cpp @@ -61,17 +61,12 @@ TEST(ExecutableNetworkOVTests, throwsOnUninitializedInputIndex) { TEST(ExecutableNetworkOVTests, throwsOnUninitializedSetConfig) { ov::CompiledModel exec; - ASSERT_THROW(exec.set_config({{}}), ov::Exception); -} - -TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetConfig) { - ov::CompiledModel exec; - ASSERT_THROW(exec.get_config({}), ov::Exception); + ASSERT_THROW(exec.set_property({{}}), ov::Exception); } TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetMetric) { ov::CompiledModel exec; - ASSERT_THROW(exec.get_metric({}), ov::Exception); + ASSERT_THROW(exec.get_property({}), ov::Exception); } TEST(ExecutableNetworkOVTests, throwsOnUninitializedGetContext) { diff --git a/src/tests/functional/inference_engine/serialization/single_layer/elementwise.cpp b/src/tests/functional/inference_engine/serialization/single_layer/elementwise.cpp index a8ed14f1095..6cca9d7e0e0 100644 --- a/src/tests/functional/inference_engine/serialization/single_layer/elementwise.cpp +++ 
b/src/tests/functional/inference_engine/serialization/single_layer/elementwise.cpp @@ -51,7 +51,7 @@ std::vector eltwiseOpTypes = { ngraph::helpers::EltwiseTypes::MOD }; -std::map additionalConfig = {}; +ov::AnyMap additionalConfig = {}; const auto elementiwiseParams = ::testing::Combine( ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes)), diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/ov_api_conformance_helpers.hpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/ov_api_conformance_helpers.hpp new file mode 100644 index 00000000000..4a2bde21a42 --- /dev/null +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/include/ov_api_conformance_helpers.hpp @@ -0,0 +1,80 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "conformance.hpp" +#include "common_test_utils/test_constants.hpp" + +// TODO: fix namespaces + +namespace ov { +namespace test { +namespace conformance { + +inline const std::string get_plugin_lib_name_by_device(const std::string& deviceName) { + const std::map devices{ + { "AUTO", "ov_auto_plugin" }, + { "HDDL", "HDDLPlugin" }, + { "VPUX", "ov_intel_vpux_plugin" }, + { "AUTO", "ov_auto_plugin" }, + { "CPU", "ov_intel_cpu_plugin" }, + { "GNA", "ov_intel_gna_plugin" }, + { "GPU", "ov_intel_gpu_plugin" }, + { "HETERO", "ov_hetero_plugin" }, + { "MULTI", "ov_multi_plugin" }, + { "MYRIAD", "ov_intel_vpu_plugin" }, + { "TEMPLATE", "ov_template_plugin" }, + }; + if (devices.find(deviceName) == devices.end()) { + throw std::runtime_error("Incorrect device name"); + } + return devices.at(deviceName); +} + + +inline const std::vector generate_configs(const std::string& targetDevice, + const std::vector& config = {}) { + std::pair defaultConfig; + if (targetDevice == std::string(CommonTestUtils::DEVICE_MULTI) || targetDevice == 
std::string(CommonTestUtils::DEVICE_AUTO)) { + defaultConfig = {MULTI_CONFIG_KEY(DEVICE_PRIORITIES), ov::test::conformance::targetDevice};; + } else if (targetDevice == std::string(CommonTestUtils::DEVICE_HETERO)) { + defaultConfig = { "TARGET_FALLBACK" , ov::test::conformance::targetDevice }; + } else if (targetDevice == std::string(CommonTestUtils::DEVICE_BATCH)) { + defaultConfig = { CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , std::string(ov::test::conformance::targetDevice)}; + } else { + throw std::runtime_error("Incorrect target device: " + targetDevice); + } + + std::vector resultConfig; + if (config.empty()) { + return {{defaultConfig}}; + } + for (auto configItem : config) { + configItem.insert(defaultConfig); + resultConfig.push_back(configItem); + } + return resultConfig; +} + +inline const std::string generate_complex_device_name(const std::string& deviceName) { + return deviceName + ":" + ov::test::conformance::targetDevice; +} + +inline const std::vector return_all_possible_device_combination() { + std::vector res{ov::test::conformance::targetDevice}; + std::vector devices{CommonTestUtils::DEVICE_HETERO, CommonTestUtils::DEVICE_AUTO, CommonTestUtils::DEVICE_MULTI}; + for (const auto& device : devices) { + res.emplace_back(generate_complex_device_name(device)); + } + return res; +} + +const std::vector empty_config = { + {}, +}; + +} // namespace conformance +} // namespace test +} // namespace ov diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp index 86f387ca1d9..81b6afcc34d 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/executable_network/get_metric.cpp @@ -5,9 +5,9 @@ 
#include "behavior/executable_network/get_metric.hpp" #include "api_conformance_helpers.hpp" -using namespace ov::test::conformance; using namespace BehaviorTestsDefinitions; using namespace InferenceEngine::PluginConfigParams; +using namespace ov::test::conformance; namespace { diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp index 1d0cf8b59d0..e1a04c6c0b6 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/callback.cpp @@ -6,8 +6,8 @@ #include "api_conformance_helpers.hpp" namespace { -using namespace ov::test::conformance; using namespace BehaviorTestsDefinitions; +using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCallbackTests, ::testing::Combine( diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp index 8d96e8bf41f..110353b1f7e 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/cancellation.cpp @@ -12,6 +12,6 @@ using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestCancellationTests, ::testing::Combine( ::testing::Values(targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(std::vector>{})), InferRequestCancellationTests::getTestCaseName); } // namespace diff --git 
a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp index 2312d08ecd9..6287ca6c709 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/io_blob.cpp @@ -9,8 +9,8 @@ #include "api_conformance_helpers.hpp" namespace { -using namespace ov::test::conformance; using namespace BehaviorTestsDefinitions; +using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestIOBBlobTest, ::testing::Combine( diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp index 1370bb46418..3cd820860da 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/multitheading.cpp @@ -16,7 +16,7 @@ using namespace BehaviorTestsDefinitions; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(std::vector>{})), InferRequestMultithreadingTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestMultithreadingTests, diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp 
index 34d343b66f1..b74d4379bf7 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/perf_counters.cpp @@ -12,7 +12,7 @@ using namespace BehaviorTestsDefinitions; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(std::vector>{})), InferRequestPerfCountersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPerfCountersTest, diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp index 21aad7d0bdf..dc807858191 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/infer_request/wait.cpp @@ -15,7 +15,7 @@ using namespace BehaviorTestsDefinitions; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestWaitTests, ::testing::Combine( ::testing::Values(targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(std::vector>{})), InferRequestWaitTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestWaitTests, diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_graph_info.cpp index 2114b538092..f65db8aef94 100644 --- 
a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_graph_info.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // #include "behavior/ov_executable_network/exec_graph_info.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" #include "ie_plugin_config.hpp" #include + using namespace ov::test::behavior; using namespace ov::test::conformance; - namespace { const std::vector ovExecGraphInfoElemTypes = { ov::element::i8, @@ -29,7 +29,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, ::testing::Combine( ::testing::ValuesIn(ovExecGraphInfoElemTypes), ::testing::Values(CommonTestUtils::DEVICE_CPU), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVExecGraphImportExportTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, @@ -37,7 +37,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, ::testing::Combine( ::testing::ValuesIn(ovExecGraphInfoElemTypes), ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVExecGraphImportExportTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, @@ -45,14 +45,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, ::testing::Combine( ::testing::ValuesIn(ovExecGraphInfoElemTypes), ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVExecGraphImportExportTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVExecGraphImportExportTest, ::testing::Combine(::testing::ValuesIn(ovExecGraphInfoElemTypes), 
::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVExecGraphImportExportTest::getTestCaseName); } // namespace \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_network_base.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_network_base.cpp index be3f91a4c1b..726ce6bbbd5 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_network_base.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/exec_network_base.cpp @@ -4,33 +4,33 @@ #include "behavior/ov_executable_network/exec_network_base.hpp" #include "ie_plugin_config.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" -using namespace ov::test::behavior; -using namespace ov::test::conformance; namespace { +using namespace ov::test::behavior; +using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVExecutableNetworkBaseTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_CPU), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVExecutableNetworkBaseTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVExecutableNetworkBaseTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVExecutableNetworkBaseTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVExecutableNetworkBaseTest, ::testing::Combine( 
::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVExecutableNetworkBaseTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVExecutableNetworkBaseTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVExecutableNetworkBaseTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/get_metric.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/get_metric.cpp index c965665fd7e..a168c14a278 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/get_metric.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_executable_network/get_metric.cpp @@ -4,13 +4,13 @@ #include "behavior/ov_executable_network/get_metric.hpp" #include "openvino/runtime/core.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" + +namespace { using namespace ov::test::behavior; using namespace ov::test::conformance; using namespace InferenceEngine::PluginConfigParams; - -namespace { // // IE Class Common tests with // @@ -19,7 +19,7 @@ namespace { INSTANTIATE_TEST_SUITE_P( smoke_OVClassImportExportTestP, OVClassImportExportTestP, - ::testing::Values(generateComplexDeviceName(CommonTestUtils::DEVICE_HETERO))); + ::testing::Values(generate_complex_device_name(CommonTestUtils::DEVICE_HETERO))); // // Executable Network GetMetric @@ -27,23 +27,23 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassExecutableNetworkGetMetricTest, 
OVClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_NETWORK_NAME, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassExecutableNetworkGetMetricTest, OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); // // Executable Network GetConfig / SetConfig diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/callback.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/callback.cpp index 478982363b0..1d6ea564140 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/callback.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/callback.cpp @@ -5,33 +5,33 @@ #include #include "behavior/ov_infer_request/callback.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" -using namespace 
ov::test::behavior; -using namespace ov::test::conformance; namespace { +using namespace ov::test::behavior; +using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestCallbackTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestCallbackTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVInferRequestCallbackTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestCallbackTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestCallbackTests::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/cancellation.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/cancellation.cpp index 444988c1948..2ea38d1a69c 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/cancellation.cpp +++ 
b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/cancellation.cpp @@ -3,15 +3,15 @@ // #include "behavior/ov_infer_request/cancellation.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" -using namespace ov::test::behavior; -using namespace ov::test::conformance; namespace { +using namespace ov::test::behavior; +using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestCancellationTests, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestCancellationTests::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/infer_request_dynamic.cpp index 8c442592c62..4b52b6faebf 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -5,12 +5,12 @@ #include #include "behavior/ov_infer_request/infer_request_dynamic.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" -using namespace ov::test::behavior; -using namespace ov::test::conformance; namespace { +using namespace ov::test::behavior; +using namespace ov::test::conformance; std::shared_ptr ovGetFunction1() { const std::vector inputShape = {1, 4, 20, 20}; @@ -61,7 +61,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_1, OVInferRequestDynamicTests, {{1, 4, 20, 20}, {1, 4, 20, 20}}, {{2, 4, 20, 20}, {2, 4, 20, 20}}}), ::testing::Values(ov::test::conformance::targetDevice), - 
::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestDynamicTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, OVInferRequestDynamicTests, @@ -71,7 +71,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests_2, OVInferRequestDynamicTests, {{1, 4, 20, 20}, {1, 2, 20, 40}}, {{2, 4, 20, 20}, {2, 2, 20, 40}}}), ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestDynamicTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests, @@ -81,7 +81,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestDynamicTests, {{1, 4, 20, 20}, {1, 2, 20, 40}}, {{2, 4, 20, 20}, {2, 2, 20, 40}}}), ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestDynamicTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestDynamicTests, @@ -91,7 +91,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestDynamicTests, {{1, 4, 20, 20}, {1, 2, 20, 40}}, {{2, 4, 20, 20}, {2, 2, 20, 40}}}), ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestDynamicTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests, @@ -101,6 +101,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestDynamicTests, {{1, 4, 20, 20}, {1, 2, 20, 40}}, {{2, 4, 20, 20}, {2, 2, 20, 40}}}), ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), 
OVInferRequestDynamicTests::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/inference_chaining.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/inference_chaining.cpp index f7a9c48d25c..1724b183dc7 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/inference_chaining.cpp @@ -4,33 +4,33 @@ #include "behavior/ov_infer_request/inference_chaining.hpp" #include "common_test_utils/test_constants.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" -using namespace ov::test::behavior; -using namespace ov::test::conformance; namespace { +using namespace ov::test::behavior; +using namespace ov::test::conformance; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferenceChaining, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferenceChaining::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferenceChaining, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferenceChaining::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferenceChaining, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferenceChaining::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferenceChaining, 
::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVInferenceChaining::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/io_tensor.cpp index 1e69a2124d5..aadfd346c86 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/io_tensor.cpp @@ -6,7 +6,7 @@ #include "behavior/ov_infer_request/io_tensor.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; @@ -15,25 +15,25 @@ namespace { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestIOTensorTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestIOTensorTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), 
OVInferRequestIOTensorTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestIOTensorTest::getTestCaseName); std::vector ovIOTensorElemTypes = { @@ -59,27 +59,27 @@ INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorSetPrecision ::testing::Combine( ::testing::ValuesIn(ovIOTensorElemTypes), ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, ::testing::Combine( ::testing::ValuesIn(ovIOTensorElemTypes), ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, ::testing::Combine( ::testing::ValuesIn(ovIOTensorElemTypes), ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestIOTensorSetPrecisionTest, ::testing::Combine( ::testing::ValuesIn(ovIOTensorElemTypes), ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestIOTensorSetPrecisionTest::getTestCaseName); 
} // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/multithreading.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/multithreading.cpp index 26e6aeb1aa7..3ae65e36c92 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/multithreading.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/multithreading.cpp @@ -6,7 +6,7 @@ #include "behavior/ov_infer_request/multithreading.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; @@ -15,25 +15,25 @@ namespace { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestMultithreadingTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestMultithreadingTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVInferRequestMultithreadingTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestMultithreadingTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - 
::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestMultithreadingTests::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/perf_counters.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/perf_counters.cpp index ab92672a8ea..07796fa7153 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/perf_counters.cpp @@ -3,7 +3,7 @@ // #include "behavior/ov_infer_request/perf_counters.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; @@ -12,24 +12,24 @@ namespace { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestPerfCountersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestPerfCountersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), 
OVInferRequestPerfCountersTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestPerfCountersTest, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestPerfCountersTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/wait.cpp index baefbafe215..538d55b0662 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_infer_request/wait.cpp @@ -6,7 +6,7 @@ #include "behavior/ov_infer_request/wait.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; @@ -15,24 +15,24 @@ namespace { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(ov::test::conformance::targetDevice), - ::testing::ValuesIn(emptyConfig)), + ::testing::ValuesIn(empty_config)), OVInferRequestWaitTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_MULTI))), OVInferRequestWaitTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_AUTO), - 
::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_AUTO))), OVInferRequestWaitTests::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(generate_configs(CommonTestUtils::DEVICE_HETERO))), OVInferRequestWaitTests::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_plugin/core_integration.cpp index 6aac4f6dc58..86606380985 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/ov_plugin/core_integration.cpp @@ -4,7 +4,7 @@ #include "behavior/ov_plugin/core_integration.hpp" #include "openvino/runtime/core.hpp" -#include "api_conformance_helpers.hpp" +#include "ov_api_conformance_helpers.hpp" using namespace ov::test::behavior; using namespace ov::test::conformance; @@ -17,7 +17,7 @@ namespace { INSTANTIATE_TEST_SUITE_P( smoke_OVClassCommon, OVClassBasicTestP, - ::testing::Values(std::make_pair(getPluginLibNameByDevice(ov::test::conformance::targetDevice), ov::test::conformance::targetDevice))); + ::testing::Values(std::make_pair(get_plugin_lib_name_by_device(ov::test::conformance::targetDevice), ov::test::conformance::targetDevice))); INSTANTIATE_TEST_SUITE_P( smoke_OVClassNetworkTestP, OVClassNetworkTestP, @@ -25,7 +25,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassImportExportTestP, OVClassImportExportTestP, - 
::testing::Values(generateComplexDeviceName(ov::test::conformance::targetDevice))); + ::testing::Values(generate_complex_device_name(ov::test::conformance::targetDevice))); // // IE Class GetMetric @@ -33,11 +33,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetMetricTest, OVClassGetMetricTest_SUPPORTED_CONFIG_KEYS, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetMetricTest, OVClassGetMetricTest_SUPPORTED_METRICS, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetMetricTest, OVClassGetMetricTest_AVAILABLE_DEVICES, @@ -45,7 +45,7 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetMetricTest, OVClassGetMetricTest_FULL_DEVICE_NAME, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetMetricTest, OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES, @@ -61,11 +61,11 @@ INSTANTIATE_TEST_SUITE_P( INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetMetricTest, OVClassGetMetricTest_ThrowUnsupported, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetConfigTest, OVClassGetConfigTest_ThrowUnsupported, - ::testing::ValuesIn(returnAllPossibleDeviceCombination())); + ::testing::ValuesIn(return_all_possible_device_combination())); INSTANTIATE_TEST_SUITE_P( smoke_OVClassGetAvailableDevices, OVClassGetAvailableDevices, diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp index 
3754671f25c..23bcd802554 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/configuration_tests.cpp @@ -10,6 +10,7 @@ using namespace BehaviorTestsDefinitions; using namespace ov::test::conformance; + namespace { #if (defined(__APPLE__) || defined(_WIN32)) auto defaultBindThreadParameter = InferenceEngine::Parameter{[] { diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp index 3933ea54a87..f543918d1ab 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_integration.cpp @@ -6,8 +6,8 @@ #include "api_conformance_helpers.hpp" using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; using namespace InferenceEngine::PluginConfigParams; +using namespace ov::test::conformance; namespace { // diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp index 9aa994c0a51..f6532a8703f 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/core_threading_tests.cpp @@ -5,9 +5,10 @@ #include #include "api_conformance_helpers.hpp" -namespace { using namespace ov::test::conformance; +namespace { + const Params coreThreadingParams[] = { 
std::tuple{ CommonTestUtils::DEVICE_HETERO, generateConfigs(CommonTestUtils::DEVICE_HETERO).front() }, std::tuple{ CommonTestUtils::DEVICE_MULTI, generateConfigs(CommonTestUtils::DEVICE_MULTI).front() }, diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp index df2c28039b3..39361fc4d9d 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/plugin/life_time.cpp @@ -6,7 +6,6 @@ #include "api_conformance_helpers.hpp" using namespace BehaviorTestsDefinitions; -using namespace ov::test::conformance; namespace { const std::vector> orders = { // 0 - plugin diff --git a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/preprocessing/set_preprocess.cpp b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/preprocessing/set_preprocess.cpp index e021cd06c86..5ea5cc8f1aa 100644 --- a/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/preprocessing/set_preprocess.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/api_conformance_runner/src/behavior/preprocessing/set_preprocess.cpp @@ -30,21 +30,21 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestPreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisionsPreprocess), ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_HETERO))), InferRequestPreprocessTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessTest, ::testing::Combine( 
::testing::ValuesIn(netPrecisionsPreprocess), ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_MULTI))), InferRequestPreprocessTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessTest, ::testing::Combine( ::testing::ValuesIn(netPrecisionsPreprocess), ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_AUTO))), InferRequestPreprocessTest::getTestCaseName); @@ -101,7 +101,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestPreprocessConve ::testing::Bool(), ::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_HETERO))), InferRequestPreprocessConversionTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, @@ -115,7 +115,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Hetero_BehaviorTests, InferRequestPreprocessDynam ::testing::Values(true), // only SetBlob ::testing::Values(true), // only SetBlob ::testing::Values(CommonTestUtils::DEVICE_HETERO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_HETERO))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_HETERO))), InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessConversionTest, @@ -129,7 +129,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessConver ::testing::Bool(), ::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_MULTI), - 
::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_MULTI))), InferRequestPreprocessConversionTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, @@ -143,7 +143,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Multi_BehaviorTests, InferRequestPreprocessDynami ::testing::Values(true), // only SetBlob ::testing::Values(true), // only SetBlob ::testing::Values(CommonTestUtils::DEVICE_MULTI), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_MULTI))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_MULTI))), InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessConversionTest, @@ -157,7 +157,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessConvers ::testing::Bool(), ::testing::Bool(), ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_AUTO))), InferRequestPreprocessConversionTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessDynamicallyInSetBlobTest, @@ -171,6 +171,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Auto_BehaviorTests, InferRequestPreprocessDynamic ::testing::Values(true), // only SetBlob ::testing::Values(true), // only SetBlob ::testing::Values(CommonTestUtils::DEVICE_AUTO), - ::testing::ValuesIn(generateConfigs(CommonTestUtils::DEVICE_AUTO))), + ::testing::ValuesIn(ov::test::conformance::generateConfigs(CommonTestUtils::DEVICE_AUTO))), InferRequestPreprocessDynamicallyInSetBlobTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/conformance.hpp 
b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/conformance.hpp index 0e19d5fc85f..33b2e5ddda3 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/conformance.hpp +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/include/conformance.hpp @@ -9,19 +9,20 @@ namespace ov { namespace test { namespace conformance { - -extern const char *targetDevice; +extern const char* targetDevice; extern const char *targetPluginName; + extern std::vector IRFolderPaths; extern std::vector disabledTests; -extern std::map pluginConfig; -inline std::map readPluginConfig(const std::string &configFilePath) { +extern ov::AnyMap pluginConfig; + +inline ov::AnyMap readPluginConfig(const std::string &configFilePath) { if (!CommonTestUtils::fileExists(configFilePath)) { std::string msg = "Input directory (" + configFilePath + ") doesn't not exist!"; throw std::runtime_error(msg); } - std::map config; + ov::AnyMap config; std::ifstream file(configFilePath); if (file.is_open()) { std::string buffer; @@ -31,8 +32,7 @@ inline std::map readPluginConfig(const std::string &co if (configElements.size() != 2) { throw std::runtime_error("Incorrect line to get config item: " + buffer + "\n. 
Example: \"PLUGIN_CONFIG_KEY=PLUGIN_CONFIG_VALUE\""); } - std::pair configItem{configElements.front(), configElements.back()}; - config.insert(configItem); + config.emplace(configElements.front(), configElements.back()); } } } else { diff --git a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp index 69981d62aa1..a8e4ba56bd6 100644 --- a/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/conformance_infra/src/skip_tests_config.cpp @@ -19,7 +19,7 @@ const char *targetPluginName = ""; std::vector IRFolderPaths = {}; std::vector disabledTests = {}; -std::map pluginConfig = {}; +ov::AnyMap pluginConfig = {}; } // namespace conformance } // namespace test diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/op_impl_check.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/op_impl_check.cpp index 5e8318c03e3..ba418b1dec7 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/op_impl_check.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/op_impl_check/op_impl_check.cpp @@ -19,7 +19,7 @@ INSTANTIATE_TEST_SUITE_P(conformance, ::testing::Combine( ::testing::ValuesIn(createFunctions()), ::testing::Values(targetDevice), - ::testing::Values(std::map())), + ::testing::Values(ov::AnyMap())), OpImplCheckTest::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp index 599f446a16b..4a13026500f 100644 --- 
a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp @@ -20,13 +20,13 @@ const std::vector netPrecisions = { ov::element::f16, ov::element::f32, }; -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_CPU}}}; -const std::vector> heteroConfigs = { +const std::vector heteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_CPU}}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp index c6df3d655b5..c8a0cfda304 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_executable_network/exec_network_base.cpp @@ -8,14 +8,14 @@ using namespace ov::test::behavior; namespace { - const std::vector> configs = { + const std::vector configs = { {}, }; - const std::vector> multiConfigs = { + const std::vector multiConfigs = { {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}} }; - const std::vector> heteroConfigs = { + const std::vector heteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_CPU}}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVExecutableNetworkBaseTest, @@ -49,16 +49,16 @@ namespace { InferenceEngine::Precision::U16 }; - const std::vector> configSetPrc = { + const std::vector configSetPrc = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, 
InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} }; - const std::vector> AutoConfigsSetPrc = { + const std::vector AutoConfigsSetPrc = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}, }; - const std::vector> MultiConfigsSetPrc = { + const std::vector MultiConfigsSetPrc = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}, {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}, {InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}} diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 3027d0c3860..687dacc6975 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -9,13 +9,13 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index 4986cc73fa0..0bba489213d 100644 --- 
a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -7,7 +7,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 5f2a5093818..fb07c5aa370 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -10,15 +10,15 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> HeteroConfigs = { +const std::vector HeteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_CPU}} }; -const std::vector> AutoConfigs = { +const std::vector AutoConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_CPU}} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index 583914a235b..443b713962a 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -9,15 +9,15 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> HeteroConfigs = { +const std::vector HeteroConfigs = { {{"TARGET_FALLBACK", 
CommonTestUtils::DEVICE_CPU}} }; -const std::vector> AutoConfigs = { +const std::vector AutoConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_CPU}} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index a78759538ec..7e2306f0dfa 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -9,21 +9,21 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; -const std::vector> emptyConfigs = {{}}; +const std::vector emptyConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestIOTensorTest, ::testing::Combine( diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index 80c30bebc0d..e33185f6687 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -10,13 
+10,13 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index 7666a60b62d..785eb317c99 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -27,15 +27,15 @@ TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) { } } -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 2de2ce6d3f5..bb23139cf89 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -10,17 +10,17 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const 
std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}}, {{InferenceEngine::PluginConfigParams::KEY_CPU_THROUGHPUT_STREAMS, "0"}, {InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, "1"}} }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_CPU}} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index e841d0bb5c9..1e5d9d7f1ff 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -83,12 +83,12 @@ TEST(OVClassBasicTest, smoke_SetConfigAfterCreatedThrow) { ov::Core ie; std::string value = {}; - ASSERT_NO_THROW(ie.set_config({{KEY_CPU_THREADS_NUM, "1"}}, "CPU")); - ASSERT_NO_THROW(value = ie.get_config("CPU", KEY_CPU_THREADS_NUM).as()); + ASSERT_NO_THROW(ie.set_property("CPU", {{KEY_CPU_THREADS_NUM, "1"}})); + ASSERT_NO_THROW(value = ie.get_property("CPU", KEY_CPU_THREADS_NUM).as()); ASSERT_EQ("1", value); - ASSERT_NO_THROW(ie.set_config({{KEY_CPU_THREADS_NUM, "4"}}, "CPU")); - ASSERT_NO_THROW(value = ie.get_config("CPU", KEY_CPU_THREADS_NUM).as()); + ASSERT_NO_THROW(ie.set_property("CPU", {{KEY_CPU_THREADS_NUM, "4"}})); + ASSERT_NO_THROW(value = ie.get_property("CPU", KEY_CPU_THREADS_NUM).as()); ASSERT_EQ("4", value); } diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/eltwise.cpp index 
84131ce8b0c..58b5164d982 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -133,7 +133,7 @@ std::vector eltwiseOpTypesSingleThread = { ngraph::helpers::EltwiseTypes::POWER, }; -std::map additional_config_single_thread = { +ov::AnyMap additional_config_single_thread = { {"CPU_THREADS_NUM", "1"} }; diff --git a/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/softmax.cpp b/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/softmax.cpp index d13718e6909..d2a0c7bb709 100644 --- a/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/src/tests/functional/plugin/cpu/shared_tests_instances/single_layer_tests/softmax.cpp @@ -39,7 +39,7 @@ const auto params2D_static = testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape2D)), testing::ValuesIn(axis2D), testing::Values(CommonTestUtils::DEVICE_CPU), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); const auto params2D_dynamic = testing::Combine( @@ -49,7 +49,7 @@ const auto params2D_dynamic = testing::Combine( testing::ValuesIn(inputDynamicShape2D), testing::ValuesIn(axis2D), testing::Values(CommonTestUtils::DEVICE_CPU), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); INSTANTIATE_TEST_SUITE_P( @@ -87,7 +87,7 @@ const auto params4Dstatic = testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape4D)), testing::ValuesIn(axis4D), testing::Values(CommonTestUtils::DEVICE_CPU), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); const auto params4Ddynamic = testing::Combine( @@ -97,7 +97,7 @@ const auto params4Ddynamic = testing::Combine( testing::ValuesIn(inputDynamicShape4D), testing::ValuesIn(axis4D), testing::Values(CommonTestUtils::DEVICE_CPU), - 
testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); INSTANTIATE_TEST_SUITE_P( diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp index 565ca55e41e..f07e08a5296 100755 --- a/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/convolution.cpp @@ -150,7 +150,7 @@ protected: init_input_shapes({inputShape}); if (configuration.count(PluginConfigParams::KEY_ENFORCE_BF16) && - PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16]) { + PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16].as()) { selectedType += "_BF16"; rel_threshold = 1e-2f; if (selectedType == "jit_gemm_BF16") diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp index b3e0f3d3ca0..6749b6a89f7 100644 --- a/src/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/eltwise.cpp @@ -196,7 +196,7 @@ std::vector eltwiseOpTypesDiffInp = { // Differen // ngraph::helpers::EltwiseTypes::MOD // Does not execute because of transformations }; -std::map additional_config; +ov::AnyMap additional_config; std::vector netType = {ElementType::bf16, ElementType::f32}; diff --git a/src/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp b/src/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp index fa0bbc4f6e1..8e5857b3696 100644 --- a/src/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp +++ b/src/tests/functional/plugin/cpu/single_layer_tests/group_convolution.cpp @@ -151,7 +151,7 @@ protected: init_input_shapes({inputShape}); if (configuration.count(PluginConfigParams::KEY_ENFORCE_BF16) && - PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16]) { + 
PluginConfigParams::YES == configuration[PluginConfigParams::KEY_ENFORCE_BF16].as()) { selectedType += "_BF16"; rel_threshold = 1e-2f; } else { diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp index 330b1eee7cf..1ee7c65ed63 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp @@ -13,7 +13,7 @@ const std::vector netPrecisions = { ov::element::u8, ov::element::f32 }; -const std::vector> configs = { +const std::vector configs = { {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}} }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index ce0c714278a..3cbf8684e44 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -6,7 +6,7 @@ using namespace ov::test::behavior; namespace { - const std::vector> configs = { + const std::vector configs = { {}, }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp index d182b0f8fef..719d9ac61c9 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_executable_network/get_metric.cpp @@ -79,13 +79,13 @@ INSTANTIATE_TEST_SUITE_P( using 
OVClassExecutableNetworkSetConfigFromFp32Test = OVClassExecutableNetworkGetMetricTestForSpecificConfig; TEST_P(OVClassExecutableNetworkSetConfigFromFp32Test, SetConfigFromFp32Throws) { -ov::Core ie; + ov::Core ie; -std::map initialConfig; -initialConfig[GNA_CONFIG_KEY(DEVICE_MODE)] = InferenceEngine::GNAConfigParams::GNA_SW_FP32; -ov::CompiledModel exeNetwork = ie.compile_model(simpleNetwork, deviceName, initialConfig); + ov::AnyMap initialConfig; + initialConfig[GNA_CONFIG_KEY(DEVICE_MODE)] = InferenceEngine::GNAConfigParams::GNA_SW_FP32; + ov::CompiledModel exeNetwork = ie.compile_model(simpleNetwork, deviceName, initialConfig); -ASSERT_THROW(exeNetwork.set_config({{configKey, configValue}}), ov::Exception); + ASSERT_THROW(exeNetwork.set_property({{configKey, configValue}}), ov::Exception); } IE_SUPPRESS_DEPRECATED_START diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/callback.cpp index f70b97c2047..79a5304a6b4 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -7,11 +7,11 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GNA}} }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index 5c0658280ae..90451843646 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ 
b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -7,7 +7,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 8db07c75445..c0f1e9d7026 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -9,7 +9,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index fa2a90c45d0..638a9ce9c32 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -8,12 +8,12 @@ using namespace ov::test::behavior; namespace { -const std::vector> device_modes { +const std::vector device_modes { {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}, {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}} }; -const std::vector> configs = { +const std::vector configs = { {{"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "1"}, {"GNA_SCALE_FACTOR_1", "1"}, diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index af3c57205bf..c0da93ebc32 100644 --- 
a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -9,7 +9,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {{"GNA_DEVICE_MODE", "GNA_SW_FP32"}, {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}} }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index ac3cdb25bf6..a4df6313f3d 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -9,7 +9,7 @@ namespace { OPENVINO_SUPPRESS_DEPRECATED_START -const std::vector> configs = { +const std::vector configs = { {{GNA_CONFIG_KEY(LIB_N_THREADS), "3"}} }; diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 5400171314c..be33b441a58 100644 --- a/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -9,7 +9,7 @@ namespace { INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVInferRequestWaitTests, ::testing::Combine( ::testing::Values(CommonTestUtils::DEVICE_GNA), - ::testing::Values(std::map({}))), + ::testing::Values(ov::AnyMap({}))), OVInferRequestWaitTests::getTestCaseName); diff --git a/src/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/eltwise.cpp index 4c46952da2d..c8b873fe3dd 100644 --- 
a/src/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/tests/functional/plugin/gna/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -41,7 +41,7 @@ std::vector eltwiseOpTypes = { ngraph::helpers::EltwiseTypes::ADD }; -std::vector> additional_config = { +std::vector additional_config = { { {"GNA_DEVICE_MODE", "GNA_SW_EXACT"}, {"GNA_SCALE_FACTOR_0", "1638.4"}, diff --git a/src/tests/functional/plugin/gpu/dynamic_tests/gpu_dyn_batch_shape_tests.cpp b/src/tests/functional/plugin/gpu/dynamic_tests/gpu_dyn_batch_shape_tests.cpp index d0a2ed4dd75..28653a085e5 100644 --- a/src/tests/functional/plugin/gpu/dynamic_tests/gpu_dyn_batch_shape_tests.cpp +++ b/src/tests/functional/plugin/gpu/dynamic_tests/gpu_dyn_batch_shape_tests.cpp @@ -16,7 +16,7 @@ using OVDynamicBatchParams = std::tuple< std::vector, // dynamic and static case sizes ElementType, // Network precision std::string, // Device name - std::map // Config + ov::AnyMap // Config >; class OVDynamicBatchShape_Tests : public WithParamInterface, @@ -26,7 +26,7 @@ public: std::vector inputShapes; ElementType netPrecision; std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::tie(inputShapes, netPrecision, targetDevice, configuration) = obj.param; std::ostringstream result; @@ -48,7 +48,9 @@ public: result << "targetDevice=" << targetDevice; if (!configuration.empty()) { for (auto& configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + result << "_"; } } return result.str(); @@ -85,9 +87,9 @@ TEST_P(OVDynamicBatchShape_Tests, InferDynamicBatchBound) { } namespace { -const std::map config = {}; +const ov::AnyMap config = {}; -const std::map hetero_config = { +const ov::AnyMap hetero_config = { {"TARGET_FALLBACK", CommonTestUtils::DEVICE_GPU} }; diff --git 
a/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp b/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp index 08a3f7db391..05b9d191f67 100644 --- a/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp +++ b/src/tests/functional/plugin/gpu/remote_blob_tests/gpu_remote_tensor_tests.cpp @@ -336,7 +336,7 @@ class OVRemoteTensor_TestsWithContext : public OVRemoteTensor_Test, public testi protected: std::shared_ptr fn_ptr; std::string deviceName; - std::map config; + ov::AnyMap config; public: void SetUp() override { diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp index ba64858e13d..7954d5745b9 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_executable_network/exec_net_base.cpp @@ -6,7 +6,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 8558ee061a9..ea7301c7d66 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -9,16 +9,16 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}, }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{ 
MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> autoBatchConfigs = { +const std::vector autoBatchConfigs = { {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index cbab4b6e25c..88e8ca55b4d 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -7,7 +7,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index e8bac081d25..6dae8c14843 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -11,12 +11,12 @@ using namespace ov::test::behavior; namespace { -const std::vector> AutoConfigs = { +const std::vector AutoConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU + std::string(",") + CommonTestUtils::DEVICE_CPU}}, {} }; -const std::vector> AutoNotSupportConfigs = { +const std::vector AutoNotSupportConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp 
b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index c2c014ba195..6febe0fa518 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -9,20 +9,20 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}, }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> AutoBatchConfigs = { +const std::vector AutoBatchConfigs = { {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index 1c2dae3fada..50576997c9c 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -10,16 +10,16 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}, }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> AutoBatchConfigs = { +const std::vector 
AutoBatchConfigs = { {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index 5c233fdd978..d32b38a9d86 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -27,19 +27,19 @@ TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) { } } -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> AutoBatchConfigs = { +const std::vector AutoBatchConfigs = { {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp index eafddbff95c..acc67ae9d56 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -10,20 +10,20 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, {{InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, InferenceEngine::PluginConfigParams::GPU_THROUGHPUT_AUTO}}, }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , 
CommonTestUtils::DEVICE_GPU}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> AutoBatchConfigs = { +const std::vector AutoBatchConfigs = { {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index c350621a8e5..bdc92379db1 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -78,12 +78,12 @@ TEST_P(OVClassGetMetricTest_GPU_DEVICE_TOTAL_MEM_SIZE, GetMetricAndPrintNoThrow) ov::Core ie; ov::Any p; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE))); + ASSERT_NO_THROW(p = ie.get_property(deviceName, GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE))); uint64_t t = p; std::cout << "GPU device total memory size: " << t << std::endl; - ASSERT_METRIC_SUPPORTED(GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE)); + OV_ASSERT_PROPERTY_SUPPORTED(GPU_METRIC_KEY(DEVICE_TOTAL_MEM_SIZE)); } INSTANTIATE_TEST_SUITE_P(nightly_OVClassGetMetricTest, @@ -95,11 +95,11 @@ TEST_P(OVClassGetMetricTest_GPU_UARCH_VERSION, GetMetricAndPrintNoThrow) { ov::Core ie; ov::Any p; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, GPU_METRIC_KEY(UARCH_VERSION))); + ASSERT_NO_THROW(p = ie.get_property(deviceName, GPU_METRIC_KEY(UARCH_VERSION))); std::string t = p; std::cout << "GPU device uarch: " << t << std::endl; - ASSERT_METRIC_SUPPORTED(GPU_METRIC_KEY(UARCH_VERSION)); + OV_ASSERT_PROPERTY_SUPPORTED(GPU_METRIC_KEY(UARCH_VERSION)); } INSTANTIATE_TEST_SUITE_P(nightly_OVClassGetMetricTest, @@ -111,12 +111,12 @@ TEST_P(OVClassGetMetricTest_GPU_EXECUTION_UNITS_COUNT, GetMetricAndPrintNoThrow) 
ov::Core ie; ov::Any p; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, GPU_METRIC_KEY(EXECUTION_UNITS_COUNT))); + ASSERT_NO_THROW(p = ie.get_property(deviceName, GPU_METRIC_KEY(EXECUTION_UNITS_COUNT))); int t = p; std::cout << "GPU EUs count: " << t << std::endl; - ASSERT_METRIC_SUPPORTED(GPU_METRIC_KEY(EXECUTION_UNITS_COUNT)); + OV_ASSERT_PROPERTY_SUPPORTED(GPU_METRIC_KEY(EXECUTION_UNITS_COUNT)); } INSTANTIATE_TEST_SUITE_P(nightly_OVClassGetMetricTest, diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/remote.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/remote.cpp index 1016a77be06..ee8b0e53bbf 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/remote.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/behavior/ov_plugin/remote.cpp @@ -8,18 +8,18 @@ using namespace ov::test; namespace { -const std::vector> configs; +const std::vector configs; -std::vector> generate_remote_params() { +std::vector> generate_remote_params() { return {}; } -const std::vector> MultiConfigs = { +const std::vector MultiConfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_GPU}} }; -const std::vector> AutoBatchConfigs = { +const std::vector AutoBatchConfigs = { {{ CONFIG_KEY(AUTO_BATCH_DEVICE_CONFIG) , CommonTestUtils::DEVICE_GPU}} }; diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/eltwise.cpp index 18163c3ff3f..0624e9d0413 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -53,7 +53,7 @@ std::vector eltwiseOpTypes = { ngraph::helpers::EltwiseTypes::MOD }; -std::map additional_config = {}; +ov::AnyMap additional_config = {}; const auto multiply_params = ::testing::Combine( 
::testing::ValuesIn(ov::test::static_shapes_to_test_representation(inShapes)), diff --git a/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/softmax.cpp b/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/softmax.cpp index 1c68cf8a312..f495b99c00b 100644 --- a/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/src/tests/functional/plugin/gpu/shared_tests_instances/single_layer_tests/softmax.cpp @@ -32,7 +32,7 @@ const auto params2D = testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes2D)), testing::ValuesIn(axis2D), testing::Values(CommonTestUtils::DEVICE_GPU), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); INSTANTIATE_TEST_SUITE_P( @@ -57,7 +57,7 @@ const auto params4D = testing::Combine( testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes4D)), testing::ValuesIn(axis4D), testing::Values(CommonTestUtils::DEVICE_GPU), - testing::Values(std::map()) + testing::Values(ov::AnyMap()) ); INSTANTIATE_TEST_SUITE_P( diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp index b11cff03862..f5f1f353cb8 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/exec_graph_info.cpp @@ -12,13 +12,13 @@ const std::vector netPrecisions = { ov::element::f16, ov::element::f32, }; -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, CommonTestUtils::DEVICE_MYRIAD}}}; -const std::vector> heteroConfigs = { +const std::vector 
heteroConfigs = { {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_MYRIAD}}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/ov_exec_net.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/ov_exec_net.cpp index 795acbd0f2f..e11f0bc10e9 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/ov_exec_net.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_executable_network/ov_exec_net.cpp @@ -6,7 +6,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {} }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, OVExecutableNetworkBaseTest, diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/callback.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/callback.cpp index 900dd7a554a..7f740d357f9 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/callback.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/callback.cpp @@ -9,11 +9,11 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> multiConfigs = { +const std::vector multiConfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp index 8e5f103e17b..854cf84d62d 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp +++ 
b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/cancellation.cpp @@ -7,7 +7,7 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index ba164f10d3b..8482448c731 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -18,12 +18,12 @@ const std::vector netPrecisions = { InferenceEngine::Precision::FP16 }; -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> HeteroConfigs = { - {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_MYRIAD}} +const std::vector HeteroConfigs = { + {{"TARGET_FALLBACK", CommonTestUtils::DEVICE_MYRIAD}} }; std::shared_ptr getFunction1() { diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index 70a11bf1a80..557d4bdcc8f 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -9,15 +9,15 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , 
CommonTestUtils::DEVICE_MYRIAD}} }; diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp index f1b1835bed4..6db87f90e7a 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/multithreading.cpp @@ -10,11 +10,11 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp index c77496b8983..18408345c43 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/perf_counters.cpp @@ -33,15 +33,15 @@ TEST_P(OVInferRequestPerfCountersTest, CheckOperationInProfilingInfo) { } } -const std::vector> configs = { +const std::vector configs = { {} }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/wait.cpp index 9e6fe5f9116..296a7b596f8 100644 --- 
a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/wait.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_infer_request/wait.cpp @@ -10,15 +10,15 @@ using namespace ov::test::behavior; namespace { -const std::vector> configs = { +const std::vector configs = { {}, }; -const std::vector> Multiconfigs = { +const std::vector Multiconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; -const std::vector> Autoconfigs = { +const std::vector Autoconfigs = { {{ MULTI_CONFIG_KEY(DEVICE_PRIORITIES) , CommonTestUtils::DEVICE_MYRIAD}} }; diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_plugin/core_integration.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_plugin/core_integration.cpp index 994db394375..42d813134ad 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_plugin/core_integration.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/behavior/ov_plugin/core_integration.cpp @@ -35,15 +35,14 @@ using OVClassNetworkTestP_VPU_GetMetric = OVClassNetworkTestP; TEST_P(OVClassNetworkTestP_VPU_GetMetric, smoke_OptimizationCapabilitiesReturnsFP16) { ov::Core ie; - ASSERT_METRIC_SUPPORTED(METRIC_KEY(OPTIMIZATION_CAPABILITIES)) + OV_ASSERT_PROPERTY_SUPPORTED(ov::device::capabilities) - ov::Any optimizationCapabilitiesParameter; - ASSERT_NO_THROW(optimizationCapabilitiesParameter = - ie.get_metric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES))); + std::vector device_capabilities; + ASSERT_NO_THROW(device_capabilities = + ie.get_property(deviceName, ov::device::capabilities)); - const auto optimizationCapabilities = optimizationCapabilitiesParameter.as>(); - ASSERT_EQ(optimizationCapabilities.size(), 1); - ASSERT_EQ(optimizationCapabilities.front(), METRIC_VALUE(FP16)); + ASSERT_EQ(device_capabilities.size(), 1); + ASSERT_EQ(device_capabilities.front(), 
ov::device::capability::FP16); } INSTANTIATE_TEST_SUITE_P(smoke_OVClassGetMetricP, OVClassNetworkTestP_VPU_GetMetric, ::testing::ValuesIn(devices)); diff --git a/src/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/eltwise.cpp index 389351cec78..51c07a05dd4 100644 --- a/src/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/tests/functional/plugin/myriad/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -13,7 +13,7 @@ using namespace ov::test::subgraph; namespace { -typedef std::map Config; +using Config = ov::AnyMap; std::vector> inShapes = { {{2}}, diff --git a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp index efbcce0644b..e0d0f89ad29 100644 --- a/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp +++ b/src/tests/functional/plugin/shared/include/base/ov_behavior_test_utils.hpp @@ -22,8 +22,8 @@ namespace behavior { typedef std::tuple< - std::string, // Device name - std::map // Config + std::string, // Device name + ov::AnyMap // Config > InferRequestParams; class OVInferRequestTests : public testing::WithParamInterface, @@ -31,14 +31,15 @@ class OVInferRequestTests : public testing::WithParamInterface obj) { std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::tie(targetDevice, configuration) = obj.param; std::ostringstream result; result << "targetDevice=" << targetDevice << "_"; if (!configuration.empty()) { using namespace CommonTestUtils; for (auto &configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); } } return result.str(); @@ -49,7 +50,11 @@ public: SKIP_IF_CURRENT_TEST_IS_DISABLED() 
std::tie(targetDevice, configuration) = this->GetParam(); function = ngraph::builder::subgraph::makeConvPoolRelu(); - execNet = core->compile_model(function, targetDevice, configuration); + ov::AnyMap params; + for (auto&& v : configuration) { + params.emplace(v.first, v.second); + } + execNet = core->compile_model(function, targetDevice, params); } void TearDown() override { @@ -62,7 +67,7 @@ protected: ov::CompiledModel execNet; std::shared_ptr core = utils::PluginCache::get().core(); std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::shared_ptr function; }; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp index a7c551e46be..cc175a8ac66 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_graph_info.hpp @@ -21,7 +21,7 @@ namespace behavior { typedef std::tuple< ov::element::Type_t, // Element type std::string, // Device name - std::map // Config + ov::AnyMap // Config > OVExecGraphImportExportTestParams; class OVExecGraphImportExportTest : public testing::WithParamInterface, @@ -30,7 +30,7 @@ class OVExecGraphImportExportTest : public testing::WithParamInterface obj) { ov::element::Type_t elementType; std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::tie(elementType, targetDevice, configuration) = obj.param; std::ostringstream result; result << "targetDevice=" << targetDevice << "_"; @@ -38,7 +38,9 @@ class OVExecGraphImportExportTest : public testing::WithParamInterface core = utils::PluginCache::get().core(); std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; ov::element::Type_t elementType; std::shared_ptr function; }; @@ -228,6 +230,23 @@ TEST_P(OVExecGraphImportExportTest, 
readFromV10IR) { EXPECT_EQ(importedExecNet.output().get_element_type(), ov::element::f32); } +static std::map any_copy(const ov::AnyMap& params) { + auto to_config_string = [] (const Any& any) -> std::string { + if (any.is()) { + return any.as() ? "YES" : "NO"; + } else { + std::stringstream strm; + any.print(strm); + return strm.str(); + } + }; + std::map result; + for (auto&& value : params) { + result.emplace(value.first, to_config_string(value.second)); + } + return result; +} + TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) { if (targetDevice == "MULTI" || targetDevice == "AUTO") { GTEST_SKIP() << "MULTI / AUTO does not support import / export" << std::endl; @@ -258,7 +277,7 @@ TEST_P(OVExecGraphImportExportTest, importExportedIENetwork) { ngraph::ParameterVector{param1, param2}); function->set_friendly_name("SingleRuLU"); } - execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), targetDevice, configuration); + execNet = ie->LoadNetwork(InferenceEngine::CNNNetwork(function), targetDevice, any_copy(configuration)); std::stringstream strm; execNet.Export(strm); @@ -328,7 +347,7 @@ TEST_P(OVExecGraphImportExportTest, ieImportExportedFunction) { std::stringstream strm; execNet.export_model(strm); - InferenceEngine::ExecutableNetwork importedExecNet = ie->ImportNetwork(strm, targetDevice, configuration); + InferenceEngine::ExecutableNetwork importedExecNet = ie->ImportNetwork(strm, targetDevice, any_copy(configuration)); EXPECT_EQ(function->inputs().size(), 2); EXPECT_EQ(function->inputs().size(), importedExecNet.GetInputsInfo().size()); EXPECT_NO_THROW(importedExecNet.GetInputsInfo()["param1"]); diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp index 4c709531361..96076cd5247 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp +++ 
b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/exec_network_base.hpp @@ -21,13 +21,15 @@ class OVExecutableNetworkBaseTest : public testing::WithParamInterface obj) { std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::tie(targetDevice, configuration) = obj.param; std::ostringstream result; result << "targetDevice=" << targetDevice << "_"; if (!configuration.empty()) { for (auto& configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + result << "_"; } } return result.str(); @@ -69,7 +71,7 @@ public: protected: std::shared_ptr core = utils::PluginCache::get().core(); std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::shared_ptr function; }; @@ -85,7 +87,7 @@ TEST(OVExecutableNetworkBaseTest, smoke_LoadNetworkToDefaultDeviceNoThrow) { } TEST_P(OVExecutableNetworkBaseTest, canLoadCorrectNetworkToGetExecutableWithIncorrectConfig) { - std::map incorrectConfig = {{"abc", "def"}}; + ov::AnyMap incorrectConfig = {{"abc", "def"}}; EXPECT_ANY_THROW(auto execNet = core->compile_model(function, targetDevice, incorrectConfig)); } @@ -102,14 +104,14 @@ TEST_P(OVExecutableNetworkBaseTest, checkGetExecGraphInfoIsNotNullptr) { TEST_P(OVExecutableNetworkBaseTest, checkGetMetric) { auto execNet = core->compile_model(function, targetDevice, configuration); - EXPECT_NO_THROW(execNet.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + EXPECT_NO_THROW(execNet.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); } TEST_P(OVExecutableNetworkBaseTest, canLoadCorrectNetworkToGetExecutableAndCheckConfig) { auto execNet = core->compile_model(function, targetDevice, configuration); for (const auto& configItem : configuration) { InferenceEngine::Parameter param; - EXPECT_NO_THROW(param = execNet.get_config(configItem.first)); + EXPECT_NO_THROW(param = 
execNet.get_property(configItem.first)); EXPECT_FALSE(param.empty()); EXPECT_EQ(param, InferenceEngine::Parameter(configItem.second)); } @@ -121,7 +123,7 @@ TEST_P(OVExecutableNetworkBaseTest, CanSetConfigToExecNet) { for (const auto& confItem : configuration) { config.insert({confItem.first, InferenceEngine::Parameter(confItem.second)}); } - EXPECT_NO_THROW(execNet.set_config(config)); + EXPECT_NO_THROW(execNet.set_property(config)); } TEST_P(OVExecutableNetworkBaseTest, CanSetConfigToExecNetWithIncorrectConfig) { @@ -131,7 +133,7 @@ TEST_P(OVExecutableNetworkBaseTest, CanSetConfigToExecNetWithIncorrectConfig) { for (const auto& confItem : incorrectConfig) { config.insert({confItem.first, InferenceEngine::Parameter(confItem.second)}); } - EXPECT_ANY_THROW(execNet.set_config(config)); + EXPECT_ANY_THROW(execNet.set_property(config)); } TEST_P(OVExecutableNetworkBaseTest, CanSetConfigToExecNetAndCheckConfigAndCheck) { @@ -140,10 +142,10 @@ TEST_P(OVExecutableNetworkBaseTest, CanSetConfigToExecNetAndCheckConfigAndCheck) for (const auto& confItem : configuration) { config.insert({confItem.first, InferenceEngine::Parameter(confItem.second)}); } - execNet.set_config(config); + execNet.set_property(config); for (const auto& configItem : configuration) { InferenceEngine::Parameter param; - EXPECT_NO_THROW(param = execNet.get_config(configItem.first)); + EXPECT_NO_THROW(param = execNet.get_property(configItem.first)); EXPECT_FALSE(param.empty()); EXPECT_EQ(param, InferenceEngine::Parameter(configItem.second)); } diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp index 6988bc2c8c8..a4700bbc3a6 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_executable_network/get_metric.hpp @@ -22,7 +22,7 @@ namespace behavior { #define 
ASSERT_EXEC_METRIC_SUPPORTED(metricName) \ { \ - std::vector metrics = exeNetwork.get_metric(METRIC_KEY(SUPPORTED_METRICS));\ + std::vector metrics = exeNetwork.get_property(METRIC_KEY(SUPPORTED_METRICS));\ auto it = std::find(metrics.begin(), metrics.end(), metricName); \ ASSERT_NE(metrics.end(), it); \ } @@ -105,7 +105,7 @@ TEST_P(OVClassExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricNoT auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(p = exeNetwork.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); std::vector configValues = p; std::cout << "Supported config keys: " << std::endl; @@ -123,7 +123,7 @@ TEST_P(OVClassExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricNoThrow auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(METRIC_KEY(SUPPORTED_METRICS))); + ASSERT_NO_THROW(p = exeNetwork.get_property(METRIC_KEY(SUPPORTED_METRICS))); std::vector metricValues = p; std::cout << "Supported metric keys: " << std::endl; @@ -141,7 +141,7 @@ TEST_P(OVClassExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThrow) { auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME))); + ASSERT_NO_THROW(p = exeNetwork.get_property(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME))); std::string networkname = p; std::cout << "Exe network name: " << std::endl << networkname << std::endl; @@ -155,7 +155,7 @@ TEST_P(OVClassExecutableNetworkGetMetricTest_OPTIMAL_NUMBER_OF_INFER_REQUESTS, G auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))); + ASSERT_NO_THROW(p = exeNetwork.get_property(EXEC_NETWORK_METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS))); unsigned int value = p; std::cout << "Optimal number of 
Inference Requests: " << value << std::endl; @@ -169,7 +169,7 @@ TEST_P(OVClassExecutableNetworkGetMetricTest_ThrowsUnsupported, GetMetricThrow) auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_THROW(p = exeNetwork.get_metric("unsupported_metric"), ov::Exception); + ASSERT_THROW(p = exeNetwork.get_property("unsupported_metric"), ov::Exception); } TEST_P(OVClassExecutableNetworkGetConfigTest, GetConfigNoThrow) { @@ -178,12 +178,12 @@ TEST_P(OVClassExecutableNetworkGetConfigTest, GetConfigNoThrow) { auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(p = exeNetwork.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); std::vector configValues = p; for (auto&& confKey : configValues) { ov::Any defaultValue; - ASSERT_NO_THROW(defaultValue = ie.get_config(deviceName, confKey)); + ASSERT_NO_THROW(defaultValue = ie.get_property(deviceName, confKey)); ASSERT_FALSE(defaultValue.empty()); } } @@ -194,7 +194,7 @@ TEST_P(OVClassExecutableNetworkGetConfigTest, GetConfigThrows) { auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_THROW(p = exeNetwork.get_config("unsupported_config"), ov::Exception); + ASSERT_THROW(p = exeNetwork.get_property("unsupported_config"), ov::Exception); } TEST_P(OVClassExecutableNetworkSetConfigTest, SetConfigThrows) { @@ -203,7 +203,7 @@ TEST_P(OVClassExecutableNetworkSetConfigTest, SetConfigThrows) { auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_THROW(exeNetwork.set_config({{"unsupported_config", "some_value"}}), ov::Exception); + ASSERT_THROW(exeNetwork.set_property({{"unsupported_config", "some_value"}}), ov::Exception); } TEST_P(OVClassExecutableNetworkSupportedConfigTest, SupportedConfigWorks) { @@ -212,8 +212,8 @@ TEST_P(OVClassExecutableNetworkSupportedConfigTest, SupportedConfigWorks) { auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - 
ASSERT_NO_THROW(exeNetwork.set_config({{configKey, configValue}})); - ASSERT_NO_THROW(p = exeNetwork.get_config(configKey)); + ASSERT_NO_THROW(exeNetwork.set_property({{configKey, configValue}})); + ASSERT_NO_THROW(p = exeNetwork.get_property(configKey)); ASSERT_EQ(p, configValue); } @@ -222,19 +222,19 @@ TEST_P(OVClassExecutableNetworkUnsupportedConfigTest, UnsupportedConfigThrows) { auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_THROW(exeNetwork.set_config({{configKey, configValue}}), ov::Exception); + ASSERT_THROW(exeNetwork.set_property({{configKey, configValue}}), ov::Exception); } TEST_P(OVClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) { ov::Core ie = createCoreWithTemplate(); ov::Any p; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(p = ie.get_property(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); std::vector devConfigValues = p; auto exeNetwork = ie.compile_model(simpleNetwork, deviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(p = exeNetwork.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); std::vector execConfigValues = p; /* @@ -242,7 +242,7 @@ TEST_P(OVClassExecutableNetworkGetConfigTest, GetConfigNoEmptyNoThrow) { ASSERT_NE(execConfigValues.end(), std::find(execConfigValues.begin(), execConfigValues.end(), configKey)); ov::Any configValue; - ASSERT_NO_THROW(ov::Any configValue = exeNetwork.get_config(configKey)); + ASSERT_NO_THROW(ov::Any configValue = exeNetwork.get_property(configKey)); } */ } @@ -254,8 +254,8 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet auto heteroExeNetwork = ie.compile_model(actualNetwork, heteroDeviceName); auto deviceExeNetwork = ie.compile_model(actualNetwork, deviceName); - ASSERT_NO_THROW(pHetero = heteroExeNetwork.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - ASSERT_NO_THROW(pDevice = 
deviceExeNetwork.get_metric(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(pHetero = heteroExeNetwork.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + ASSERT_NO_THROW(pDevice = deviceExeNetwork.get_property(METRIC_KEY(SUPPORTED_CONFIG_KEYS))); std::vector heteroConfigValues = pHetero, deviceConfigValues = pDevice; std::cout << "Supported config keys: " << std::endl; @@ -270,8 +270,8 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet auto it = std::find(heteroConfigValues.begin(), heteroConfigValues.end(), deviceConf); ASSERT_TRUE(it != heteroConfigValues.end()); - ov::Any heteroConfigValue = heteroExeNetwork.get_config(deviceConf); - ov::Any deviceConfigValue = deviceExeNetwork.get_config(deviceConf); + ov::Any heteroConfigValue = heteroExeNetwork.get_property(deviceConf); + ov::Any deviceConfigValue = deviceExeNetwork.get_property(deviceConf); // HETERO returns EXCLUSIVE_ASYNC_REQUESTS as a boolean value if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf) { @@ -287,8 +287,8 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN auto heteroExeNetwork = ie.compile_model(actualNetwork, heteroDeviceName); auto deviceExeNetwork = ie.compile_model(actualNetwork, deviceName); - ASSERT_NO_THROW(pHetero = heteroExeNetwork.get_metric(METRIC_KEY(SUPPORTED_METRICS))); - ASSERT_NO_THROW(pDevice = deviceExeNetwork.get_metric(METRIC_KEY(SUPPORTED_METRICS))); + ASSERT_NO_THROW(pHetero = heteroExeNetwork.get_property(METRIC_KEY(SUPPORTED_METRICS))); + ASSERT_NO_THROW(pDevice = deviceExeNetwork.get_property(METRIC_KEY(SUPPORTED_METRICS))); std::vector heteroMetricValues = pHetero, deviceMetricValues = pDevice; std::cout << "Supported metric keys: " << std::endl; @@ -306,8 +306,8 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN auto it = std::find(heteroMetricValues.begin(), heteroMetricValues.end(), deviceMetricName); ASSERT_TRUE(it != heteroMetricValues.end()); - ov::Any 
heteroMetricValue = heteroExeNetwork.get_metric(deviceMetricName); - ov::Any deviceMetricValue = deviceExeNetwork.get_metric(deviceMetricName); + ov::Any heteroMetricValue = heteroExeNetwork.get_property(deviceMetricName); + ov::Any deviceMetricValue = deviceExeNetwork.get_property(deviceMetricName); if (std::find(heteroSpecificMetrics.begin(), heteroSpecificMetrics.end(), deviceMetricName) == heteroSpecificMetrics.end()) { @@ -322,7 +322,7 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_NETWORK_NAME, GetMetricNoThro auto exeNetwork = ie.compile_model(actualNetwork, heteroDeviceName); - ASSERT_NO_THROW(p = exeNetwork.get_metric(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME))); + ASSERT_NO_THROW(p = exeNetwork.get_property(EXEC_NETWORK_METRIC_KEY(NETWORK_NAME))); std::string networkname = p; std::cout << "Exe network name: " << std::endl << networkname << std::endl; @@ -336,7 +336,7 @@ TEST_P(OVClassHeteroExecutableNetworkGetMetricTest_TARGET_FALLBACK, GetMetricNoT auto exeNetwork = ie.compile_model(actualNetwork, heteroDeviceName); - ASSERT_NO_THROW(p = exeNetwork.get_config("TARGET_FALLBACK")); + ASSERT_NO_THROW(p = exeNetwork.get_property("TARGET_FALLBACK")); std::string targets = p; auto expectedTargets = deviceName + "," + CommonTestUtils::DEVICE_CPU; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp index ecfed10b807..8bb1ee51a08 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/infer_request_dynamic.hpp @@ -38,7 +38,7 @@ using OVInferRequestDynamicParams = std::tuple< std::shared_ptr, // ov Model std::vector, std::vector>>, // input/expected output shapes per inference std::string, // Device name - std::map // Config + ov::AnyMap // Config >; class OVInferRequestDynamicTests : 
public testing::WithParamInterface, @@ -54,7 +54,7 @@ protected: std::shared_ptr ie = utils::PluginCache::get().core(); std::shared_ptr function; std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::vector, std::vector>> inOutShapes; }; using OVNotSupportRequestDynamicTests = OVInferRequestDynamicTests; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp index b70dc1fce58..c8126be6035 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/io_tensor.hpp @@ -26,7 +26,7 @@ struct OVInferRequestIOTensorTest : public OVInferRequestTests { using OVInferRequestSetPrecisionParams = std::tuple< element::Type, // element type std::string, // Device name - std::map // Config + ov::AnyMap // Config >; struct OVInferRequestIOTensorSetPrecisionTest : public testing::WithParamInterface, public CommonTestUtils::TestsCommon { @@ -38,7 +38,7 @@ struct OVInferRequestIOTensorSetPrecisionTest : public testing::WithParamInterfa ov::CompiledModel execNet; ov::InferRequest req; std::string target_device; - runtime::ConfigMap config; + ov::AnyMap config; element::Type element_type; }; @@ -53,9 +53,9 @@ struct OVInferRequestCheckTensorPrecision : public testing::WithParamInterface core = utils::PluginCache::get().core(); std::shared_ptr model; - ov::CompiledModel compModel; - ov::InferRequest req; - runtime::ConfigMap config; + CompiledModel compModel; + InferRequest req; + AnyMap config; std::string target_device; element::Type element_type; }; diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp index 2a4014be694..2be921f207d 100644 --- 
a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/core_integration.hpp @@ -5,6 +5,7 @@ #pragma once #include "base/ov_behavior_test_utils.hpp" +#include #include "common_test_utils/test_assertions.hpp" #include "common_test_utils/file_utils.hpp" @@ -21,11 +22,11 @@ namespace ov { namespace test { namespace behavior { -#define ASSERT_METRIC_SUPPORTED(metricName) \ -{ \ - std::vector metrics = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_METRICS)); \ - auto it = std::find(metrics.begin(), metrics.end(), metricName); \ - ASSERT_NE(metrics.end(), it); \ +#define OV_ASSERT_PROPERTY_SUPPORTED(property_key) \ +{ \ + auto properties = ie.get_property(deviceName, ov::supported_properties); \ + auto it = std::find(properties.begin(), properties.end(), property_key); \ + ASSERT_NE(properties.end(), it); \ } @@ -63,8 +64,6 @@ using OVClassGetMetricTest_FULL_DEVICE_NAME = OVClassBaseTestP; using OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES = OVClassBaseTestP; using OVClassGetMetricTest_DEVICE_GOPS = OVClassBaseTestP; using OVClassGetMetricTest_DEVICE_TYPE = OVClassBaseTestP; -using OVClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS = OVClassBaseTestP; -using OVClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS = OVClassBaseTestP; using OVClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS = OVClassBaseTestP; using OVClassGetMetricTest_ThrowUnsupported = OVClassBaseTestP; using OVClassGetConfigTest = OVClassBaseTestP; @@ -91,20 +90,20 @@ using OVClassSeveralDevicesTestQueryNetwork = OVClassSeveralDevicesTest; using OVClassSeveralDevicesTestDefaultCore = OVClassSeveralDevicesTest; inline bool supportsAvaliableDevices(ov::Core& ie, const std::string& deviceName) { - auto supportedMetricKeys = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_METRICS)).as>(); - return supportedMetricKeys.end() != - std::find(std::begin(supportedMetricKeys), std::end(supportedMetricKeys), 
METRIC_KEY(AVAILABLE_DEVICES)); + auto supported_properties = ie.get_property(deviceName, ov::supported_properties); + return supported_properties.end() != + std::find(std::begin(supported_properties), std::end(supported_properties), ov::available_devices); } bool supportsDeviceID(ov::Core& ie, const std::string& deviceName) { - auto supportedConfigKeys = - ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); - return supportedConfigKeys.end() != - std::find(std::begin(supportedConfigKeys), std::end(supportedConfigKeys), CONFIG_KEY(DEVICE_ID)); + auto supported_properties = + ie.get_property(deviceName, ov::supported_properties); + return supported_properties.end() != + std::find(std::begin(supported_properties), std::end(supported_properties), ov::device::id); } TEST(OVClassBasicTest, smoke_createDefault) { - ASSERT_NO_THROW(ov::Core ie); + OV_ASSERT_NO_THROW(ov::Core ie); } TEST_P(OVClassBasicTestP, registerExistingPluginThrows) { @@ -117,8 +116,8 @@ TEST_P(OVClassBasicTestP, registerExistingPluginThrows) { TEST_P(OVClassBasicTestP, registerNewPluginNoThrows) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.register_plugin(pluginName, "NEW_DEVICE_NAME")); - ASSERT_NO_THROW(ie.get_metric("NEW_DEVICE_NAME", METRIC_KEY(SUPPORTED_CONFIG_KEYS))); + OV_ASSERT_NO_THROW(ie.register_plugin(pluginName, "NEW_DEVICE_NAME")); + OV_ASSERT_NO_THROW(ie.get_property("NEW_DEVICE_NAME", ov::supported_properties)); } TEST(OVClassBasicTest, smoke_registerExistingPluginFileThrows) { @@ -137,7 +136,7 @@ TEST(OVClassBasicTest, smoke_createMockEngineConfigNoThrows) { std::string filename{"mock_engine_valid.xml"}; std::string content{""}; CommonTestUtils::createFile(filename, content); - ASSERT_NO_THROW(ov::Core ie(filename)); + OV_ASSERT_NO_THROW(ov::Core ie(filename)); CommonTestUtils::removeFile(filename.c_str()); } @@ -175,16 +174,16 @@ TEST_P(OVClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { ov::Core ie = createCoreWithTemplate(); GTEST_COUT << 
"Core created " << testIndex << std::endl; - ASSERT_NO_THROW(ie.register_plugins(::ov::util::wstring_to_string(pluginsXmlW))); + OV_ASSERT_NO_THROW(ie.register_plugins(::ov::util::wstring_to_string(pluginsXmlW))); CommonTestUtils::removeFile(pluginsXmlW); # if defined __linux__ && !defined(__APPLE__) - ASSERT_NO_THROW(ie.get_versions("mock")); // from pluginXML + OV_ASSERT_NO_THROW(ie.get_versions("mock")); // from pluginXML # endif - ASSERT_NO_THROW(ie.get_versions(deviceName)); + OV_ASSERT_NO_THROW(ie.get_versions(deviceName)); GTEST_COUT << "Plugin created " << testIndex << std::endl; - ASSERT_NO_THROW(ie.register_plugin(pluginName, "TEST_DEVICE")); - ASSERT_NO_THROW(ie.get_versions("TEST_DEVICE")); + OV_ASSERT_NO_THROW(ie.register_plugin(pluginName, "TEST_DEVICE")); + OV_ASSERT_NO_THROW(ie.get_versions("TEST_DEVICE")); GTEST_COUT << "Plugin registered and created " << testIndex << std::endl; GTEST_COUT << "OK" << std::endl; @@ -206,12 +205,12 @@ TEST_P(OVClassBasicTestP, smoke_registerPluginsXMLUnicodePath) { TEST_P(OVClassBasicTestP, getVersionsByExactDeviceNoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.get_versions(deviceName + ".0")); + OV_ASSERT_NO_THROW(ie.get_versions(deviceName + ".0")); } TEST_P(OVClassBasicTestP, getVersionsByDeviceClassNoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.get_versions(deviceName)); + OV_ASSERT_NO_THROW(ie.get_versions(deviceName)); } TEST_P(OVClassBasicTestP, getVersionsNonEmpty) { @@ -231,17 +230,17 @@ TEST_P(OVClassBasicTestP, unregisterExistingPluginNoThrow) { // make the first call to IE which created device instance ie.get_versions(deviceName); // now, we can unregister device - ASSERT_NO_THROW(ie.unload_plugin(deviceName)); + OV_ASSERT_NO_THROW(ie.unload_plugin(deviceName)); } TEST_P(OVClassBasicTestP, accessToUnregisteredPluginThrows) { ov::Core ie = createCoreWithTemplate(); ASSERT_THROW(ie.unload_plugin(deviceName), ov::Exception); - 
ASSERT_NO_THROW(ie.get_versions(deviceName)); - ASSERT_NO_THROW(ie.unload_plugin(deviceName)); - ASSERT_NO_THROW(ie.set_config({}, deviceName)); - ASSERT_NO_THROW(ie.get_versions(deviceName)); - ASSERT_NO_THROW(ie.unload_plugin(deviceName)); + OV_ASSERT_NO_THROW(ie.get_versions(deviceName)); + OV_ASSERT_NO_THROW(ie.unload_plugin(deviceName)); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::AnyMap{})); + OV_ASSERT_NO_THROW(ie.get_versions(deviceName)); + OV_ASSERT_NO_THROW(ie.unload_plugin(deviceName)); } TEST(OVClassBasicTest, smoke_unregisterNonExistingPluginThrows) { @@ -255,50 +254,46 @@ TEST(OVClassBasicTest, smoke_unregisterNonExistingPluginThrows) { TEST_P(OVClassBasicTestP, SetConfigAllThrows) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.set_config({{"unsupported_key", "4"}})); + OV_ASSERT_NO_THROW(ie.set_property({{"unsupported_key", "4"}})); ASSERT_ANY_THROW(ie.get_versions(deviceName)); } TEST_P(OVClassBasicTestP, SetConfigForUnRegisteredDeviceThrows) { ov::Core ie = createCoreWithTemplate(); - ASSERT_THROW(ie.set_config({{"unsupported_key", "4"}}, "unregistered_device"), ov::Exception); + ASSERT_THROW(ie.set_property("unregistered_device", {{"unsupported_key", "4"}}), ov::Exception); } TEST_P(OVClassBasicTestP, SetConfigNoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.set_config({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - deviceName)); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::enable_profiling(true))); } TEST_P(OVClassBasicTestP, SetConfigAllNoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.set_config({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}})); - ASSERT_NO_THROW(ie.get_versions(deviceName)); + OV_ASSERT_NO_THROW(ie.set_property(ov::enable_profiling(true))); + OV_ASSERT_NO_THROW(ie.get_versions(deviceName)); } TEST(OVClassBasicTest, smoke_SetConfigHeteroThrows) { 
ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.set_config({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, - CommonTestUtils::DEVICE_HETERO)); + OV_ASSERT_NO_THROW(ie.set_property(CommonTestUtils::DEVICE_HETERO, ov::enable_profiling(true))); } TEST_P(OVClassBasicTestP, SetConfigHeteroTargetFallbackThrows) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.set_config({{"TARGET_FALLBACK", deviceName}}, CommonTestUtils::DEVICE_HETERO)); + OV_ASSERT_NO_THROW(ie.set_property(CommonTestUtils::DEVICE_HETERO, ov::target_fallback(deviceName))); } TEST(OVClassBasicTest, smoke_SetConfigHeteroNoThrow) { ov::Core ie = createCoreWithTemplate(); bool value = false; - ASSERT_NO_THROW(ie.set_config({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), InferenceEngine::PluginConfigParams::YES}}, - CommonTestUtils::DEVICE_HETERO)); - ASSERT_NO_THROW(value = ie.get_config("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as()); + OV_ASSERT_NO_THROW(ie.set_property(CommonTestUtils::DEVICE_HETERO, ov::dump_graph_dot(true))); + OV_ASSERT_NO_THROW(value = ie.get_property(CommonTestUtils::DEVICE_HETERO, ov::dump_graph_dot)); ASSERT_TRUE(value); - ASSERT_NO_THROW(ie.set_config({{HETERO_CONFIG_KEY(DUMP_GRAPH_DOT), InferenceEngine::PluginConfigParams::NO}}, - CommonTestUtils::DEVICE_HETERO)); - ASSERT_NO_THROW(value = ie.get_config("HETERO", HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)).as()); + OV_ASSERT_NO_THROW(ie.set_property(CommonTestUtils::DEVICE_HETERO, ov::dump_graph_dot(false))); + OV_ASSERT_NO_THROW(value = ie.get_property(CommonTestUtils::DEVICE_HETERO, ov::dump_graph_dot)); ASSERT_FALSE(value); } @@ -314,15 +309,15 @@ TEST_P(OVClassSpecificDeviceTestSetConfig, SetConfigSpecificDeviceNoThrow) { if (!supportsDeviceID(ie, clearDeviceName) || !supportsAvaliableDevices(ie, clearDeviceName)) { GTEST_SKIP(); } - std::vector deviceIDs = ie.get_metric(clearDeviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = 
ie.get_property(clearDeviceName, ov::available_devices); if (std::find(deviceIDs.begin(), deviceIDs.end(), deviceID) == deviceIDs.end()) { GTEST_SKIP(); } - ASSERT_NO_THROW(ie.set_config({{InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES}}, deviceName)); - std::string value; - ASSERT_NO_THROW(value = ie.get_config(deviceName, InferenceEngine::PluginConfigParams::KEY_PERF_COUNT).as()); - ASSERT_EQ(value, InferenceEngine::PluginConfigParams::YES); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::enable_profiling(true))); + bool value = false; + OV_ASSERT_NO_THROW(value = ie.get_property(deviceName, ov::enable_profiling)); + ASSERT_TRUE(value); } // @@ -331,7 +326,7 @@ TEST_P(OVClassSpecificDeviceTestSetConfig, SetConfigSpecificDeviceNoThrow) { TEST_P(OVClassNetworkTestP, QueryNetworkActualThrows) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.query_model(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName)); + OV_ASSERT_NO_THROW(ie.query_model(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName)); } TEST_P(OVClassNetworkTestP, QueryNetworkActualNoThrow) { @@ -373,7 +368,7 @@ TEST_P(OVClassSeveralDevicesTestQueryNetwork, QueryNetworkActualSeveralDevicesNo if (!supportsDeviceID(ie, clearDeviceName) || !supportsAvaliableDevices(ie, clearDeviceName)) { GTEST_SKIP(); } - std::vector deviceIDs = ie.get_metric(clearDeviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = ie.get_property(clearDeviceName, ov::available_devices); if (deviceIDs.size() < deviceNames.size()) GTEST_SKIP(); @@ -384,7 +379,7 @@ TEST_P(OVClassSeveralDevicesTestQueryNetwork, QueryNetworkActualSeveralDevicesNo multiDeviceName += ","; } } - ASSERT_NO_THROW(ie.query_model(actualNetwork, multiDeviceName)); + OV_ASSERT_NO_THROW(ie.query_model(actualNetwork, multiDeviceName)); } TEST_P(OVClassNetworkTestP, SetAffinityWithConstantBranches) { @@ -458,8 +453,8 @@ 
TEST_P(OVClassNetworkTestP, SetAffinityWithKSO) { TEST_P(OVClassNetworkTestP, QueryNetworkHeteroActualNoThrow) { ov::Core ie = createCoreWithTemplate(); ov::SupportedOpsMap res; - ASSERT_NO_THROW( - res = ie.query_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}})); + OV_ASSERT_NO_THROW( + res = ie.query_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, ov::target_fallback(deviceName))); ASSERT_LT(0, res.size()); } @@ -470,260 +465,184 @@ TEST_P(OVClassNetworkTestP, QueryNetworkMultiThrows) { TEST(OVClassBasicTest, smoke_GetMetricSupportedMetricsHeteroNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; std::string deviceName = CommonTestUtils::DEVICE_HETERO; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_METRICS))); - std::vector t = p; + std::vector t; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::supported_properties)); - std::cout << "Supported HETERO metrics: " << std::endl; + std::cout << "Supported HETERO properties: " << std::endl; for (auto&& str : t) { - std::cout << str << std::endl; + std::cout << str << " is_mutable: " << str.is_mutable() << std::endl; } - ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_METRICS)); -} - -TEST(OVClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroNoThrow) { - ov::Core ie = createCoreWithTemplate(); - ov::Any p; - std::string deviceName = CommonTestUtils::DEVICE_HETERO; - - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - std::vector t = p; - - std::cout << "Supported HETERO config keys: " << std::endl; - for (auto&& str : t) { - std::cout << str << std::endl; - } - - ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::supported_properties); } TEST(OVClassBasicTest, smoke_GetMetricSupportedConfigKeysHeteroThrows) { ov::Core ie = createCoreWithTemplate(); // TODO: check std::string targetDevice = CommonTestUtils::DEVICE_HETERO + std::string(":") + 
CommonTestUtils::DEVICE_CPU; - ASSERT_THROW(ie.get_metric(targetDevice, METRIC_KEY(SUPPORTED_CONFIG_KEYS)), ov::Exception); + ASSERT_THROW(ie.get_property(targetDevice, ov::supported_properties), ov::Exception); } TEST_P(OVClassGetMetricTest_SUPPORTED_METRICS, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + std::vector t; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_METRICS))); - std::vector t = p; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::supported_properties)); - std::cout << "Supported metrics: " << std::endl; + std::cout << "Supported properties: " << std::endl; for (auto&& str : t) { - std::cout << str << std::endl; + std::cout << str << " is_mutable: " << str.is_mutable() << std::endl; } - ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_METRICS)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::supported_properties); } TEST_P(OVClassGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + std::vector t; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - std::vector t = p; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::supported_properties)); std::cout << "Supported config values: " << std::endl; for (auto&& str : t) { - std::cout << str << std::endl; + std::cout << str << " is_mutable: " << str.is_mutable() << std::endl; } - ASSERT_METRIC_SUPPORTED(METRIC_KEY(SUPPORTED_CONFIG_KEYS)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::supported_properties); } TEST_P(OVClassGetMetricTest_AVAILABLE_DEVICES, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + std::vector t; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES))); - std::vector t = p; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::available_devices)); std::cout << "Available devices: " << std::endl; for (auto&& str : t) { std::cout << str << std::endl; } - 
ASSERT_METRIC_SUPPORTED(METRIC_KEY(AVAILABLE_DEVICES)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::available_devices); } TEST_P(OVClassGetMetricTest_FULL_DEVICE_NAME, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + std::string t; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(FULL_DEVICE_NAME))); - std::string t = p; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::device::full_name)); std::cout << "Full device name: " << std::endl << t << std::endl; - ASSERT_METRIC_SUPPORTED(METRIC_KEY(FULL_DEVICE_NAME)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::device::full_name); } TEST_P(OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(OPTIMIZATION_CAPABILITIES))); - std::vector t = p; - + std::vector t; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::device::capabilities)); std::cout << "Optimization capabilities: " << std::endl; for (auto&& str : t) { std::cout << str << std::endl; } - - ASSERT_METRIC_SUPPORTED(METRIC_KEY(OPTIMIZATION_CAPABILITIES)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::device::capabilities); } TEST_P(OVClassGetMetricTest_DEVICE_GOPS, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(DEVICE_GOPS))); - std::map t = p; - std::cout << "Device GOPS: " << std::endl; - for (auto&& kv : t) { + for (auto&& kv : ie.get_property(deviceName, ov::device::gops)) { std::cout << kv.first << ": " << kv.second << std::endl; } - - ASSERT_METRIC_SUPPORTED(METRIC_KEY(DEVICE_GOPS)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::device::gops); } TEST_P(OVClassGetMetricTest_DEVICE_TYPE, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(DEVICE_TYPE))); - InferenceEngine::Metrics::DeviceType t = p; - + 
OV_ASSERT_PROPERTY_SUPPORTED(ov::device::type); + ov::device::Type t = {}; + OV_ASSERT_NO_THROW(t = ie.get_property(deviceName, ov::device::type)); std::cout << "Device Type: " << t << std::endl; - - ASSERT_METRIC_SUPPORTED(METRIC_KEY(DEVICE_TYPE)); -} - -TEST_P(OVClassGetMetricTest_NUMBER_OF_WAITING_INFER_REQUESTS, GetMetricAndPrintNoThrow) { - ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS))); - unsigned int t = p; - - std::cout << "Number of waiting infer requests: " << std::endl << t << std::endl; - - ASSERT_METRIC_SUPPORTED(METRIC_KEY(NUMBER_OF_WAITING_INFER_REQUESTS)); -} - -TEST_P(OVClassGetMetricTest_NUMBER_OF_EXEC_INFER_REQUESTS, GetMetricAndPrintNoThrow) { - ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS))); - unsigned int t = p; - - std::cout << "Number of executing infer requests: " << std::endl << t << std::endl; - - ASSERT_METRIC_SUPPORTED(METRIC_KEY(NUMBER_OF_EXEC_INFER_REQUESTS)); } TEST_P(OVClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + unsigned int start, end, step; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS))); - std::tuple t = p; + ASSERT_NO_THROW(std::tie(start, end, step) = ie.get_property(deviceName, ov::range_for_async_infer_requests)); - unsigned int start = std::get<0>(t); - unsigned int end = std::get<1>(t); - unsigned int step = std::get<2>(t); - - std::cout << "Range for async infer requests: " << std::endl; - std::cout << start << std::endl; - std::cout << end << std::endl; - std::cout << step << std::endl; - std::cout << std::endl; + std::cout << "Range for async infer requests: " << std::endl + << start << std::endl + << end << std::endl + << step << std::endl + << std::endl; ASSERT_LE(start, end); 
ASSERT_GE(step, 1); - ASSERT_METRIC_SUPPORTED(METRIC_KEY(RANGE_FOR_ASYNC_INFER_REQUESTS)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::range_for_async_infer_requests); } TEST_P(OVClassGetMetricTest_RANGE_FOR_STREAMS, GetMetricAndPrintNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + unsigned int start, end; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(RANGE_FOR_STREAMS))); - std::tuple t = p; + ASSERT_NO_THROW(std::tie(start, end) = ie.get_property(deviceName, ov::range_for_streams)); - unsigned int start = std::get<0>(t); - unsigned int end = std::get<1>(t); - - std::cout << "Range for streams: " << std::endl; - std::cout << start << std::endl; - std::cout << end << std::endl; - std::cout << std::endl; + std::cout << "Range for streams: " << std::endl + << start << std::endl + << end << std::endl + << std::endl; ASSERT_LE(start, end); - ASSERT_METRIC_SUPPORTED(METRIC_KEY(RANGE_FOR_STREAMS)); + OV_ASSERT_PROPERTY_SUPPORTED(ov::range_for_streams); } TEST_P(OVClassGetMetricTest_ThrowUnsupported, GetMetricThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - ASSERT_THROW(p = ie.get_metric(deviceName, "unsupported_metric"), ov::Exception); + ASSERT_THROW(ie.get_property(deviceName, "unsupported_metric"), ov::Exception); } TEST_P(OVClassGetConfigTest, GetConfigNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; + std::vector configValues; - ASSERT_NO_THROW(p = ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - std::vector configValues = p; + OV_ASSERT_NO_THROW(configValues = ie.get_property(deviceName, ov::supported_properties)); for (auto&& confKey : configValues) { ov::Any defaultValue; - ASSERT_NO_THROW(defaultValue = ie.get_config(deviceName, confKey)); + OV_ASSERT_NO_THROW(defaultValue = ie.get_property(deviceName, confKey)); ASSERT_FALSE(defaultValue.empty()); } } TEST_P(OVClassGetConfigTest, GetConfigHeteroNoThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_NO_THROW(p = 
ie.get_metric(deviceName, METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - std::vector configValues = p; + std::vector configValues; + OV_ASSERT_NO_THROW(configValues = ie.get_property(deviceName, ov::supported_properties)); for (auto&& confKey : configValues) { - ASSERT_NO_THROW(ie.get_config(deviceName, confKey)); + OV_ASSERT_NO_THROW(ie.get_property(deviceName, confKey)); } } TEST_P(OVClassGetConfigTest_ThrowUnsupported, GetConfigHeteroThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - - ASSERT_THROW(p = ie.get_config(CommonTestUtils::DEVICE_HETERO, "unsupported_config"), ov::Exception); + ASSERT_THROW(ie.get_property(CommonTestUtils::DEVICE_HETERO, "unsupported_config"), ov::Exception); } TEST_P(OVClassGetConfigTest_ThrowUnsupported, GetConfigHeteroWithDeviceThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - ASSERT_THROW(p = ie.get_config(CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName, - HETERO_CONFIG_KEY(DUMP_GRAPH_DOT)), + ASSERT_THROW(ie.get_property(CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName, + ov::dump_graph_dot), ov::Exception); } TEST_P(OVClassGetConfigTest_ThrowUnsupported, GetConfigThrow) { ov::Core ie = createCoreWithTemplate(); - ov::Any p; - ASSERT_THROW(p = ie.get_config(deviceName, "unsupported_config"), ov::Exception); + ASSERT_THROW(ie.get_property(deviceName, "unsupported_config"), ov::Exception); } TEST_P(OVClassSpecificDeviceTestGetConfig, GetConfigSpecificDeviceNoThrow) { @@ -739,17 +658,17 @@ TEST_P(OVClassSpecificDeviceTestGetConfig, GetConfigSpecificDeviceNoThrow) { if (!supportsDeviceID(ie, clearDeviceName) || !supportsAvaliableDevices(ie, clearDeviceName)) { GTEST_SKIP(); } - std::vector deviceIDs = ie.get_metric(clearDeviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = ie.get_property(clearDeviceName, ov::available_devices); if (std::find(deviceIDs.begin(), deviceIDs.end(), deviceID) == deviceIDs.end()) { GTEST_SKIP(); } - ASSERT_NO_THROW(p = ie.get_metric(deviceName, 
METRIC_KEY(SUPPORTED_CONFIG_KEYS))); - std::vector configValues = p; + std::vector configValues; + OV_ASSERT_NO_THROW(configValues = ie.get_property(deviceName, ov::supported_properties)); for (auto &&confKey : configValues) { ov::Any defaultValue; - ASSERT_NO_THROW(defaultValue = ie.get_config(deviceName, confKey)); + OV_ASSERT_NO_THROW(defaultValue = ie.get_property(deviceName, confKey)); ASSERT_FALSE(defaultValue.empty()); } } @@ -758,7 +677,7 @@ TEST_P(OVClassGetAvailableDevices, GetAvailableDevicesNoThrow) { ov::Core ie = createCoreWithTemplate(); std::vector devices; - ASSERT_NO_THROW(devices = ie.get_available_devices()); + OV_ASSERT_NO_THROW(devices = ie.get_available_devices()); bool deviceFound = false; std::cout << "Available devices: " << std::endl; @@ -777,17 +696,16 @@ TEST_P(OVClassGetAvailableDevices, GetAvailableDevicesNoThrow) { // // QueryNetwork with HETERO on particular device // - TEST_P(OVClassQueryNetworkTest, QueryNetworkHETEROWithDeviceIDNoThrow) { ov::Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { - auto deviceIDs = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto deviceIDs = ie.get_property(deviceName, ov::available_devices); if (deviceIDs.empty()) GTEST_SKIP(); - ASSERT_NO_THROW(ie.query_model(actualNetwork, - CommonTestUtils::DEVICE_HETERO, - {{"TARGET_FALLBACK", deviceName + "." + deviceIDs[0] + "," + deviceName}})); + OV_ASSERT_NO_THROW(ie.query_model(actualNetwork, + CommonTestUtils::DEVICE_HETERO, + ov::target_fallback(deviceName + "." 
+ deviceIDs[0], deviceName))); } else { GTEST_SKIP(); } @@ -834,7 +752,7 @@ TEST_P(OVClassQueryNetworkTest, QueryNetworkHETEROWithBigDeviceIDThrows) { if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.query_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, - {{"TARGET_FALLBACK", deviceName + ".100," + deviceName}}), + ov::target_fallback(deviceName + ".100", deviceName)), ov::Exception); } else { GTEST_SKIP(); @@ -849,17 +767,26 @@ using OVClassNetworkTestP = OVClassBaseTestP; TEST_P(OVClassNetworkTestP, LoadNetworkActualNoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.compile_model(actualNetwork, deviceName)); + OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, deviceName)); } TEST_P(OVClassNetworkTestP, LoadNetworkActualHeteroDeviceNoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.compile_model(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName)); + OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName)); } TEST_P(OVClassNetworkTestP, LoadNetworkActualHeteroDevice2NoThrow) { ov::Core ie = createCoreWithTemplate(); - ASSERT_NO_THROW(ie.compile_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, {{"TARGET_FALLBACK", deviceName}})); + OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, ov::target_fallback(deviceName))); +} + +TEST_P(OVClassNetworkTestP, LoadNetworkActualHeteroDeviceUsingDevicePropertiesNoThrow) { + ov::Core ie = createCoreWithTemplate(); + OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, + CommonTestUtils::DEVICE_HETERO, + ov::target_fallback(deviceName), + ov::device::properties(deviceName, + ov::enable_profiling(true)))); } TEST_P(OVClassNetworkTestP, LoadNetworkCreateDefaultExecGraphResult) { @@ -902,7 +829,7 @@ TEST_P(OVClassSeveralDevicesTestLoadNetwork, LoadNetworkActualSeveralDevicesNoTh if (!supportsDeviceID(ie, clearDeviceName) || !supportsAvaliableDevices(ie, 
clearDeviceName)) { GTEST_SKIP(); } - std::vector deviceIDs = ie.get_metric(clearDeviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = ie.get_property(clearDeviceName, ov::available_devices); if (deviceIDs.size() < deviceNames.size()) GTEST_SKIP(); @@ -913,7 +840,7 @@ TEST_P(OVClassSeveralDevicesTestLoadNetwork, LoadNetworkActualSeveralDevicesNoTh multiDeviceName += ","; } } - ASSERT_NO_THROW(ie.compile_model(actualNetwork, multiDeviceName)); + OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, multiDeviceName)); } // @@ -923,12 +850,12 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkHETEROWithDeviceIDNoThrow) { ov::Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { - auto deviceIDs = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto deviceIDs = ie.get_property(deviceName, ov::available_devices); if (deviceIDs.empty()) GTEST_SKIP(); std::string heteroDevice = CommonTestUtils::DEVICE_HETERO + std::string(":") + deviceName + "." + deviceIDs[0] + "," + deviceName; - ASSERT_NO_THROW(ie.compile_model(actualNetwork, heteroDevice)); + OV_ASSERT_NO_THROW(ie.compile_model(actualNetwork, heteroDevice)); } else { GTEST_SKIP(); } @@ -938,10 +865,10 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkWithDeviceIDNoThrow) { ov::Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName)) { - auto deviceIDs = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto deviceIDs = ie.get_property(deviceName, ov::available_devices); if (deviceIDs.empty()) GTEST_SKIP(); - ASSERT_NO_THROW(ie.compile_model(simpleNetwork, deviceName + "." + deviceIDs[0])); + OV_ASSERT_NO_THROW(ie.compile_model(simpleNetwork, deviceName + "." 
+ deviceIDs[0])); } else { GTEST_SKIP(); } @@ -973,7 +900,7 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkHETEROWithBigDeviceIDThrows) { if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.compile_model(actualNetwork, "HETERO", - {{"TARGET_FALLBACK", deviceName + ".100," + CommonTestUtils::DEVICE_CPU}}), + ov::target_fallback(deviceName + ".100", CommonTestUtils::DEVICE_CPU)), ov::Exception); } else { GTEST_SKIP(); @@ -986,8 +913,8 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkHETEROAndDeviceIDThrows) { if (supportsDeviceID(ie, deviceName)) { ASSERT_THROW(ie.compile_model(actualNetwork, CommonTestUtils::DEVICE_HETERO, - {{"TARGET_FALLBACK", deviceName + "," + CommonTestUtils::DEVICE_CPU}, - {CONFIG_KEY(DEVICE_ID), "110"}}), + ov::target_fallback(deviceName, CommonTestUtils::DEVICE_CPU), + ov::device::id("110")), ov::Exception); } else { GTEST_SKIP(); @@ -1002,18 +929,20 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkHETEROwithMULTINoThrow) { ov::Core ie = createCoreWithTemplate(); if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; - auto availableDevices = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto availableDevices = ie.get_property(deviceName, ov::available_devices); for (auto&& device : availableDevices) { devices += deviceName + '.' 
+ device; if (&device != &(availableDevices.back())) { devices += ','; } } - std::string targetFallback(CommonTestUtils::DEVICE_MULTI + std::string(",") + deviceName); - ASSERT_NO_THROW( - ie.compile_model(actualNetwork, - CommonTestUtils::DEVICE_HETERO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, {"TARGET_FALLBACK", targetFallback}})); + OV_ASSERT_NO_THROW( + ie.compile_model(actualNetwork, + CommonTestUtils::DEVICE_HETERO, + ov::device::properties(CommonTestUtils::DEVICE_MULTI, + ov::device::priorities(devices)), + ov::device::properties(CommonTestUtils::DEVICE_HETERO, + ov::target_fallback(CommonTestUtils::DEVICE_MULTI, deviceName)))); } else { GTEST_SKIP(); } @@ -1024,17 +953,18 @@ TEST_P(OVClassLoadNetworkTest, LoadNetworkMULTIwithHETERONoThrow) { if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; - auto availableDevices = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto availableDevices = ie.get_property(deviceName, ov::available_devices); for (auto&& device : availableDevices) { devices += CommonTestUtils::DEVICE_HETERO + std::string(".") + device; if (&device != &(availableDevices.back())) { devices += ','; } } - ASSERT_NO_THROW(ie.compile_model( - actualNetwork, - CommonTestUtils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, {"TARGET_FALLBACK", deviceName + "," + deviceName}})); + OV_ASSERT_NO_THROW(ie.compile_model( + actualNetwork, + CommonTestUtils::DEVICE_MULTI, + ov::device::properties(CommonTestUtils::DEVICE_MULTI, ov::device::priorities(devices)), + ov::device::properties(CommonTestUtils::DEVICE_HETERO, ov::target_fallback(deviceName, deviceName)))); } else { GTEST_SKIP(); } @@ -1049,7 +979,7 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) { if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; - auto availableDevices = ie.get_metric(deviceName, 
METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto availableDevices = ie.get_property(deviceName, ov::available_devices); for (auto&& device : availableDevices) { devices += deviceName + '.' + device; if (&device != &(availableDevices.back())) { @@ -1063,11 +993,15 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkHETEROWithMULTINoThrow_V10) { expectedLayers.emplace(node->get_friendly_name()); } ov::SupportedOpsMap result; - std::string targetFallback(CommonTestUtils::DEVICE_MULTI + std::string(",") + deviceName); - ASSERT_NO_THROW(result = ie.query_model( - multinputNetwork, - CommonTestUtils::DEVICE_HETERO, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, {"TARGET_FALLBACK", targetFallback}})); + std::string hetero_device_priorities(CommonTestUtils::DEVICE_MULTI + std::string(",") + deviceName); + OV_ASSERT_NO_THROW(result = ie.query_model( + multinputNetwork, + CommonTestUtils::DEVICE_HETERO, + ov::device::properties(CommonTestUtils::DEVICE_MULTI, + ov::device::priorities(devices)), + ov::device::properties(CommonTestUtils::DEVICE_HETERO, + ov::target_fallback(CommonTestUtils::DEVICE_MULTI, + deviceName)))); std::unordered_set actualLayers; for (auto&& layer : result) { @@ -1084,7 +1018,7 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { if (supportsDeviceID(ie, deviceName) && supportsAvaliableDevices(ie, deviceName)) { std::string devices; - auto availableDevices = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)).as>(); + auto availableDevices = ie.get_property(deviceName, ov::available_devices); for (auto&& device : availableDevices) { devices += "HETERO." 
+ device; if (&device != &(availableDevices.back())) { @@ -1098,10 +1032,12 @@ TEST_P(OVClassLoadNetworkTest, QueryNetworkMULTIWithHETERONoThrow_V10) { expectedLayers.emplace(node->get_friendly_name()); } ov::SupportedOpsMap result; - ASSERT_NO_THROW(result = ie.query_model(multinputNetwork, - CommonTestUtils::DEVICE_MULTI, - {{MULTI_CONFIG_KEY(DEVICE_PRIORITIES), devices}, - {"TARGET_FALLBACK", deviceName + "," + deviceName}})); + OV_ASSERT_NO_THROW(result = ie.query_model(multinputNetwork, + CommonTestUtils::DEVICE_MULTI, + ov::device::properties(CommonTestUtils::DEVICE_MULTI, + ov::device::priorities(devices)), + ov::device::properties(CommonTestUtils::DEVICE_HETERO, + ov::target_fallback(deviceName, deviceName)))); std::unordered_set actualLayers; for (auto&& layer : result) { @@ -1121,11 +1057,11 @@ TEST_P(OVClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins CommonTestUtils::DEVICE_CPU); ASSERT_EQ(3, versions.size()); } - std::map config; + ov::AnyMap config; if (deviceName == CommonTestUtils::DEVICE_CPU) { - config.insert({"CPU_THREADS_NUM", "3"}); + config.insert(ov::enable_profiling(true)); } - // ASSERT_NO_THROW({ + // OV_ASSERT_NO_THROW({ // ov::Core ie = createCoreWithTemplate(); // std::string name = actualNetwork.getInputsInfo().begin()->first; // actualNetwork.getInputsInfo().at(name)->setPrecision(Precision::U8); @@ -1136,32 +1072,30 @@ TEST_P(OVClassLoadNetworkAfterCoreRecreateTest, LoadAfterRecreateCoresAndPlugins TEST_P(OVClassSetDefaultDeviceIDTest, SetDefaultDeviceIDNoThrow) { ov::Core ie = createCoreWithTemplate(); - std::vector deviceIDs = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = ie.get_property(deviceName, ov::available_devices); if (std::find(deviceIDs.begin(), deviceIDs.end(), deviceID) == deviceIDs.end()) { GTEST_SKIP(); } std::string value; - ASSERT_NO_THROW(ie.set_config({{ InferenceEngine::PluginConfigParams::KEY_DEVICE_ID, deviceID }, - { 
InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES }}, - deviceName)); - ASSERT_NO_THROW(value = ie.get_config(deviceName, InferenceEngine::PluginConfigParams::KEY_PERF_COUNT).as()); - ASSERT_EQ(value, InferenceEngine::PluginConfigParams::YES); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::device::id(deviceID), ov::enable_profiling(true))); + ASSERT_TRUE(ie.get_property(deviceName, ov::enable_profiling)); + OV_ASSERT_NO_THROW(value = ie.get_property(deviceName, ov::enable_profiling.name()).as()); + ASSERT_EQ(value, "YES"); } TEST_P(OVClassSetGlobalConfigTest, SetGlobalConfigNoThrow) { ov::Core ie = createCoreWithTemplate(); - std::vector deviceIDs = ie.get_metric(deviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = ie.get_property(deviceName, ov::available_devices); ov::Any ref, src; for (auto& dev_id : deviceIDs) { - ASSERT_NO_THROW(ie.set_config({{ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::NO }}, - deviceName + "." + dev_id)); + OV_ASSERT_NO_THROW(ie.set_property(deviceName + "." + dev_id, ov::enable_profiling(false))); } - ASSERT_NO_THROW(ie.set_config({{ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES }}, deviceName)); - ASSERT_NO_THROW(ref = ie.get_config(deviceName, InferenceEngine::PluginConfigParams::KEY_PERF_COUNT)); + OV_ASSERT_NO_THROW(ie.set_property(deviceName, ov::enable_profiling(true))); + OV_ASSERT_NO_THROW(ref = ie.get_property(deviceName, ov::enable_profiling.name())); for (auto& dev_id : deviceIDs) { - ASSERT_NO_THROW(src = ie.get_config(deviceName + "." + dev_id, InferenceEngine::PluginConfigParams::KEY_PERF_COUNT)); + OV_ASSERT_NO_THROW(src = ie.get_property(deviceName + "." 
+ dev_id, ov::enable_profiling.name())); ASSERT_EQ(src, ref); } } @@ -1177,17 +1111,17 @@ TEST_P(OVClassSeveralDevicesTestDefaultCore, DefaultCoreSeveralDevicesNoThrow) { if (!supportsDeviceID(ie, clearDeviceName) || !supportsAvaliableDevices(ie, clearDeviceName)) { GTEST_SKIP(); } - std::vector deviceIDs = ie.get_metric(clearDeviceName, METRIC_KEY(AVAILABLE_DEVICES)); + auto deviceIDs = ie.get_property(clearDeviceName, ov::available_devices); if (deviceIDs.size() < deviceNames.size()) GTEST_SKIP(); for (size_t i = 0; i < deviceNames.size(); ++i) { - ASSERT_NO_THROW(ie.set_config({{ InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS, std::to_string(i + 2) }}, deviceNames[i])); + OV_ASSERT_NO_THROW(ie.set_property(deviceNames[i], ov::enable_profiling(true))); } - std::string res; + bool res; for (size_t i = 0; i < deviceNames.size(); ++i) { - ASSERT_NO_THROW(res = ie.get_config(deviceNames[i], InferenceEngine::PluginConfigParams::KEY_GPU_THROUGHPUT_STREAMS).as()); - ASSERT_EQ(res, std::to_string(i + 2)); + OV_ASSERT_NO_THROW(res = ie.get_property(deviceNames[i], ov::enable_profiling)); + ASSERT_TRUE(res); } } } // namespace behavior diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/remote.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/remote.hpp index ff4a3f03b0f..f16d4e164a0 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_plugin/remote.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_plugin/remote.hpp @@ -6,7 +6,6 @@ #include "common_test_utils/test_common.hpp" #include "openvino/runtime/core.hpp" -#include "openvino/runtime/parameter.hpp" #include "openvino/runtime/infer_request.hpp" #include "openvino/runtime/compiled_model.hpp" #include "openvino/op/parameter.hpp" @@ -18,8 +17,8 @@ namespace test { using RemoteTensorParams = std::tuple>; // remote context and tensor parameters + ov::AnyMap, // config + std::pair>; // remote context and tensor parameters class 
OVRemoteTest : public testing::WithParamInterface, public CommonTestUtils::TestsCommon { @@ -31,9 +30,9 @@ protected: element::Type element_type; std::string target_device; - runtime::ConfigMap config; - runtime::ParamMap context_parameters; - runtime::ParamMap tensor_parameters; + ov::AnyMap config; + ov::AnyMap context_parameters; + ov::AnyMap tensor_parameters; std::shared_ptr function; ov::Core core = *ov::test::utils::PluginCache::get().core(); ov::CompiledModel exec_network; diff --git a/src/tests/functional/plugin/shared/include/single_layer_tests/op_impl_check/op_impl_check.hpp b/src/tests/functional/plugin/shared/include/single_layer_tests/op_impl_check/op_impl_check.hpp index 4c912c57112..7cacf4a11d2 100644 --- a/src/tests/functional/plugin/shared/include/single_layer_tests/op_impl_check/op_impl_check.hpp +++ b/src/tests/functional/plugin/shared/include/single_layer_tests/op_impl_check/op_impl_check.hpp @@ -19,7 +19,7 @@ namespace subgraph { using OpImplParams = std::tuple< std::pair>, // Function to check std::string, // Target Device - std::map>; // Plugin Config + ov::AnyMap>; // Plugin Config class OpImplCheckTest : public testing::WithParamInterface, public CommonTestUtils::TestsCommon { @@ -28,7 +28,7 @@ protected: std::shared_ptr core = ov::test::utils::PluginCache::get().core(); std::shared_ptr function; std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; public: void run(); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index e1234aa8f7d..f1d827f7cd8 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -36,7 +36,7 @@ void OVInferRequestBatchedTests::SetUp() { void OVInferRequestBatchedTests::TearDown() { if (m_need_reset_core) { - 
ie->set_config({{CONFIG_KEY(CACHE_DIR), {}}}); + ie->set_property({{CONFIG_KEY(CACHE_DIR), {}}}); ie.reset(); PluginCache::get().reset(); CommonTestUtils::removeFilesWithExt(m_cache_dir, "blob"); @@ -178,7 +178,7 @@ TEST_P(OVInferRequestBatchedTests, SetInputTensorsBase_Caching) { auto batch_shape = Shape{batch, 2, 2, 2}; auto one_shape_size = ov::shape_size(one_shape); auto model = OVInferRequestBatchedTests::create_n_inputs(1, element::f32, batch_shape, "N..."); - ie->set_config({{CONFIG_KEY(CACHE_DIR), m_cache_dir}}); + ie->set_property({{CONFIG_KEY(CACHE_DIR), m_cache_dir}}); auto execNet_no_cache = ie->compile_model(model, targetDevice); auto execNet_cache = ie->compile_model(model, targetDevice); // Allocate 8 chunks, set 'user tensors' to 0, 2, 4, 6 chunks @@ -480,7 +480,7 @@ TEST_P(OVInferRequestBatchedTests, SetInputTensors_Cache_CheckDeepCopy) { std::vector buffer(ov::shape_size(batch_shape), 1); std::vector buffer_out(ov::shape_size(batch_shape), 1); auto model = OVInferRequestBatchedTests::create_n_inputs(2, element::f32, batch_shape, "NCHW"); - ie->set_config({{CONFIG_KEY(CACHE_DIR), m_cache_dir}}); + ie->set_property({{CONFIG_KEY(CACHE_DIR), m_cache_dir}}); auto execNet_no_cache = ie->compile_model(model, targetDevice); auto execNet = ie->compile_model(model, targetDevice); ov::InferRequest req; diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp index 4c5e7164c7d..0910760614a 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -36,7 +36,7 @@ std::string OVInferRequestDynamicTests::getTestCaseName(testing::TestParamInfo func; std::vector, std::vector>> inOutShapes; std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::tie(func, 
inOutShapes, targetDevice, configuration) = obj.param; std::ostringstream result; result << "function=" << func->get_friendly_name() << "_"; @@ -48,7 +48,9 @@ std::string OVInferRequestDynamicTests::getTestCaseName(testing::TestParamInfo& obj) { element::Type type; std::string targetDevice; - std::map configuration; + ov::AnyMap configuration; std::tie(type, targetDevice, configuration) = obj.param; std::ostringstream result; result << "type=" << type << "_"; @@ -190,7 +190,9 @@ std::string OVInferRequestIOTensorSetPrecisionTest::getTestCaseName(const testin if (!configuration.empty()) { using namespace CommonTestUtils; for (auto &configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + result << "_"; } } return result.str(); @@ -234,7 +236,7 @@ TEST_P(OVInferRequestIOTensorSetPrecisionTest, CanSetOutBlobWithDifferentPrecisi std::string OVInferRequestCheckTensorPrecision::getTestCaseName(const testing::TestParamInfo& obj) { element::Type type; std::string targetDevice; - std::map configuration; + AnyMap configuration; std::tie(type, targetDevice, configuration) = obj.param; std::ostringstream result; result << "type=" << type << "_"; @@ -242,7 +244,9 @@ std::string OVInferRequestCheckTensorPrecision::getTestCaseName(const testing::T if (!configuration.empty()) { using namespace CommonTestUtils; for (auto &configItem : configuration) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + result << "_"; } } return result.str(); diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp index 8be24cf4bdc..b41cb539d18 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp 
+++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/wait.cpp @@ -57,8 +57,8 @@ TEST_P(OVInferRequestWaitTests, throwExceptionOnSetTensorAfterAsyncInfer) { auto&& config = configuration; auto itConfig = config.find(CONFIG_KEY(CPU_THROUGHPUT_STREAMS)); if (itConfig != config.end()) { - if (itConfig->second != "CPU_THROUGHPUT_AUTO") { - if (std::stoi(itConfig->second) == 0) { + if (itConfig->second.as() != "CPU_THROUGHPUT_AUTO") { + if (std::stoi(itConfig->second.as()) == 0) { GTEST_SKIP() << "Not applicable with disabled streams"; } } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp index 6fceaee9a75..9639539e408 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/life_time.cpp @@ -87,7 +87,7 @@ TEST_P(OVHoldersTest, LoadedAny) { { ov::Core core = createCoreWithTemplate(); auto compiled_model = core.compile_model(function, targetDevice); - any = compiled_model.get_metric(METRIC_KEY(SUPPORTED_METRICS)); + any = compiled_model.get_property(METRIC_KEY(SUPPORTED_METRICS)); } } diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp index d0d5c364b1a..3c82da66b71 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/remote.cpp @@ -13,17 +13,19 @@ namespace test { std::string OVRemoteTest::getTestCaseName(testing::TestParamInfo obj) { ov::element::Type element_type; std::string target_device; - runtime::ConfigMap config; - std::pair param_pair; + ov::AnyMap config; + std::pair param_pair; std::tie(element_type, target_device, config, param_pair) = obj.param; - runtime::ParamMap context_parameters; - runtime::ParamMap tensor_parameters; + ov::AnyMap 
context_parameters; + ov::AnyMap tensor_parameters; std::tie(context_parameters, tensor_parameters) = param_pair; std::ostringstream result; result << "element_type=" << element_type; result << "targetDevice=" << target_device; for (auto& configItem : config) { - result << "configItem=" << configItem.first << "_" << configItem.second << "_"; + result << "configItem=" << configItem.first << "_"; + configItem.second.print(result); + result << "_"; } result << "__context_parameters="; for (auto& param : context_parameters) { @@ -40,7 +42,7 @@ std::string OVRemoteTest::getTestCaseName(testing::TestParamInfo param_pair; + std::pair param_pair; std::tie(element_type, target_device, config, param_pair) = GetParam(); std::tie(context_parameters, tensor_parameters) = param_pair; function = ngraph::builder::subgraph::makeConvPoolRelu({1, 1, 32, 32}, element_type); @@ -59,7 +61,7 @@ TEST_P(OVRemoteTest, canCreateRemote) { ? core.get_default_context(target_device) : core.create_context(target_device, context_parameters); - runtime::ParamMap params; + ov::AnyMap params; std::string device; ASSERT_NO_THROW(params = context.get_params()); diff --git a/src/tests/functional/plugin/shared/src/main.cpp b/src/tests/functional/plugin/shared/src/main.cpp index 529cbc78487..5fef518e2a8 100644 --- a/src/tests/functional/plugin/shared/src/main.cpp +++ b/src/tests/functional/plugin/shared/src/main.cpp @@ -4,6 +4,7 @@ #include "gtest/gtest.h" +#include "common_test_utils/ov_common_utils.hpp" #include "functional_test_utils/layer_test_utils/environment.hpp" #include "functional_test_utils/layer_test_utils/summary.hpp" #include "functional_test_utils/skip_tests_config.hpp" diff --git a/src/tests/functional/plugin/shared/src/single_layer_tests/op_impl_check/om_impl_check.cpp b/src/tests/functional/plugin/shared/src/single_layer_tests/op_impl_check/om_impl_check.cpp index 17b7f912d26..cef5aee662a 100644 --- 
a/src/tests/functional/plugin/shared/src/single_layer_tests/op_impl_check/om_impl_check.cpp +++ b/src/tests/functional/plugin/shared/src/single_layer_tests/op_impl_check/om_impl_check.cpp @@ -43,7 +43,7 @@ void OpImplCheckTest::SetUp() { std::string OpImplCheckTest::getTestCaseName(const testing::TestParamInfo &obj) { std::pair> funcInfo; std::string targetDevice; - std::map config; + ov::AnyMap config; std::tie(funcInfo, targetDevice, config) = obj.param; std::ostringstream result; @@ -52,7 +52,9 @@ std::string OpImplCheckTest::getTestCaseName(const testing::TestParamInfo>; std::ostream& operator <<(std::ostream& os, const InputShape& inputShape); using ElementType = ov::element::Type_t; -using Config = std::map; +using Config = ov::AnyMap; using TargetDevice = std::string; class SubgraphBaseTest : public CommonTestUtils::TestsCommon { @@ -47,7 +47,7 @@ protected: std::shared_ptr core = ov::test::utils::PluginCache::get().core(); std::string targetDevice; - Config configuration; + ov::AnyMap configuration; std::shared_ptr function, functionRefs = nullptr; std::map, ov::Tensor> inputs; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp index 615d48920ed..f2c7ea1903b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/read_ir/read_ir.hpp @@ -13,7 +13,7 @@ namespace subgraph { using ReadIRParams = std::tuple< std::string, // IR path std::string, // Target Device - std::map>; // Plugin Config + ov::AnyMap>; // Plugin Config class ReadIRTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp index ea9f73f11f2..1603123d3b6 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_layer/eltwise.hpp @@ -20,7 +20,7 @@ typedef std::tuple< ElementType, // In precision ElementType, // Out precision TargetDevice, // Device name - Config // Additional network configuration + ov::AnyMap // Additional network configuration > EltwiseTestParams; class EltwiseLayerTest : public testing::WithParamInterface, diff --git a/src/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp b/src/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp index b121ee6900e..129f5bf6096 100644 --- a/src/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp +++ b/src/tests/functional/shared_test_classes/src/read_ir/read_ir.cpp @@ -23,7 +23,7 @@ namespace subgraph { std::string ReadIRTest::getTestCaseName(const testing::TestParamInfo &obj) { using namespace CommonTestUtils; std::string pathToModel, deviceName; - std::map config; + ov::AnyMap config; std::tie(pathToModel, deviceName, config) = obj.param; std::ostringstream result; @@ -36,7 +36,8 @@ std::string ReadIRTest::getTestCaseName(const testing::TestParamInfofirst << "=" << configItem->second; + result << configItem->first << "="; + configItem->second.print(result); if (++configItem != config.end()) { result << "_"; } diff --git a/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp b/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp index 2385fa7669a..cc39b863c1a 100644 --- a/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp +++ b/src/tests/functional/shared_test_classes/src/single_layer/eltwise.cpp @@ -19,7 +19,7 @@ std::string EltwiseLayerTest::getTestCaseName(const testing::TestParamInfo additional_config; + ov::AnyMap 
additional_config; std::tie(shapes, eltwiseOpType, secondaryInputType, opType, netType, inType, outType, targetName, additional_config) = obj.param; std::ostringstream results; @@ -41,7 +41,8 @@ std::string EltwiseLayerTest::getTestCaseName(const testing::TestParamInfo #include #include +#include namespace ov { -namespace test { static void PrintTo(const Any& any, std::ostream* os) { any.print(*os); } -} // namespace test } // namespace ov diff --git a/src/tests/ie_test_utils/common_test_utils/test_common.hpp b/src/tests/ie_test_utils/common_test_utils/test_common.hpp index cd70976b7e2..e006d8b4cf2 100644 --- a/src/tests/ie_test_utils/common_test_utils/test_common.hpp +++ b/src/tests/ie_test_utils/common_test_utils/test_common.hpp @@ -8,6 +8,7 @@ #include #include "test_assertions.hpp" +#include "ov_common_utils.hpp" namespace CommonTestUtils { diff --git a/src/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp b/src/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp index 74da7cd9b45..04bc6fb9378 100644 --- a/src/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp +++ b/src/tests/ie_test_utils/functional_test_utils/src/ov_plugin_cache.cpp @@ -61,11 +61,11 @@ std::shared_ptr PluginCache::core(const std::string &deviceToCheck) { } if (!deviceToCheck.empty()) { - std::vector metrics = ov_core->get_metric(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS)); + std::vector metrics = ov_core->get_property(deviceToCheck, METRIC_KEY(SUPPORTED_METRICS)); if (std::find(metrics.begin(), metrics.end(), METRIC_KEY(AVAILABLE_DEVICES)) != metrics.end()) { std::vector availableDevices = - ov_core->get_metric(deviceToCheck, METRIC_KEY(AVAILABLE_DEVICES)); + ov_core->get_property(deviceToCheck, METRIC_KEY(AVAILABLE_DEVICES)); if (availableDevices.empty()) { std::cerr << "No available devices for " << deviceToCheck << std::endl; diff --git a/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp 
b/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp index f6eda9ebe2a..639eec709bb 100644 --- a/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp +++ b/src/tests/ie_test_utils/unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp @@ -32,10 +32,11 @@ public: MOCK_CONST_METHOD3(QueryNetwork, InferenceEngine::QueryNetworkResult( const InferenceEngine::CNNNetwork&, const std::string&, const std::map&)); - MOCK_CONST_METHOD3(GetMetric, InferenceEngine::Parameter(const std::string&, const std::string&, const std::map&)); - MOCK_CONST_METHOD2(GetConfig, InferenceEngine::Parameter(const std::string&, const std::string&)); + MOCK_CONST_METHOD3(GetMetric, ov::Any(const std::string&, const std::string&, const ov::AnyMap&)); + MOCK_CONST_METHOD2(GetConfig, ov::Any(const std::string&, const std::string&)); MOCK_CONST_METHOD0(GetAvailableDevices, std::vector()); MOCK_CONST_METHOD1(DeviceSupportsImportExport, bool(const std::string&)); // NOLINT not a cast to bool + MOCK_METHOD2(GetSupportedConfig, std::map(const std::string&, const std::map&)); MOCK_CONST_METHOD0(isNewAPI, bool()); ~MockICore() = default; diff --git a/tools/compile_tool/main.cpp b/tools/compile_tool/main.cpp index 4424559cef2..4dc199cc2db 100644 --- a/tools/compile_tool/main.cpp +++ b/tools/compile_tool/main.cpp @@ -781,7 +781,7 @@ int main(int argc, char* argv[]) { } else { ov::Core core; if (!FLAGS_log_level.empty()) { - core.set_config({{CONFIG_KEY(LOG_LEVEL), FLAGS_log_level}}, FLAGS_d); + core.set_property(FLAGS_d, {{CONFIG_KEY(LOG_LEVEL), FLAGS_log_level}}); } auto model = core.read_model(FLAGS_m); @@ -789,7 +789,8 @@ int main(int argc, char* argv[]) { configurePrePostProcessing(model, FLAGS_ip, FLAGS_op, FLAGS_iop, FLAGS_il, FLAGS_ol, FLAGS_iol, FLAGS_iml, FLAGS_oml, FLAGS_ioml); printInputAndOutputsInfoShort(*model); auto timeBeforeLoadNetwork = std::chrono::steady_clock::now(); - auto compiledModel = 
core.compile_model(model, FLAGS_d, configure()); + auto configs = configure(); + auto compiledModel = core.compile_model(model, FLAGS_d, {configs.begin(), configs.end()}); loadNetworkTimeElapsed = std::chrono::duration_cast(std::chrono::steady_clock::now() - timeBeforeLoadNetwork); std::string outputName = FLAGS_o; if (outputName.empty()) {