Port tests/ to API2.0 (#21476)

* Port properties in time and stress tests to API2.0

* Port CC tests infer_tool.py to API2.0

* Update tests/conditional_compilation/tools/infer_tool.py

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>

---------

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
Vitaliy Urusovskij 2023-12-06 14:36:43 +04:00 committed by GitHub
parent 6d3fbf4237
commit e4c38e3afd
4 changed files with 18 additions and 19 deletions


@@ -14,20 +14,20 @@ import sys
 from pathlib import Path
 import numpy as np
-from openvino.inference_engine import IECore
+from openvino import Core
 log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
-def input_preparation(net):
+def input_preparation(model):
     """
     Function to prepare reproducible from run to run input data
-    :param net: IENetwork object
+    :param model: OpenVINO Model object
     :return: Dict where keys are layers' names and values are numpy arrays with layers' shapes
     """
     feed_dict = {}
-    for layer_name, layer_data in net.input_info.items():
+    for layer_name, layer_data in model.input_info.items():
         feed_dict.update({layer_name: np.ones(shape=layer_data.input_data.shape)})
     return feed_dict
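
The input_preparation() helper above keeps filling every input with ones. The equivalent idea on the C++ side of API 2.0 would look roughly like the sketch below; it assumes static shapes and f32 inputs, and the function name is purely illustrative, not code from this commit:

    #include <openvino/openvino.hpp>
    #include <algorithm>

    // Sketch: fill all inputs of a compiled model with ones, mirroring input_preparation() above.
    void fill_inputs_with_ones(const ov::CompiledModel& compiled_model, ov::InferRequest& request) {
        for (const auto& input : compiled_model.inputs()) {
            ov::Tensor tensor(input.get_element_type(), input.get_shape());   // assumes static shapes
            std::fill_n(tensor.data<float>(), tensor.get_size(), 1.0f);       // assumes f32 inputs
            request.set_tensor(input, tensor);
        }
    }
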
@@ -40,17 +40,16 @@ def infer(ir_path, device):
     :return: Dict containing out blob name and out data
     """
-    bin_path = os.path.splitext(ir_path)[0] + '.bin'
-    ie = IECore()
-    net = ie.read_network(model=ir_path, weights=bin_path)
-    exec_net = ie.load_network(net, device)
-    res = exec_net.infer(inputs=input_preparation(net))
+    core = Core()
+    model = core.read_model(ir_path)
+    compiled_model = core.compile_model(model, device)
+    res = compiled_model(input_preparation(model))
-    del net
-    # It's important to delete executable network first to avoid double free in plugin offloading.
+    del model
+    # It's important to delete compiled model first to avoid double free in plugin offloading.
     # Issue relates ony for hetero and Myriad plugins
-    del exec_net
-    del ie
+    del compiled_model
+    del core
     return res
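
The ported Python tool now follows the standard API 2.0 flow: Core -> read_model -> compile_model -> direct call on the compiled model, with no separate weights path. The C++ tests below exercise the same runtime, presumably through their ie_api_wrapper helper; for comparison, a minimal sketch of that flow in plain C++ (the file name "model.xml" and device "CPU" are placeholder assumptions, not values from this commit):

    #include <openvino/openvino.hpp>

    int main() {
        ov::Core core;                                            // replaces InferenceEngine::Core / IECore
        auto model = core.read_model("model.xml");                // weights are picked up from the matching .bin automatically
        auto compiled_model = core.compile_model(model, "CPU");   // replaces read_network + load_network
        ov::InferRequest request = compiled_model.create_infer_request();
        request.infer();                                          // synchronous inference
        return 0;
    }
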


@@ -123,7 +123,7 @@ inference_with_streams(const std::string &model, const std::string &target_device
     ie_api_wrapper->read_network(model);
     ie_api_wrapper->load_network(target_device);
     try {
-        nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
+        nireq = ie_api_wrapper->get_property(ov::optimal_number_of_infer_requests.name());
     } catch (const std::exception &ex) {
         log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
     }


@@ -114,7 +114,7 @@ TEST_P(MemCheckTestSuite, inference_with_streams) {
     ie_api_wrapper->read_network(model);
     ie_api_wrapper->load_network(device);
     try {
-        nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS));
+        nireq = ie_api_wrapper->get_property(ov::optimal_number_of_infer_requests.name());
     } catch (const std::exception &ex) {
         log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS");
     }
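
Both hunks above replace the METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS) macro with the name of the typed ov::optimal_number_of_infer_requests property, keeping the string-based get_property of the test wrapper. When an ov::CompiledModel is available directly, the same value can be read without a string key at all; a sketch under that assumption (the helper function name is illustrative):

    #include <openvino/openvino.hpp>
    #include <iostream>

    // Sketch: read the optimal number of infer requests from a compiled model (API 2.0).
    void report_optimal_nireq(const ov::CompiledModel& compiled_model) {
        // The typed property returns uint32_t directly; no string lookup or .as<>() conversion is needed.
        uint32_t nireq = compiled_model.get_property(ov::optimal_number_of_infer_requests);
        std::cout << "OPTIMAL_NUMBER_OF_INFER_REQUESTS: " << nireq << std::endl;
    }
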


@@ -41,14 +41,14 @@ void setPerformanceConfig(InferenceEngine::Core ie, const std::string &device) {
  * @brief Function that enables Latency performance hint for specified device (OV API 2)
  */
 void setPerformanceConfig(ov::Core ie, const std::string &device) {
-    auto supported_config_keys = ie.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as<std::vector<std::string>>();
+    auto supported_config_keys = ie.get_property(device, ov::supported_properties);
-    if (std::find(supported_config_keys.begin(), supported_config_keys.end(), "PERFORMANCE_HINT") ==
+    if (std::find(supported_config_keys.begin(), supported_config_keys.end(), ov::hint::performance_mode) ==
         supported_config_keys.end()) {
-        std::cerr << "Device " << device << " doesn't support config key 'PERFORMANCE_HINT'!\n"
+        std::cerr << "Device " << device << " doesn't support " << ov::hint::performance_mode.name() << " property!\n"
                   << "Performance config was not set.";
     }
     else
-        ie.set_property(device, {{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(LATENCY)}});
+        ie.set_property(device, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
 }
 }
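
The rewritten setPerformanceConfig above is an instance of the general API 2.0 property pattern: ov::supported_properties replaces METRIC_KEY(SUPPORTED_CONFIG_KEYS), and the typed ov::hint::performance_mode property replaces the PERFORMANCE_HINT config key. A condensed sketch of that pattern, assuming device "CPU"; the read-back step at the end is an illustrative addition, not part of this commit:

    #include <openvino/openvino.hpp>
    #include <algorithm>
    #include <iostream>

    int main() {
        ov::Core core;
        // SUPPORTED_PROPERTIES returns a std::vector<ov::PropertyName>.
        auto supported = core.get_property("CPU", ov::supported_properties);
        // ov::PropertyName derives from std::string, so it can be compared against the property name.
        bool has_hint = std::find(supported.begin(), supported.end(),
                                  ov::hint::performance_mode.name()) != supported.end();
        if (has_hint) {
            // Typed setter replaces {{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(LATENCY)}}.
            core.set_property("CPU", ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
            // The applied hint can be read back as a typed enum value.
            auto mode = core.get_property("CPU", ov::hint::performance_mode);
            std::cout << "PERFORMANCE_HINT set to LATENCY: "
                      << (mode == ov::hint::PerformanceMode::LATENCY ? "yes" : "no") << std::endl;
        }
        return 0;
    }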