Extend sync benchmark CLI parameters (#20844)

This commit is contained in:
Anatoliy Talamanov 2023-11-03 08:51:22 +00:00 committed by GitHub
parent 0effa37811
commit c20d52dc4f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 18 additions and 11 deletions

View File

@@ -70,7 +70,7 @@ Running
.. code-block:: sh
sync_benchmark <path_to_model>
sync_benchmark <path_to_model> <device_name>(default: CPU)
To run the sample, you need to specify a model:

View File

@@ -67,7 +67,7 @@ Running
.. code-block:: sh
python sync_benchmark.py <path_to_model>
python sync_benchmark.py <path_to_model> <device_name>(default: CPU)
To run the sample, you need to specify a model:
@@ -138,4 +138,4 @@ See Also
* :doc:`Model Downloader <omz_tools_downloader>`
* :doc:`Convert a Model <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`
@endsphinxdirective
@endsphinxdirective

View File

@@ -20,8 +20,12 @@ int main(int argc, char* argv[]) {
try {
slog::info << "OpenVINO:" << slog::endl;
slog::info << ov::get_openvino_version();
if (argc != 2) {
slog::info << "Usage : " << argv[0] << " <path_to_model>" << slog::endl;
std::string device_name = "CPU";
if (argc == 3) {
device_name = argv[2];
} else if (argc != 2) {
slog::info << "Usage : " << argv[0] << " <path_to_model> <device_name>(default: CPU)" << slog::endl;
return EXIT_FAILURE;
}
// Optimize for latency. Most of the devices are configured for latency by default,
@@ -29,11 +33,11 @@ int main(int argc, char* argv[]) {
ov::AnyMap latency{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::LATENCY}};
// Create ov::Core and use it to compile a model.
// Pick a device by replacing CPU, for example AUTO:GPU,CPU.
// Select the device by providing the name as the second parameter to CLI.
// Using MULTI device is pointless in sync scenario
// because only one instance of ov::InferRequest is used
ov::Core core;
ov::CompiledModel compiled_model = core.compile_model(argv[1], "CPU", latency);
ov::CompiledModel compiled_model = core.compile_model(argv[1], device_name, latency);
ov::InferRequest ireq = compiled_model.create_infer_request();
// Fill input data for the ireq
for (const ov::Output<const ov::Node>& model_input : compiled_model.inputs()) {

View File

@@ -30,19 +30,22 @@ def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
log.info('OpenVINO:')
log.info(f"{'Build ':.<39} {get_version()}")
if len(sys.argv) != 2:
log.info(f'Usage: {sys.argv[0]} <path_to_model>')
device_name = 'CPU'
if len(sys.argv) == 3:
device_name = sys.argv[2]
elif len(sys.argv) != 2:
log.info(f'Usage: {sys.argv[0]} <path_to_model> <device_name>(default: CPU)')
return 1
# Optimize for latency. Most of the devices are configured for latency by default,
# but there are exceptions like GNA
latency = {'PERFORMANCE_HINT': 'LATENCY'}
# Create Core and use it to compile a model.
# Pick a device by replacing CPU, for example AUTO:GPU,CPU.
# Select the device by providing the name as the second parameter to CLI.
# Using MULTI device is pointless in sync scenario
# because only one instance of openvino.runtime.InferRequest is used
core = ov.Core()
compiled_model = core.compile_model(sys.argv[1], 'CPU', latency)
compiled_model = core.compile_model(sys.argv[1], device_name, latency)
ireq = compiled_model.create_infer_request()
# Fill input data for the ireq
for model_input in compiled_model.inputs: