diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md
index e4430f0331b..0adb885e6fa 100644
--- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md
+++ b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_sync_benchmark.md
@@ -70,7 +70,7 @@ Running
 
 .. code-block:: sh
 
-   sync_benchmark <path_to_model>
+   sync_benchmark <path_to_model> <device_name>(default: CPU)
 
 To run the sample, you need to specify a model:
 
diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md
index ecd142cffca..fdbd0fddb2f 100644
--- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md
+++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_sync_benchmark.md
@@ -67,7 +67,7 @@ Running
 
 .. code-block:: sh
 
-   python sync_benchmark.py <path_to_model>
+   python sync_benchmark.py <path_to_model> <device_name>(default: CPU)
 
 To run the sample, you need to specify a model:
 
@@ -138,4 +138,4 @@ See Also
 * :doc:`Model Downloader `
 * :doc:`Convert a Model `
 
-@endsphinxdirective
\ No newline at end of file
+@endsphinxdirective
diff --git a/samples/cpp/benchmark/sync_benchmark/main.cpp b/samples/cpp/benchmark/sync_benchmark/main.cpp
index b2f09d9053d..c0bb1656cef 100644
--- a/samples/cpp/benchmark/sync_benchmark/main.cpp
+++ b/samples/cpp/benchmark/sync_benchmark/main.cpp
@@ -20,8 +20,12 @@ int main(int argc, char* argv[]) {
     try {
         slog::info << "OpenVINO:" << slog::endl;
         slog::info << ov::get_openvino_version();
-        if (argc != 2) {
-            slog::info << "Usage : " << argv[0] << " <path_to_model>" << slog::endl;
+
+        std::string device_name = "CPU";
+        if (argc == 3) {
+            device_name = argv[2];
+        } else if (argc != 2) {
+            slog::info << "Usage : " << argv[0] << " <path_to_model> <device_name>(default: CPU)" << slog::endl;
             return EXIT_FAILURE;
         }
         // Optimize for latency. Most of the devices are configured for latency by default,
@@ -29,11 +33,11 @@ int main(int argc, char* argv[]) {
         ov::AnyMap latency{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::LATENCY}};
 
         // Create ov::Core and use it to compile a model.
-        // Pick a device by replacing CPU, for example AUTO:GPU,CPU.
+        // Select the device by providing the name as the second parameter to CLI.
         // Using MULTI device is pointless in sync scenario
         // because only one instance of ov::InferRequest is used
         ov::Core core;
-        ov::CompiledModel compiled_model = core.compile_model(argv[1], "CPU", latency);
+        ov::CompiledModel compiled_model = core.compile_model(argv[1], device_name, latency);
         ov::InferRequest ireq = compiled_model.create_infer_request();
         // Fill input data for the ireq
         for (const ov::Output<const ov::Node>& model_input : compiled_model.inputs()) {
diff --git a/samples/python/benchmark/sync_benchmark/sync_benchmark.py b/samples/python/benchmark/sync_benchmark/sync_benchmark.py
index e270d25a64f..6aed1a489e1 100755
--- a/samples/python/benchmark/sync_benchmark/sync_benchmark.py
+++ b/samples/python/benchmark/sync_benchmark/sync_benchmark.py
@@ -30,19 +30,22 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     log.info('OpenVINO:')
     log.info(f"{'Build ':.<39} {get_version()}")
-    if len(sys.argv) != 2:
-        log.info(f'Usage: {sys.argv[0]} <path_to_model>')
+    device_name = 'CPU'
+    if len(sys.argv) == 3:
+        device_name = sys.argv[2]
+    elif len(sys.argv) != 2:
+        log.info(f'Usage: {sys.argv[0]} <path_to_model> <device_name>(default: CPU)')
         return 1
     # Optimize for latency. Most of the devices are configured for latency by default,
     # but there are exceptions like GNA
     latency = {'PERFORMANCE_HINT': 'LATENCY'}
 
     # Create Core and use it to compile a model.
-    # Pick a device by replacing CPU, for example AUTO:GPU,CPU.
+    # Select the device by providing the name as the second parameter to CLI.
     # Using MULTI device is pointless in sync scenario
     # because only one instance of openvino.runtime.InferRequest is used
     core = ov.Core()
-    compiled_model = core.compile_model(sys.argv[1], 'CPU', latency)
+    compiled_model = core.compile_model(sys.argv[1], device_name, latency)
     ireq = compiled_model.create_infer_request()
     # Fill input data for the ireq
     for model_input in compiled_model.inputs:
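Usage sketch for the new optional device argument (the model path "model.xml" and availability of a GPU plugin are assumptions here; the first argument is any model OpenVINO can read, the second is an optional device name that defaults to CPU):

    python sync_benchmark.py model.xml        # unchanged behaviour: runs on CPU
    python sync_benchmark.py model.xml GPU    # new: device name passed as argv[2]
    ./sync_benchmark model.xml GPU            # the C++ sample accepts the same optional argument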