diff --git a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md
index 0f4fb60cf8b..582a2f0038e 100644
--- a/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md
+++ b/docs/articles_en/learn_openvino/openvino_samples/cpp_sample_throughput_benchmark.md
@@ -75,7 +75,7 @@ Running
 
 .. code-block:: sh
 
-   throughput_benchmark <path_to_model>
+   throughput_benchmark <path_to_model> <device_name>(default: CPU)
 
 To run the sample, you need to specify a model:
 
diff --git a/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md b/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md
index a9b9bc86598..1cc9c025010 100644
--- a/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md
+++ b/docs/articles_en/learn_openvino/openvino_samples/python_sample_throughput_benchmark.md
@@ -72,7 +72,7 @@ Running
 
 .. code-block:: sh
 
-   python throughput_benchmark.py <path_to_model>
+   python throughput_benchmark.py <path_to_model> <device_name>(default: CPU)
 
 To run the sample, you need to specify a model:
 
diff --git a/samples/cpp/benchmark/throughput_benchmark/main.cpp b/samples/cpp/benchmark/throughput_benchmark/main.cpp
index 885bd27713b..4961fdaccee 100644
--- a/samples/cpp/benchmark/throughput_benchmark/main.cpp
+++ b/samples/cpp/benchmark/throughput_benchmark/main.cpp
@@ -22,8 +22,12 @@ int main(int argc, char* argv[]) {
     try {
         slog::info << "OpenVINO:" << slog::endl;
         slog::info << ov::get_openvino_version();
-        if (argc != 2) {
-            slog::info << "Usage : " << argv[0] << " <path_to_model>" << slog::endl;
+
+        std::string device_name = "CPU";
+        if (argc == 3) {
+            device_name = argv[2];
+        } else if (argc != 2) {
+            slog::info << "Usage : " << argv[0] << " <path_to_model> <device_name>(default: CPU)" << slog::endl;
             return EXIT_FAILURE;
         }
         // Optimize for throughput. Best throughput can be reached by
@@ -31,10 +35,10 @@ int main(int argc, char* argv[]) {
         ov::AnyMap tput{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}};
 
         // Create ov::Core and use it to compile a model.
-        // Pick a device by replacing CPU, for example MULTI:CPU(4),GPU(8).
+        // Select the device by providing the name as the second parameter to CLI.
         // It is possible to set CUMULATIVE_THROUGHPUT as ov::hint::PerformanceMode for AUTO device
         ov::Core core;
-        ov::CompiledModel compiled_model = core.compile_model(argv[1], "CPU", tput);
+        ov::CompiledModel compiled_model = core.compile_model(argv[1], device_name, tput);
         // Create optimal number of ov::InferRequest instances
         uint32_t nireq = compiled_model.get_property(ov::optimal_number_of_infer_requests);
         std::vector<ov::InferRequest> ireqs(nireq);
diff --git a/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py b/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
index c934a765017..0573642fb2f 100755
--- a/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
+++ b/samples/python/benchmark/throughput_benchmark/throughput_benchmark.py
@@ -30,18 +30,21 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     log.info('OpenVINO:')
     log.info(f"{'Build ':.<39} {get_version()}")
-    if len(sys.argv) != 2:
-        log.info(f'Usage: {sys.argv[0]} <path_to_model>')
+    device_name = 'CPU'
+    if len(sys.argv) == 3:
+        device_name = sys.argv[2]
+    elif len(sys.argv) != 2:
+        log.info(f'Usage: {sys.argv[0]} <path_to_model> <device_name>(default: CPU)')
         return 1
     # Optimize for throughput. Best throughput can be reached by
     # running multiple openvino.runtime.InferRequest instances asyncronously
     tput = {'PERFORMANCE_HINT': 'THROUGHPUT'}
 
     # Create Core and use it to compile a model.
-    # Pick a device by replacing CPU, for example MULTI:CPU(4),GPU(8).
+    # Select the device by providing the name as the second parameter to CLI.
     # It is possible to set CUMULATIVE_THROUGHPUT as PERFORMANCE_HINT for AUTO device
     core = ov.Core()
-    compiled_model = core.compile_model(sys.argv[1], 'CPU', tput)
+    compiled_model = core.compile_model(sys.argv[1], device_name, tput)
     # AsyncInferQueue creates optimal number of InferRequest instances
     ireqs = ov.AsyncInferQueue(compiled_model)
     # Fill input data for ireqs
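
With this patch applied, both samples accept an optional device name as the second argument and fall back to CPU when it is omitted. A hypothetical invocation for illustration (the model path ``model.xml`` and the availability of a GPU device are assumptions, not part of the patch):

.. code-block:: sh

   throughput_benchmark model.xml GPU
   python throughput_benchmark.py model.xml GPU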
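
The retained comments note that CUMULATIVE_THROUGHPUT can be set as the performance hint for the AUTO device. A minimal sketch of that variant, assuming an ``import openvino as ov`` alias matching the sample's usage and a placeholder model path (illustrative only, not part of the patch):

.. code-block:: py

   import openvino as ov

   core = ov.Core()
   # With AUTO plus the CUMULATIVE_THROUGHPUT hint, inference requests are
   # distributed across all available devices to aggregate their throughput.
   compiled_model = core.compile_model(
       'model.xml',  # placeholder path
       'AUTO',
       {'PERFORMANCE_HINT': 'CUMULATIVE_THROUGHPUT'},
   )
   # As in the sample, AsyncInferQueue creates the optimal number of InferRequest instances
   ireqs = ov.AsyncInferQueue(compiled_model)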