Extend throughput benchmark with device CLI parameter (#20816)
* Extend throughput benchmark CLI parameters
* Added device name as the second CLI parameter, with CPU as the default value
* Update samples/cpp/benchmark/throughput_benchmark/main.cpp

  Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
* Addressed review comments
* Modified the Python version
* Modified the documentation
* Addressed review comments
* Fixed the comment
* Modified the Python documentation
* Fixed device name handling in the Python version
* Update main.cpp
* Update throughput_benchmark.py

---------

Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
parent cf3d8435bd
commit e6e7ee5e6f
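In both the C++ and Python samples the change follows one pattern: the model path stays the first positional argument, and an optional second argument selects the inference device, falling back to CPU when omitted. A minimal sketch of that pattern in isolation (the parse_cli helper is illustrative only, not part of the patch):

    import sys


    def parse_cli(argv, default_device='CPU'):
        # argv is [program, path_to_model] or [program, path_to_model, device_name]
        if len(argv) == 3:
            return argv[1], argv[2]
        if len(argv) == 2:
            return argv[1], default_device
        raise SystemExit(f'Usage: {argv[0]} <path_to_model> <device_name>(default: CPU)')


    model_path, device_name = parse_cli(sys.argv)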
@@ -75,7 +75,7 @@ Running
 
 .. code-block:: sh
 
-   throughput_benchmark <path_to_model>
+   throughput_benchmark <path_to_model> <device_name>(default: CPU)
 
 
 To run the sample, you need to specify a model:
@@ -72,7 +72,7 @@ Running
 
 .. code-block:: sh
 
-   python throughput_benchmark.py <path_to_model>
+   python throughput_benchmark.py <path_to_model> <device_name>(default: CPU)
 
 
 To run the sample, you need to specify a model:
@@ -22,8 +22,12 @@ int main(int argc, char* argv[]) {
     try {
         slog::info << "OpenVINO:" << slog::endl;
         slog::info << ov::get_openvino_version();
-        if (argc != 2) {
-            slog::info << "Usage : " << argv[0] << " <path_to_model>" << slog::endl;
+        std::string device_name = "CPU";
+        if (argc == 3) {
+            device_name = argv[2];
+        } else if (argc != 2) {
+            slog::info << "Usage : " << argv[0] << " <path_to_model> <device_name>(default: CPU)" << slog::endl;
             return EXIT_FAILURE;
         }
         // Optimize for throughput. Best throughput can be reached by
@@ -31,10 +35,10 @@ int main(int argc, char* argv[]) {
         ov::AnyMap tput{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::THROUGHPUT}};
 
         // Create ov::Core and use it to compile a model.
-        // Pick a device by replacing CPU, for example MULTI:CPU(4),GPU(8).
+        // Select the device by providing the name as the second parameter to CLI.
         // It is possible to set CUMULATIVE_THROUGHPUT as ov::hint::PerformanceMode for AUTO device
         ov::Core core;
-        ov::CompiledModel compiled_model = core.compile_model(argv[1], "CPU", tput);
+        ov::CompiledModel compiled_model = core.compile_model(argv[1], device_name, tput);
         // Create optimal number of ov::InferRequest instances
         uint32_t nireq = compiled_model.get_property(ov::optimal_number_of_infer_requests);
         std::vector<ov::InferRequest> ireqs(nireq);
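With the hard-coded "CPU" gone, whatever string arrives as the second CLI parameter is handed straight to compile_model, so any valid OpenVINO device name works, including compound ones such as AUTO or MULTI:CPU,GPU. A short Python sketch of the same call under the THROUGHPUT hint (the model path is a placeholder, and device availability depends on the machine):

    import openvino as ov

    core = ov.Core()
    tput = {'PERFORMANCE_HINT': 'THROUGHPUT'}

    # 'model.xml' is a placeholder path; 'AUTO' assumes the AUTO plugin is available.
    compiled_model = core.compile_model('model.xml', 'AUTO', tput)

    # Mirrors the ov::optimal_number_of_infer_requests query in the C++ hunk above.
    nireq = compiled_model.get_property('OPTIMAL_NUMBER_OF_INFER_REQUESTS')
    print(f'optimal number of infer requests: {nireq}')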
@@ -30,18 +30,21 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     log.info('OpenVINO:')
     log.info(f"{'Build ':.<39} {get_version()}")
-    if len(sys.argv) != 2:
-        log.info(f'Usage: {sys.argv[0]} <path_to_model>')
+    device_name = 'CPU'
+    if len(sys.argv) == 3:
+        device_name = sys.argv[2]
+    elif len(sys.argv) != 2:
+        log.info(f'Usage: {sys.argv[0]} <path_to_model> <device_name>(default: CPU)')
         return 1
     # Optimize for throughput. Best throughput can be reached by
     # running multiple openvino.runtime.InferRequest instances asyncronously
     tput = {'PERFORMANCE_HINT': 'THROUGHPUT'}
 
     # Create Core and use it to compile a model.
-    # Pick a device by replacing CPU, for example MULTI:CPU(4),GPU(8).
+    # Select the device by providing the name as the second parameter to CLI.
     # It is possible to set CUMULATIVE_THROUGHPUT as PERFORMANCE_HINT for AUTO device
     core = ov.Core()
-    compiled_model = core.compile_model(sys.argv[1], 'CPU', tput)
+    compiled_model = core.compile_model(sys.argv[1], device_name, tput)
     # AsyncInferQueue creates optimal number of InferRequest instances
     ireqs = ov.AsyncInferQueue(compiled_model)
     # Fill input data for ireqs
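The samples keep hand-rolled sys.argv checks, which avoids any import beyond the standard library. The same contract could instead be written with argparse and an optional positional argument; a sketch under the assumption that the sample keeps exactly these two parameters:

    import argparse

    import openvino as ov

    parser = argparse.ArgumentParser()
    parser.add_argument('path_to_model', help='path to the model file')
    parser.add_argument('device_name', nargs='?', default='CPU',
                        help='inference device name (default: CPU)')
    args = parser.parse_args()

    core = ov.Core()
    compiled_model = core.compile_model(args.path_to_model, args.device_name,
                                        {'PERFORMANCE_HINT': 'THROUGHPUT'})
    ireqs = ov.AsyncInferQueue(compiled_model)

An argparse version would also reject surplus arguments and generate --help output, which the patched usage strings reproduce by hand.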