Extend sync benchmark CLI parameters (#20844)
parent 0effa37811
commit c20d52dc4f
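This change gives the sync_benchmark samples (C++ and Python) an optional second positional CLI argument that selects the inference device; when it is omitted, the device defaults to CPU, matching the previously hard-coded behavior. The sample READMEs are updated to document the new argument.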
@@ -70,7 +70,7 @@ Running
 
 .. code-block:: sh
 
-   sync_benchmark <path_to_model>
+   sync_benchmark <path_to_model> <device_name>(default: CPU)
 
 
 To run the sample, you need to specify a model:
@@ -67,7 +67,7 @@ Running
 
 .. code-block:: sh
 
-   python sync_benchmark.py <path_to_model>
+   python sync_benchmark.py <path_to_model> <device_name>(default: CPU)
 
 
 To run the sample, you need to specify a model:
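Both READMEs now document the same contract: one required model path plus an optional device name that defaults to CPU. The sketch below shows that contract in isolation; the file name my_benchmark.py and the parse_args helper are hypothetical illustrations, not part of the commit.

.. code-block:: python

   import sys

   def parse_args(argv):
       """Parse <path_to_model> and an optional <device_name> (default: CPU)."""
       device_name = 'CPU'        # default when no device is given
       if len(argv) == 3:
           device_name = argv[2]  # e.g. GPU
       elif len(argv) != 2:
           raise SystemExit(f'Usage: {argv[0]} <path_to_model> <device_name>(default: CPU)')
       return argv[1], device_name

   if __name__ == '__main__':
       model_path, device_name = parse_args(sys.argv)
       print(model_path, device_name)

Running "python my_benchmark.py model.xml GPU" would select GPU, while "python my_benchmark.py model.xml" keeps the CPU default (model.xml is a placeholder path).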
@@ -138,4 +138,4 @@ See Also
 * :doc:`Model Downloader <omz_tools_downloader>`
 * :doc:`Convert a Model <openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide>`
 
 @endsphinxdirective
@@ -20,8 +20,12 @@ int main(int argc, char* argv[]) {
     try {
         slog::info << "OpenVINO:" << slog::endl;
         slog::info << ov::get_openvino_version();
-        if (argc != 2) {
-            slog::info << "Usage : " << argv[0] << " <path_to_model>" << slog::endl;
+        std::string device_name = "CPU";
+        if (argc == 3) {
+            device_name = argv[2];
+        } else if (argc != 2) {
+            slog::info << "Usage : " << argv[0] << " <path_to_model> <device_name>(default: CPU)" << slog::endl;
             return EXIT_FAILURE;
         }
         // Optimize for latency. Most of the devices are configured for latency by default,
@@ -29,11 +33,11 @@ int main(int argc, char* argv[]) {
         ov::AnyMap latency{{ov::hint::performance_mode.name(), ov::hint::PerformanceMode::LATENCY}};
 
         // Create ov::Core and use it to compile a model.
-        // Pick a device by replacing CPU, for example AUTO:GPU,CPU.
+        // Select the device by providing the name as the second parameter to CLI.
         // Using MULTI device is pointless in sync scenario
         // because only one instance of ov::InferRequest is used
         ov::Core core;
-        ov::CompiledModel compiled_model = core.compile_model(argv[1], "CPU", latency);
+        ov::CompiledModel compiled_model = core.compile_model(argv[1], device_name, latency);
         ov::InferRequest ireq = compiled_model.create_infer_request();
         // Fill input data for the ireq
         for (const ov::Output<const ov::Node>& model_input : compiled_model.inputs()) {
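The device name taken from the command line is forwarded verbatim to compile_model, so any device string OpenVINO accepts works, including compound ones like AUTO:GPU,CPU from the removed comment. To check which device plugins are actually present on a machine, a quick sketch (assuming the OpenVINO Python package is installed) is:

.. code-block:: python

   import openvino.runtime as ov

   core = ov.Core()
   # Lists the device names this installation can compile for, e.g. ['CPU', 'GPU']
   print(core.available_devices)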
@@ -30,19 +30,22 @@ def main():
     log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
     log.info('OpenVINO:')
     log.info(f"{'Build ':.<39} {get_version()}")
-    if len(sys.argv) != 2:
-        log.info(f'Usage: {sys.argv[0]} <path_to_model>')
+    device_name = 'CPU'
+    if len(sys.argv) == 3:
+        device_name = sys.argv[2]
+    elif len(sys.argv) != 2:
+        log.info(f'Usage: {sys.argv[0]} <path_to_model> <device_name>(default: CPU)')
         return 1
     # Optimize for latency. Most of the devices are configured for latency by default,
     # but there are exceptions like GNA
     latency = {'PERFORMANCE_HINT': 'LATENCY'}
 
     # Create Core and use it to compile a model.
-    # Pick a device by replacing CPU, for example AUTO:GPU,CPU.
+    # Select the device by providing the name as the second parameter to CLI.
     # Using MULTI device is pointless in sync scenario
     # because only one instance of openvino.runtime.InferRequest is used
     core = ov.Core()
-    compiled_model = core.compile_model(sys.argv[1], 'CPU', latency)
+    compiled_model = core.compile_model(sys.argv[1], device_name, latency)
     ireq = compiled_model.create_infer_request()
     # Fill input data for the ireq
     for model_input in compiled_model.inputs:
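Put together, the updated Python control flow reads roughly as below. This is a condensed sketch of the hunk above, not the full sample: per the diff, the real sample goes on to fill input data for the request before benchmarking.

.. code-block:: python

   import sys
   import openvino.runtime as ov

   def main():
       device_name = 'CPU'            # default device, as in the sample
       if len(sys.argv) == 3:
           device_name = sys.argv[2]  # optional second CLI argument
       elif len(sys.argv) != 2:
           print(f'Usage: {sys.argv[0]} <path_to_model> <device_name>(default: CPU)')
           return 1
       # Request the latency-oriented performance hint, as the sample does
       latency = {'PERFORMANCE_HINT': 'LATENCY'}
       core = ov.Core()
       compiled_model = core.compile_model(sys.argv[1], device_name, latency)
       ireq = compiled_model.create_infer_request()  # one request: sync scenario
       return 0

   if __name__ == '__main__':
       sys.exit(main())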