From 28a118be390e58234be39ebc9c912cd664eb0653 Mon Sep 17 00:00:00 2001
From: Zlobin Vladimir
Date: Fri, 4 Nov 2022 14:41:15 +0400
Subject: [PATCH] Apply improvements I found during
 https://github.com/openvinotoolkit/openvino/pull/13388 (#13743)

---
 samples/cpp/benchmark_app/infer_request_wrap.hpp   |  2 +-
 samples/cpp/benchmark_app/main.cpp                 | 14 +++-----------
 src/bindings/python/src/openvino/runtime/ie_api.py |  6 +++---
 .../src/pyopenvino/core/async_infer_queue.cpp      |  2 +-
 .../openvino/tools/benchmark/benchmark.py          |  4 +---
 .../openvino/tools/benchmark/main.py               |  2 +-
 .../openvino/tools/benchmark/parameters.py         |  3 +--
 .../openvino/tools/benchmark/utils/utils.py        |  4 ++--
 8 files changed, 13 insertions(+), 24 deletions(-)

diff --git a/samples/cpp/benchmark_app/infer_request_wrap.hpp b/samples/cpp/benchmark_app/infer_request_wrap.hpp
index d1938f469a7..4e8bbb009d5 100644
--- a/samples/cpp/benchmark_app/infer_request_wrap.hpp
+++ b/samples/cpp/benchmark_app/infer_request_wrap.hpp
@@ -128,7 +128,7 @@ public:
         // Inference Request guarantee that it will wait for all asynchronous internal tasks in destructor
         // So it should be released before any context that the request can use inside internal asynchronous tasks
         // For example all members of InferRequestsQueue would be destroyed before `requests` vector
-        // So requests can try to use this members from `putIdleRequest()` that would be called from request callback
+        // So requests can try to use this members from `put_idle_request()` that would be called from request callback
         // To avoid this we should move this vector declaration after all members declaration or just clear it manually
         // in destructor
         requests.clear();
diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp
index 509e65a6fee..39dde71f8ff 100755
--- a/samples/cpp/benchmark_app/main.cpp
+++ b/samples/cpp/benchmark_app/main.cpp
@@ -842,7 +842,7 @@ int main(int argc, char* argv[]) {
                         throw ov::Exception("Every device used with the benchmark_app should support " +
                                             std::string(ov::optimal_number_of_infer_requests.name()) +
                                             " Failed to query the metric for the " + device_name +
-                                            " with error:" + ex.what());
+                                            " with error: " + ex.what());
                     }
                 }
             }
@@ -1132,13 +1132,6 @@ int main(int argc, char* argv[]) {
             if (FLAGS_api == "sync") {
                 inferRequest->infer();
             } else {
-                // As the inference request is currently idle, the wait() adds no
-                // additional overhead (and should return immediately). The primary
-                // reason for calling the method is exception checking/re-throwing.
-                // Callback, that governs the actual execution can handle errors as
-                // well, but as it uses just error codes it has no details like ‘what()’
-                // method of `std::exception` So, rechecking for any exceptions here.
-                inferRequest->wait();
                 inferRequest->start_async();
             }
             ++iteration;
@@ -1182,8 +1175,7 @@ int main(int argc, char* argv[]) {
         }

         double totalDuration = inferRequestsQueue.get_duration_in_milliseconds();
-        double fps = (FLAGS_api == "sync") ? batchSize * 1000.0 / generalLatency.median_or_percentile
-                                           : 1000.0 * processedFramesN / totalDuration;
+        double fps = 1000.0 * processedFramesN / totalDuration;

         if (statistics) {
             statistics->add_parameters(StatisticsReport::Category::EXECUTION_RESULTS,
@@ -1264,7 +1256,7 @@ int main(int argc, char* argv[]) {
         slog::info << "Count: " << iteration << " iterations" << slog::endl;
         slog::info << "Duration: " << double_to_string(totalDuration) << " ms" << slog::endl;
         if (device_name.find("MULTI") == std::string::npos) {
-            slog::info << "Latency: " << slog::endl;
+            slog::info << "Latency:" << slog::endl;
             generalLatency.write_to_slog();

             if (FLAGS_pcseq && app_inputs_info.size() > 1) {
diff --git a/src/bindings/python/src/openvino/runtime/ie_api.py b/src/bindings/python/src/openvino/runtime/ie_api.py
index db66827d3cd..de9ba2efa47 100644
--- a/src/bindings/python/src/openvino/runtime/ie_api.py
+++ b/src/bindings/python/src/openvino/runtime/ie_api.py
@@ -274,16 +274,16 @@ class CompiledModel(CompiledModelBase):

 class AsyncInferQueue(AsyncInferQueueBase):
-    """AsyncInferQueue with pool of asynchronous requests.
+    """AsyncInferQueue with a pool of asynchronous requests.

-    AsyncInferQueue represents helper that creates a pool of asynchronous
+    AsyncInferQueue represents a helper that creates a pool of asynchronous
     InferRequests and provides synchronization functions to control flow of
     a simple pipeline.
     """

     def __iter__(self) -> Iterable[InferRequest]:
         """Allows to iterate over AsyncInferQueue.

-        :return: a map object (which is an iterator) that yields InferRequests.
+        :return: a generator that yields InferRequests.
         :rtype: Iterable[openvino.runtime.InferRequest]
         """
         return (InferRequest(x) for x in super().__iter__())
diff --git a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
index 23ffcb3e0e8..afbbc61f440 100644
--- a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
+++ b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp
@@ -163,7 +163,7 @@ public:

 void regclass_AsyncInferQueue(py::module m) {
     py::class_<AsyncInferQueue, std::shared_ptr<AsyncInferQueue>> cls(m, "AsyncInferQueue");
-    cls.doc() = "openvino.runtime.AsyncInferQueue represents helper that creates a pool of asynchronous"
+    cls.doc() = "openvino.runtime.AsyncInferQueue represents a helper that creates a pool of asynchronous"
                 "InferRequests and provides synchronization functions to control flow of a simple pipeline.";

     cls.def(py::init<ov::CompiledModel&, size_t>(),
diff --git a/tools/benchmark_tool/openvino/tools/benchmark/benchmark.py b/tools/benchmark_tool/openvino/tools/benchmark/benchmark.py
index 00015557a89..e8e5412e52f 100644
--- a/tools/benchmark_tool/openvino/tools/benchmark/benchmark.py
+++ b/tools/benchmark_tool/openvino/tools/benchmark/benchmark.py
@@ -4,13 +4,11 @@
 import os
 from datetime import datetime
 from math import ceil
-from typing import Union

 from openvino.runtime import Core, get_version, AsyncInferQueue
-from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
+from .utils.constants import GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
 from .utils.logging import logger
 from .utils.utils import get_duration_seconds
-from .utils.statistics_report import StatisticsReport

 def percentile(values, percent):
     return values[ceil(len(values) * percent / 100) - 1]
diff --git a/tools/benchmark_tool/openvino/tools/benchmark/main.py b/tools/benchmark_tool/openvino/tools/benchmark/main.py
index 797630d1f6c..2b5139a3ab6 100644
--- a/tools/benchmark_tool/openvino/tools/benchmark/main.py
+++ b/tools/benchmark_tool/openvino/tools/benchmark/main.py
@@ -124,7 +124,7 @@ def main():
     # --------------------- 3. Setting device configuration --------------------------------------------------------
     next_step()

-    def get_device_type_from_name(name) :
+    def get_device_type_from_name(name):
         new_name = str(name)
         new_name = new_name.split(".", 1)[0]
         new_name = new_name.split("(", 1)[0]
diff --git a/tools/benchmark_tool/openvino/tools/benchmark/parameters.py b/tools/benchmark_tool/openvino/tools/benchmark/parameters.py
index 29ea79376d8..292afdd24ef 100644
--- a/tools/benchmark_tool/openvino/tools/benchmark/parameters.py
+++ b/tools/benchmark_tool/openvino/tools/benchmark/parameters.py
@@ -1,8 +1,7 @@
 # Copyright (C) 2018-2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-import sys,argparse
-from fnmatch import fnmatch
+import sys, argparse

 from openvino.tools.benchmark.utils.utils import show_available_devices

diff --git a/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py b/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
index 7a97c035140..acc7cbc64cd 100644
--- a/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
+++ b/tools/benchmark_tool/openvino/tools/benchmark/utils/utils.py
@@ -388,8 +388,8 @@ def print_perf_counters(perf_counts_list):
                   f"{'execType: ' + pi.exec_type:<20}")
             total_time += pi.real_time
             total_time_cpu += pi.cpu_time
-        print(f'Total time: {(total_time // timedelta(microseconds=1)) / 1000.0} seconds')
-        print(f'Total CPU time: {(total_time_cpu // timedelta(microseconds=1)) / 1000.0} seconds\n')
+        print(f'Total time: {(total_time // timedelta(microseconds=1)) / 1000.0} milliseconds')
+        print(f'Total CPU time: {(total_time_cpu // timedelta(microseconds=1)) / 1000.0} milliseconds\n')


 def get_command_line_arguments(argv):
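
For readers of the ie_api.py hunk above: after this change, iterating an AsyncInferQueue yields openvino.runtime.InferRequest wrappers from a generator rather than exposing a raw map object. Below is a minimal usage sketch of that API; it is not part of the patch, and the model path, device, request count, and random input are illustrative placeholders that assume a model with a single static input.

import numpy as np
from openvino.runtime import AsyncInferQueue, Core

core = Core()
compiled = core.compile_model("model.xml", "CPU")  # placeholder model and device

# Pool of 4 asynchronous requests; results are gathered in the completion callback.
queue = AsyncInferQueue(compiled, 4)
results = {}
queue.set_callback(lambda request, userdata: results.update({userdata: request.results}))

for i in range(8):
    data = np.random.rand(*compiled.input(0).shape).astype(np.float32)  # placeholder input
    queue.start_async({0: data}, userdata=i)
queue.wait_all()

# Thanks to __iter__, this materializes a list of openvino.runtime.InferRequest objects.
requests = list(queue)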