Apply improvements I found during https://github.com/openvinotoolkit/openvino/pull/13388 (#13743)
commit 28a118be39
parent 65f83b591e
@@ -128,7 +128,7 @@ public:
     // Inference Request guarantee that it will wait for all asynchronous internal tasks in destructor
     // So it should be released before any context that the request can use inside internal asynchronous tasks
     // For example all members of InferRequestsQueue would be destroyed before `requests` vector
-    // So requests can try to use this members from `putIdleRequest()` that would be called from request callback
+    // So requests can try to use this members from `put_idle_request()` that would be called from request callback
     // To avoid this we should move this vector declaration after all members declaration or just clear it manually
     // in destructor
     requests.clear();
@@ -842,7 +842,7 @@ int main(int argc, char* argv[]) {
                 throw ov::Exception("Every device used with the benchmark_app should support " +
                                     std::string(ov::optimal_number_of_infer_requests.name()) +
                                     " Failed to query the metric for the " + device_name +
-                                    " with error:" + ex.what());
+                                    " with error: " + ex.what());
             }
         }
     }
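For context, the property queried above is also exposed to Python; a minimal sketch (not part of this commit; the model path and device name are placeholders) of reading it from a compiled model:

    from openvino.runtime import Core

    core = Core()
    compiled_model = core.compile_model("model.xml", "CPU")  # placeholders

    # Same property the C++ code asks for; devices that cannot report it raise,
    # which is what the benchmark_app error message above guards against.
    n_requests = compiled_model.get_property("OPTIMAL_NUMBER_OF_INFER_REQUESTS")
    print(n_requests)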
@@ -1132,13 +1132,6 @@ int main(int argc, char* argv[]) {
         if (FLAGS_api == "sync") {
             inferRequest->infer();
         } else {
-            // As the inference request is currently idle, the wait() adds no
-            // additional overhead (and should return immediately). The primary
-            // reason for calling the method is exception checking/re-throwing.
-            // Callback, that governs the actual execution can handle errors as
-            // well, but as it uses just error codes it has no details like ‘what()’
-            // method of `std::exception` So, rechecking for any exceptions here.
-            inferRequest->wait();
             inferRequest->start_async();
         }
         ++iteration;
@@ -1182,8 +1175,7 @@ int main(int argc, char* argv[]) {
         }

         double totalDuration = inferRequestsQueue.get_duration_in_milliseconds();
-        double fps = (FLAGS_api == "sync") ? batchSize * 1000.0 / generalLatency.median_or_percentile
-                                           : 1000.0 * processedFramesN / totalDuration;
+        double fps = 1000.0 * processedFramesN / totalDuration;

         if (statistics) {
             statistics->add_parameters(StatisticsReport::Category::EXECUTION_RESULTS,
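With the ternary dropped, sync and async modes share one throughput formula: processed frames divided by total wall-clock duration. A quick standalone check of the arithmetic with made-up numbers:

    # Made-up numbers: 2000 processed frames over a 4000 ms run.
    processed_frames_n = 2000
    total_duration_ms = 4000.0
    fps = 1000.0 * processed_frames_n / total_duration_ms  # -> 500.0 FPS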
@@ -1264,7 +1256,7 @@ int main(int argc, char* argv[]) {
         slog::info << "Count: " << iteration << " iterations" << slog::endl;
         slog::info << "Duration: " << double_to_string(totalDuration) << " ms" << slog::endl;
         if (device_name.find("MULTI") == std::string::npos) {
-            slog::info << "Latency: " << slog::endl;
+            slog::info << "Latency:" << slog::endl;
             generalLatency.write_to_slog();

             if (FLAGS_pcseq && app_inputs_info.size() > 1) {
@@ -274,16 +274,16 @@ class CompiledModel(CompiledModelBase):


 class AsyncInferQueue(AsyncInferQueueBase):
-    """AsyncInferQueue with pool of asynchronous requests.
+    """AsyncInferQueue with a pool of asynchronous requests.

-    AsyncInferQueue represents helper that creates a pool of asynchronous
+    AsyncInferQueue represents a helper that creates a pool of asynchronous
     InferRequests and provides synchronization functions to control flow of
     a simple pipeline.
     """
     def __iter__(self) -> Iterable[InferRequest]:
         """Allows to iterate over AsyncInferQueue.

-        :return: a map object (which is an iterator) that yields InferRequests.
+        :return: a generator that yields InferRequests.
         :rtype: Iterable[openvino.runtime.InferRequest]
         """
         return (InferRequest(x) for x in super().__iter__())
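A minimal usage sketch of the class documented above (not part of this commit; the model path, device, input shape, and job count are placeholder assumptions):

    import numpy as np
    from openvino.runtime import Core, AsyncInferQueue

    core = Core()
    compiled_model = core.compile_model("model.xml", "CPU")  # placeholders

    queue = AsyncInferQueue(compiled_model, 4)  # pool of 4 requests

    def on_done(request, userdata):
        # Called when a request finishes; read its first output tensor.
        print(userdata, request.get_output_tensor(0).data.shape)

    queue.set_callback(on_done)

    for i in range(8):
        data = np.random.rand(1, 3, 224, 224).astype(np.float32)  # assumed shape
        queue.start_async({0: data}, userdata=i)

    queue.wait_all()

    # The __iter__ from the hunk above yields the wrapped InferRequests.
    for request in queue:
        print(request)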
@@ -163,7 +163,7 @@ public:

 void regclass_AsyncInferQueue(py::module m) {
     py::class_<AsyncInferQueue, std::shared_ptr<AsyncInferQueue>> cls(m, "AsyncInferQueue");
-    cls.doc() = "openvino.runtime.AsyncInferQueue represents helper that creates a pool of asynchronous"
+    cls.doc() = "openvino.runtime.AsyncInferQueue represents a helper that creates a pool of asynchronous"
                 "InferRequests and provides synchronization functions to control flow of a simple pipeline.";

     cls.def(py::init<ov::CompiledModel&, size_t>(),
@@ -4,13 +4,11 @@
 import os
 from datetime import datetime
 from math import ceil
-from typing import Union
 from openvino.runtime import Core, get_version, AsyncInferQueue

-from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
+from .utils.constants import GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
 from .utils.logging import logger
 from .utils.utils import get_duration_seconds
-from .utils.statistics_report import StatisticsReport

 def percentile(values, percent):
     return values[ceil(len(values) * percent / 100) - 1]
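The `percentile` helper kept at the end of the hunk indexes into an already sorted list, using `ceil` for a 1-based rank. A quick standalone check with made-up latencies:

    from math import ceil

    def percentile(values, percent):
        return values[ceil(len(values) * percent / 100) - 1]

    latencies_ms = sorted([9.0, 2.0, 5.0, 7.0, 1.0, 3.0, 10.0, 4.0, 8.0, 6.0])
    print(percentile(latencies_ms, 50))  # 5.0 (median)
    print(percentile(latencies_ms, 90))  # 9.0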
@@ -124,7 +124,7 @@ def main():

     # --------------------- 3. Setting device configuration --------------------------------------------------------
     next_step()
-    def get_device_type_from_name(name) :
+    def get_device_type_from_name(name):
         new_name = str(name)
         new_name = new_name.split(".", 1)[0]
         new_name = new_name.split("(", 1)[0]
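`get_device_type_from_name` strips an instance index ("GPU.1") or a parenthesized suffix before the device type is compared. A self-contained sketch (the trailing `return` lies outside the hunk and is assumed):

    def get_device_type_from_name(name):
        new_name = str(name)
        new_name = new_name.split(".", 1)[0]
        new_name = new_name.split("(", 1)[0]
        return new_name  # assumed; the hunk ends before the return

    print(get_device_type_from_name("GPU.1"))   # GPU
    print(get_device_type_from_name("CPU(x)"))  # CPU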
@@ -1,8 +1,7 @@
 # Copyright (C) 2018-2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-import sys,argparse
-from fnmatch import fnmatch
+import sys, argparse

 from openvino.tools.benchmark.utils.utils import show_available_devices

@@ -388,8 +388,8 @@ def print_perf_counters(perf_counts_list):
               f"{'execType: ' + pi.exec_type:<20}")
         total_time += pi.real_time
         total_time_cpu += pi.cpu_time
-    print(f'Total time: {(total_time // timedelta(microseconds=1)) / 1000.0} milliseconds')
-    print(f'Total CPU time: {(total_time_cpu // timedelta(microseconds=1)) / 1000.0} milliseconds\n')
+    print(f'Total time: {(total_time // timedelta(microseconds=1)) / 1000.0} seconds')
+    print(f'Total CPU time: {(total_time_cpu // timedelta(microseconds=1)) / 1000.0} seconds\n')


 def get_command_line_arguments(argv):
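For reference, the `timedelta` idiom in the prints above: floor-dividing one `timedelta` by another returns how many whole divisors fit, so `// timedelta(microseconds=1)` yields the total duration as a count of microseconds, which the code then scales down by 1000. A standalone check with a made-up duration:

    from datetime import timedelta

    # Made-up accumulated duration.
    total_time = timedelta(seconds=2, milliseconds=345)

    ticks = total_time // timedelta(microseconds=1)  # 2345000 microseconds
    print(ticks / 1000.0)  # 2345.0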