This commit is contained in:
Zlobin Vladimir 2022-11-04 14:41:15 +04:00 committed by GitHub
parent 65f83b591e
commit 28a118be39
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 13 additions and 24 deletions

View File

@ -128,7 +128,7 @@ public:
// Inference Request guarantee that it will wait for all asynchronous internal tasks in destructor
// So it should be released before any context that the request can use inside internal asynchronous tasks
// For example all members of InferRequestsQueue would be destroyed before `requests` vector
// So requests can try to use these members from `putIdleRequest()` that would be called from request callback
// So requests can try to use these members from `put_idle_request()` that would be called from request callback
// To avoid this we should move this vector declaration after all members declaration or just clear it manually
// in destructor
requests.clear();

View File

@ -1132,13 +1132,6 @@ int main(int argc, char* argv[]) {
if (FLAGS_api == "sync") {
inferRequest->infer();
} else {
// As the inference request is currently idle, the wait() adds no
// additional overhead (and should return immediately). The primary
// reason for calling the method is exception checking/re-throwing.
// The callback that governs the actual execution can handle errors as
// well, but as it uses just error codes it has no details like the what()
// method of `std::exception`. So, rechecking for any exceptions here.
inferRequest->wait();
inferRequest->start_async();
}
++iteration;
@ -1182,8 +1175,7 @@ int main(int argc, char* argv[]) {
}
double totalDuration = inferRequestsQueue.get_duration_in_milliseconds();
double fps = (FLAGS_api == "sync") ? batchSize * 1000.0 / generalLatency.median_or_percentile
: 1000.0 * processedFramesN / totalDuration;
double fps = 1000.0 * processedFramesN / totalDuration;
if (statistics) {
statistics->add_parameters(StatisticsReport::Category::EXECUTION_RESULTS,

View File

@ -274,16 +274,16 @@ class CompiledModel(CompiledModelBase):
class AsyncInferQueue(AsyncInferQueueBase):
"""AsyncInferQueue with pool of asynchronous requests.
"""AsyncInferQueue with a pool of asynchronous requests.
AsyncInferQueue represents helper that creates a pool of asynchronous
AsyncInferQueue represents a helper that creates a pool of asynchronous
InferRequests and provides synchronization functions to control the flow of
a simple pipeline.
"""
def __iter__(self) -> Iterable[InferRequest]:
"""Allows iteration over AsyncInferQueue.
:return: a map object (which is an iterator) that yields InferRequests.
:return: a generator that yields InferRequests.
:rtype: Iterable[openvino.runtime.InferRequest]
"""
return (InferRequest(x) for x in super().__iter__())

View File

@ -163,7 +163,7 @@ public:
void regclass_AsyncInferQueue(py::module m) {
py::class_<AsyncInferQueue, std::shared_ptr<AsyncInferQueue>> cls(m, "AsyncInferQueue");
cls.doc() = "openvino.runtime.AsyncInferQueue represents helper that creates a pool of asynchronous"
cls.doc() = "openvino.runtime.AsyncInferQueue represents a helper that creates a pool of asynchronous"
"InferRequests and provides synchronization functions to control flow of a simple pipeline.";
cls.def(py::init<ov::CompiledModel&, size_t>(),

View File

@ -4,13 +4,11 @@
import os
from datetime import datetime
from math import ceil
from typing import Union
from openvino.runtime import Core, get_version, AsyncInferQueue
from .utils.constants import MULTI_DEVICE_NAME, HETERO_DEVICE_NAME, CPU_DEVICE_NAME, GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
from .utils.constants import GPU_DEVICE_NAME, XML_EXTENSION, BIN_EXTENSION
from .utils.logging import logger
from .utils.utils import get_duration_seconds
from .utils.statistics_report import StatisticsReport
def percentile(values, percent):
return values[ceil(len(values) * percent / 100) - 1]

View File

@ -2,7 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
import sys, argparse
from fnmatch import fnmatch
from openvino.tools.benchmark.utils.utils import show_available_devices

View File

@ -388,8 +388,8 @@ def print_perf_counters(perf_counts_list):
f"{'execType: ' + pi.exec_type:<20}")
total_time += pi.real_time
total_time_cpu += pi.cpu_time
print(f'Total time: {(total_time // timedelta(microseconds=1)) / 1000.0} milliseconds')
print(f'Total CPU time: {(total_time_cpu // timedelta(microseconds=1)) / 1000.0} milliseconds\n')
print(f'Total time: {(total_time // timedelta(microseconds=1)) / 1000.0} seconds')
print(f'Total CPU time: {(total_time_cpu // timedelta(microseconds=1)) / 1000.0} seconds\n')
def get_command_line_arguments(argv):