Added callback and wait migration guide (#10775)

* Added callback and wait migration guide

* Added start async

* Simplified wait

* Added selector for sync async

* fixed doc

* fixed build

* fixed doc

* fixed doc
Anton Pankratov 2022-03-10 14:00:42 +03:00 committed by GitHub
parent 5566b67238
commit a8a2640fb7
3 changed files with 94 additions and 13 deletions

View File

@@ -80,13 +80,13 @@ Inference Engine API fills inputs as `I32` precision (**not** aligned with the o
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:get_input_tensor]
.. tab:: ONNX
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:get_input_tensor]
.. tab:: Model created in code
.. doxygensnippet:: docs/snippets/ie_common.cpp
@@ -110,13 +110,13 @@ OpenVINO™ Runtime API 2.0 fills inputs as `I64` precision (aligned with the or
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:get_input_tensor_aligned]
.. tab:: ONNX
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:get_input_tensor_aligned]
.. tab:: Model created in code
.. doxygensnippet:: docs/snippets/ov_common.cpp
@@ -129,11 +129,39 @@ OpenVINO™ Runtime API 2.0 fills inputs as `I64` precision (aligned with the or
Inference Engine API:
@snippet docs/snippets/ie_common.cpp ie:inference
@sphinxdirective
.. tab:: sync
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:inference]
.. tab:: async
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:start_async_and_wait]
@endsphinxdirective
OpenVINO™ Runtime API 2.0:
@snippet docs/snippets/ov_common.cpp ov_api_2_0:inference
@sphinxdirective
.. tab:: sync
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:inference]
.. tab:: async
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:start_async_and_wait]
@endsphinxdirective
## 7. Process the Inference Results
@@ -152,13 +180,13 @@ Inference Engine API processes outputs as `I32` precision (**not** aligned with
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:get_output_tensor]
.. tab:: ONNX
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:get_output_tensor]
.. tab:: Model created in code
.. doxygensnippet:: docs/snippets/ie_common.cpp
@@ -184,17 +212,17 @@ OpenVINO™ Runtime API 2.0 processes outputs:
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:get_output_tensor_aligned]
.. tab:: ONNX
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:get_output_tensor_aligned]
.. tab:: Model created in code
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:get_output_tensor_aligned]
@endsphinxdirective
@endsphinxdirective
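For illustration, a minimal sketch of what the aligned output precision means in practice with API 2.0: the output tensor keeps the model's original element type, so an `i64` output can be read directly as `int64_t`. This is a sketch under the assumption of an integer (`i64`) model output; the helper name `print_output` is hypothetical and this is not the `[ov_api_2_0:get_output_tensor_aligned]` fragment itself.

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

// Hypothetical helper: reads an output tensor whose element type matches the
// original model (assumed here to be i64), which API 2.0 preserves.
void print_output(ov::InferRequest& infer_request) {
    ov::Tensor output = infer_request.get_output_tensor();
    if (output.get_element_type() == ov::element::i64) {
        const int64_t* data = output.data<int64_t>();
        for (size_t i = 0; i < output.get_size(); ++i) {
            std::cout << data[i] << '\n';
        }
    }
}
```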

View File: docs/snippets/ie_common.cpp

@@ -54,6 +54,32 @@ int main() {
infer_request.Infer();
//! [ie:inference]
//! [ie:start_async_and_wait]
// Set a completion callback that is invoked when inference results become available
auto restart_once = true;
infer_request.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
    [&, restart_once](InferenceEngine::InferRequest request, InferenceEngine::StatusCode status) mutable {
        if (status != InferenceEngine::OK) {
            // Process the error code
        } else {
            // Extract the inference result
            InferenceEngine::Blob::Ptr output_blob = request.GetBlob(outputs.begin()->first);
            // Restart inference if needed
            if (restart_once) {
                request.StartAsync();
                restart_once = false;
            }
        }
    });
// Start inference without blocking the current thread
infer_request.StartAsync();
// Get the inference status immediately without blocking
InferenceEngine::StatusCode status = infer_request.Wait(InferenceEngine::InferRequest::STATUS_ONLY);
// Wait for 1 millisecond
status = infer_request.Wait(1);
// Wait for inference completion
infer_request.Wait(InferenceEngine::InferRequest::RESULT_READY);
//! [ie:start_async_and_wait]
//! [ie:get_output_tensor]
InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first);
InferenceEngine::MemoryBlob::Ptr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(output_blob);
@@ -68,4 +94,4 @@ int main() {
}
//! [ie:get_output_tensor]
return 0;
}
}
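A hedged sketch of how the three legacy `Wait` modes used above interact when polling a request started with `StartAsync()`: `STATUS_ONLY` returns immediately with a status code, a numeric timeout blocks for at most that many milliseconds, and `RESULT_READY` blocks until the result is available. The helper below is an assumption for illustration, including the expectation that a still-running request reports `RESULT_NOT_READY`.

```cpp
#include <chrono>
#include <thread>
#include <inference_engine.hpp>

// Hypothetical polling loop around the legacy wait modes (sketch, not the snippet)
void poll_until_done(InferenceEngine::InferRequest& infer_request) {
    infer_request.StartAsync();
    while (true) {
        // STATUS_ONLY returns immediately with the current status code
        InferenceEngine::StatusCode status =
            infer_request.Wait(InferenceEngine::InferRequest::STATUS_ONLY);
        if (status == InferenceEngine::StatusCode::OK) {
            break;  // inference finished successfully
        }
        if (status != InferenceEngine::StatusCode::RESULT_NOT_READY) {
            break;  // any other code signals an error
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
}
```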

View File: docs/snippets/ov_common.cpp

@@ -74,11 +74,38 @@ int main() {
//! [ov_api_2_0:create_infer_request]
inputs_aligned(infer_request);
//! [ov_api_2_0:inference]
infer_request.infer();
//! [ov_api_2_0:inference]
//! [ov_api_2_0:start_async_and_wait]
// Set a completion callback that is invoked when inference results become available
auto restart_once = true;
infer_request.set_callback([&, restart_once](std::exception_ptr exception_ptr) mutable {
    if (exception_ptr) {
        // Process the exception or rethrow it
        std::rethrow_exception(exception_ptr);
    } else {
        // Extract the inference result
        ov::Tensor output_tensor = infer_request.get_output_tensor();
        // Restart inference if needed
        if (restart_once) {
            infer_request.start_async();
            restart_once = false;
        }
    }
});
// Start inference without blocking the current thread
infer_request.start_async();
// Check the inference status without blocking (zero timeout)
bool status = infer_request.wait_for(std::chrono::milliseconds{0});
// Wait for 1 millisecond
status = infer_request.wait_for(std::chrono::milliseconds{1});
// Wait for inference completion
infer_request.wait();
//! [ov_api_2_0:start_async_and_wait]
outputs_aligned(infer_request);
return 0;
}
}
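Putting the pieces together, a self-contained sketch of the API 2.0 callback flow shown above; the model path `model.xml` and the `CPU` device are placeholders, not taken from the snippets.

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    // Placeholder model path and device
    ov::CompiledModel compiled_model = core.compile_model("model.xml", "CPU");
    ov::InferRequest infer_request = compiled_model.create_infer_request();

    // The callback receives an exception_ptr: empty on success, set on failure
    infer_request.set_callback([&infer_request](std::exception_ptr ex) {
        if (ex) {
            try {
                std::rethrow_exception(ex);
            } catch (const std::exception& e) {
                std::cerr << "Inference failed: " << e.what() << '\n';
            }
            return;
        }
        ov::Tensor output = infer_request.get_output_tensor();
        std::cout << "Done, output elements: " << output.get_size() << '\n';
    });

    infer_request.start_async();  // returns immediately
    infer_request.wait();         // block until inference completes
    return 0;
}
```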