DOCS: Removed useless 4 spaces in snippets (#10870)

* Updated snippets

* Added link to encryption
This commit is contained in:
Ilya Lavrenov
2022-03-11 08:43:18 +03:00
committed by GitHub
parent 5f19d22323
commit 829c8c98c5
12 changed files with 171 additions and 146 deletions

View File

@@ -249,6 +249,7 @@ Congratulations, you have made your first application with OpenVINO™ toolkit,
## See also
- [OpenVINO™ Runtime Preprocessing](./preprocessing_overview.md)
- [Using Encrypted Models with OpenVINO™](./protecting_model_guide.md)
[ie_api_flow_cpp]: img/BASIC_IE_API_workflow_Cpp.svg
[ie_api_use_cpp]: img/IMPLEMENT_PIPELINE_with_API_C.svg

View File

@@ -131,13 +131,13 @@ Inference Engine API:
@sphinxdirective
.. tab:: sync
.. tab:: Sync
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
:fragment: [ie:inference]
.. tab:: async
.. tab:: Async
.. doxygensnippet:: docs/snippets/ie_common.cpp
:language: cpp
@@ -149,13 +149,13 @@ OpenVINO™ Runtime API 2.0:
@sphinxdirective
.. tab:: sync
.. tab:: Sync
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp
:fragment: [ov_api_2_0:inference]
.. tab:: async
.. tab:: Async
.. doxygensnippet:: docs/snippets/ov_common.cpp
:language: cpp

View File

@@ -48,6 +48,8 @@ But the following OpenVINO tools don't support IR v10 as an input, they require
- Post Training Optimization tool
- Deep Learning WorkBench
> **NOTE**: If IR v10 models need to be quantized, the recommendation is to download and use Post Training Optimization tool from OpenVINO 2021.4 release.
### Differences between Inference Engine and OpenVINO Runtime 2.0
Inference Engine and ngraph APIs are not deprecated, they are fully functional and can be used in applications. But OpenVINO recommends users to migrate to new OpenVINO Runtime API 2.0, because it already has additional features and this list will be extended later. The following list of additional features is supported by new API:

View File

@@ -1,4 +1,4 @@
# Running on multiple device simultaneously {#openvino_docs_OV_UG_Running_on_multiple_devices}
# Running on multiple devices simultaneously {#openvino_docs_OV_UG_Running_on_multiple_devices}
## Introducing the Multi-Device Plugin (C++)

View File

@@ -9,7 +9,6 @@
:hidden:
openvino_docs_Integrate_OV_with_your_application
<!-- should be a part of Integrate OV in user application -->
openvino_docs_IE_DG_ShapeInference
openvino_docs_OV_UG_Working_with_devices
openvino_docs_OV_Runtime_UG_Preprocessing_Overview

View File

@@ -55,6 +55,9 @@ int main() {
//! [ie:inference]
//! [ie:start_async_and_wait]
// NOTE: For demonstration purposes we are trying to set callback
// which restarts inference inside one more time, so two inferences happen here
// Start inference without blocking current thread
auto restart_once = true;
infer_request.SetCompletionCallback<std::function<void(InferenceEngine::InferRequest, InferenceEngine::StatusCode)>>(
@@ -72,11 +75,11 @@ int main() {
}
});
infer_request.StartAsync();
// Get inference status
// Get inference status immediately
InferenceEngine::StatusCode status = infer_request.Wait(InferenceEngine::InferRequest::STATUS_ONLY);
// Wait for 1 miliseconds
// Wait for 1 millisecond
status = infer_request.Wait(1);
// Wait for inference complition
// Wait for inference completion
infer_request.Wait(InferenceEngine::InferRequest::RESULT_READY);
//! [ie:start_async_and_wait]

View File

@@ -3,39 +3,43 @@
int main() {
ov::Core core;
auto model = core.read_model("sample.xml");
{
//! [compile_model]
{
auto compiled_model = core.compile_model(model, "GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
}
auto compiled_model = core.compile_model(model, "GPU",
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
//! [compile_model]
}
{
//! [compile_model_no_auto_batching]
{
// disabling the automatic batching
// leaving intact other configurations options that the device selects for the 'throughput' hint
auto compiled_model = core.compile_model(model, "GPU", {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::allow_auto_batching(false)});
}
// disabling the automatic batching
// leaving intact other configuration options that the device selects for the 'throughput' hint
auto compiled_model = core.compile_model(model, "GPU",
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::allow_auto_batching(false));
//! [compile_model_no_auto_batching]
//! [query_optimal_num_requests]
{
// when the batch size is automatically selected by the implementation
// it is important to query/create and run the sufficient #requests
auto compiled_model = core.compile_model(model, "GPU", ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
auto num_requests = compiled_model.get_property(ov::optimal_number_of_infer_requests);
}
//! [query_optimal_num_requests]
//! [hint_num_requests]
{
// limiting the available parallel slack for the 'throughput' hint via the ov::hint::num_requests
// so that certain parameters (like selected batch size) are automatically accommodated accordingly
auto compiled_model = core.compile_model(model, "GPU", {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::num_requests(4)});
//! [query_optimal_num_requests]
// when the batch size is automatically selected by the implementation
// it is important to query/create and run a sufficient number of requests
auto compiled_model = core.compile_model(model, "GPU",
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
auto num_requests = compiled_model.get_property(ov::optimal_number_of_infer_requests);
//! [query_optimal_num_requests]
}
{
//! [hint_num_requests]
// limiting the available parallel slack for the 'throughput' hint via the ov::hint::num_requests
// so that certain parameters (like selected batch size) are automatically accommodated accordingly
auto compiled_model = core.compile_model(model, "GPU",
ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT),
ov::hint::num_requests(4));
//! [hint_num_requests]
}
return 0;
}

View File

@@ -80,6 +80,9 @@ int main() {
//! [ov_api_2_0:inference]
//! [ov_api_2_0:start_async_and_wait]
// NOTE: For demonstration purposes we are trying to set callback
// which restarts inference inside one more time, so two inferences happen here
auto restart_once = true;
infer_request.set_callback([&, restart_once] (std::exception_ptr exception_ptr) mutable {
if (exception_ptr) {
@@ -97,11 +100,11 @@ int main() {
});
// Start inference without blocking current thread
infer_request.start_async();
// Get inference status
// Get inference status immediately
bool status = infer_request.wait_for(std::chrono::milliseconds{0});
// Wait for one miliseconds
// Wait for one millisecond
status = infer_request.wait_for(std::chrono::milliseconds{1});
// Wait for inference complition
// Wait for inference completion
infer_request.wait();
//! [ov_api_2_0:start_async_and_wait]

View File

@@ -31,25 +31,26 @@ for (auto&& node : model->get_ops()) {
auto compiled_model = core.compile_model(model, device);
//! [fix_automatic_affinities]
//! [compile_model]
{
auto compiled_model = core.compile_model(model, "HETERO:GPU,CPU");
// or with ov::device::priorities with multiple args
compiled_model = core.compile_model(model, "HETERO", ov::device::priorities("GPU", "CPU"));
// or with ov::device::priorities with a single argument
compiled_model = core.compile_model(model, "HETERO", ov::device::priorities("GPU,CPU"));
}
//! [compile_model]
auto compiled_model = core.compile_model(model, "HETERO:GPU,CPU");
// or with ov::device::priorities with multiple args
compiled_model = core.compile_model(model, "HETERO", ov::device::priorities("GPU", "CPU"));
// or with ov::device::priorities with a single argument
compiled_model = core.compile_model(model, "HETERO", ov::device::priorities("GPU,CPU"));
//! [compile_model]
}
{
//! [configure_fallback_devices]
auto compiled_model = core.compile_model(model, "HETERO",
// GPU with fallback to CPU
ov::device::priorities("GPU", "CPU"),
// profiling is enabled only for GPU
ov::device::properties("GPU", ov::enable_profiling(true)),
// FP32 inference precision only for CPU
ov::device::properties("CPU", ov::hint::inference_precision(ov::element::f32))
);
auto compiled_model = core.compile_model(model, "HETERO",
// GPU with fallback to CPU
ov::device::priorities("GPU", "CPU"),
// profiling is enabled only for GPU
ov::device::properties("GPU", ov::enable_profiling(true)),
// FP32 inference precision only for CPU
ov::device::properties("CPU", ov::hint::inference_precision(ov::element::f32))
);
//! [configure_fallback_devices]
}
return 0;

View File

@@ -31,6 +31,8 @@ compiled_model = core.compile_model(model, device)
#! [compile_model]
compiled_model = core.compile_model(model, device_name="HETERO:GPU,CPU")
# device priorities via configuration property
compiled_model = core.compile_model(model, device_name="HETERO", config={"MULTI_DEVICE_PRIORITIES": "GPU,CPU"})
#! [compile_model]
#! [configure_fallback_devices]

View File

@@ -4,52 +4,55 @@
#include <openvino/core/layout.hpp>
int main() {
ov::Layout layout;
//! [ov:layout:simple]
layout = ov::Layout("NHWC");
//! [ov:layout:simple]
//! [ov:layout:complex]
// Each dimension has name separated by comma, layout is wrapped with square brackets
layout = ov::Layout("[time,temperature,humidity]");
//! [ov:layout:complex]
//! [ov:layout:partially_defined]
// First dimension is batch, 4th is 'channels'. Others are not important for us
layout = ov::Layout("N??C");
// Or the same using advanced syntax
layout = ov::Layout("[n,?,?,c]");
//! [ov:layout:partially_defined]
//! [ov:layout:dynamic]
// First dimension is 'batch' others are whatever
layout = ov::Layout("N...");
ov::Layout layout;
//! [ov:layout:simple]
layout = ov::Layout("NHWC");
//! [ov:layout:simple]
// Second dimension is 'channels' others are whatever
layout = ov::Layout("?C...");
//! [ov:layout:complex]
// Each dimension has a name, separated by commas; the layout is wrapped in square brackets
layout = ov::Layout("[time,temperature,humidity]");
//! [ov:layout:complex]
// Last dimension is 'channels' others are whatever
layout = ov::Layout("...C");
//! [ov:layout:dynamic]
//! [ov:layout:partially_defined]
// First dimension is batch, 4th is 'channels'. Others are not important for us
layout = ov::Layout("N??C");
// Or the same using advanced syntax
layout = ov::Layout("[n,?,?,c]");
//! [ov:layout:partially_defined]
//! [ov:layout:predefined]
// returns 0 for batch
ov::layout::batch_idx("NCDHW");
//! [ov:layout:dynamic]
// First dimension is 'batch' others are whatever
layout = ov::Layout("N...");
// returns 1 for channels
ov::layout::channels_idx("NCDHW");
// Second dimension is 'channels' others are whatever
layout = ov::Layout("?C...");
// returns 2 for depth
ov::layout::depth_idx("NCDHW");
// Last dimension is 'channels' others are whatever
layout = ov::Layout("...C");
//! [ov:layout:dynamic]
// returns -2 for height
ov::layout::height_idx("...HW");
//! [ov:layout:predefined]
// returns 0 for batch
ov::layout::batch_idx("NCDHW");
// returns -1 for width
ov::layout::width_idx("...HW");
//! [ov:layout:predefined]
// returns 1 for channels
ov::layout::channels_idx("NCDHW");
//! [ov:layout:dump]
layout = ov::Layout("NCHW");
std::cout << layout.to_string(); // prints [N,C,H,W]
//! [ov:layout:dump]
// returns 2 for depth
ov::layout::depth_idx("NCDHW");
return 0;
// returns -2 for height
ov::layout::height_idx("...HW");
// returns -1 for width
ov::layout::width_idx("...HW");
//! [ov:layout:predefined]
//! [ov:layout:dump]
layout = ov::Layout("NCHW");
std::cout << layout.to_string(); // prints [N,C,H,W]
//! [ov:layout:dump]
return 0;
}

View File

@@ -6,71 +6,78 @@
#include <openvino/core/preprocess/pre_post_process.hpp>
void ppp_input_1(ov::preprocess::PrePostProcessor& ppp) {
//! [ov:preprocess:input_1]
ppp.input() // no index/name is needed if model has one input
.preprocess().scale(50.f);
//! [ov:preprocess:input_1]
ppp.input() // no index/name is needed if model has one input
.preprocess().scale(50.f);
ppp.output() // same for output
.postprocess().convert_element_type(ov::element::u8);
//! [ov:preprocess:input_1]
//! [ov:preprocess:mean_scale]
ppp.input("input").preprocess().mean(128).scale(127);
//! [ov:preprocess:mean_scale]
//! [ov:preprocess:mean_scale_array]
// Suppose model's shape is {1, 3, 224, 224}
ppp.input("input").model().set_layout("NCHW"); // N=1, C=3, H=224, W=224
// Mean/Scale has 3 values which matches with C=3
ppp.input("input").preprocess()
.mean({103.94, 116.78, 123.68}).scale({57.21, 57.45, 57.73});
//! [ov:preprocess:mean_scale_array]
//! [ov:preprocess:convert_element_type]
// First define data type for your tensor
ppp.input("input").tensor().set_element_type(ov::element::u8);
ppp.output() // same for output
.postprocess().convert_element_type(ov::element::u8);
//! [ov:preprocess:input_1]
// Then define preprocessing step
ppp.input("input").preprocess().convert_element_type(ov::element::f32);
//! [ov:preprocess:mean_scale]
ppp.input("input").preprocess().mean(128).scale(127);
//! [ov:preprocess:mean_scale]
// If conversion is needed to `model's` element type, 'f32' can be omitted
ppp.input("input").preprocess().convert_element_type();
//! [ov:preprocess:convert_element_type]
//! [ov:preprocess:convert_layout]
// First define layout for your tensor
ppp.input("input").tensor().set_layout("NHWC");
//! [ov:preprocess:mean_scale_array]
// Suppose model's shape is {1, 3, 224, 224}
ppp.input("input").model().set_layout("NCHW"); // N=1, C=3, H=224, W=224
// Mean/Scale has 3 values, which matches C=3
ppp.input("input").preprocess()
.mean({103.94, 116.78, 123.68}).scale({57.21, 57.45, 57.73});
//! [ov:preprocess:mean_scale_array]
// Then define layout of model
ppp.input("input").model().set_layout("NCHW");
//! [ov:preprocess:convert_element_type]
// First define data type for your tensor
ppp.input("input").tensor().set_element_type(ov::element::u8);
std::cout << ppp; // Will print 'implicit layout conversion step'
//! [ov:preprocess:convert_layout]
//! [ov:preprocess:convert_layout_2]
ppp.input("input").tensor().set_shape({1, 480, 640, 3});
// Model expects shape {1, 3, 480, 640}
ppp.input("input").preprocess().convert_layout({0, 3, 1, 2});
// 0 -> 0; 3 -> 1; 1 -> 2; 2 -> 3
//! [ov:preprocess:convert_layout_2]
// Then define preprocessing step
ppp.input("input").preprocess().convert_element_type(ov::element::f32);
//! [ov:preprocess:resize_1]
ppp.input("input").tensor().set_shape({1, 3, 960, 1280});
ppp.input("input").model().set_layout("??HW");
ppp.input("input").preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR, 480, 640);
//! [ov:preprocess:resize_1]
//! [ov:preprocess:resize_2]
ppp.input("input").tensor().set_shape({1, 3, 960, 1280});
ppp.input("input").model().set_layout("??HW"); // Model accepts {1, 3, 480, 640} shape
// Resize to model's dimension
ppp.input("input").preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
//! [ov:preprocess:resize_2]
// If conversion is needed to `model's` element type, 'f32' can be omitted
ppp.input("input").preprocess().convert_element_type();
//! [ov:preprocess:convert_element_type]
//! [ov:preprocess:convert_color_1]
ppp.input("input").tensor().set_color_format(ov::preprocess::ColorFormat::BGR);
ppp.input("input").preprocess().convert_color(ov::preprocess::ColorFormat::RGB);
//! [ov:preprocess:convert_color_1]
//! [ov:preprocess:convert_color_2]
// This will split original `input` to 2 separate inputs: `input/y' and 'input/uv'
ppp.input("input").tensor().set_color_format(ov::preprocess::ColorFormat::NV12_TWO_PLANES);
ppp.input("input").preprocess().convert_color(ov::preprocess::ColorFormat::RGB);
std::cout << ppp; // Dump preprocessing steps to see what will happen
//! [ov:preprocess:convert_color_2]
//! [ov:preprocess:convert_layout]
// First define layout for your tensor
ppp.input("input").tensor().set_layout("NHWC");
// Then define layout of model
ppp.input("input").model().set_layout("NCHW");
std::cout << ppp; // Will print 'implicit layout conversion step'
//! [ov:preprocess:convert_layout]
//! [ov:preprocess:convert_layout_2]
ppp.input("input").tensor().set_shape({1, 480, 640, 3});
// Model expects shape {1, 3, 480, 640}
ppp.input("input").preprocess().convert_layout({0, 3, 1, 2});
// 0 -> 0; 3 -> 1; 1 -> 2; 2 -> 3
//! [ov:preprocess:convert_layout_2]
//! [ov:preprocess:resize_1]
ppp.input("input").tensor().set_shape({1, 3, 960, 1280});
ppp.input("input").model().set_layout("??HW");
ppp.input("input").preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR, 480, 640);
//! [ov:preprocess:resize_1]
//! [ov:preprocess:resize_2]
ppp.input("input").tensor().set_shape({1, 3, 960, 1280});
ppp.input("input").model().set_layout("??HW"); // Model accepts {1, 3, 480, 640} shape
// Resize to model's dimension
ppp.input("input").preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
//! [ov:preprocess:resize_2]
//! [ov:preprocess:convert_color_1]
ppp.input("input").tensor().set_color_format(ov::preprocess::ColorFormat::BGR);
ppp.input("input").preprocess().convert_color(ov::preprocess::ColorFormat::RGB);
//! [ov:preprocess:convert_color_1]
//! [ov:preprocess:convert_color_2]
// This will split the original `input` into 2 separate inputs: `input/y` and `input/uv`
ppp.input("input").tensor().set_color_format(ov::preprocess::ColorFormat::NV12_TWO_PLANES);
ppp.input("input").preprocess().convert_color(ov::preprocess::ColorFormat::RGB);
std::cout << ppp; // Dump preprocessing steps to see what will happen
//! [ov:preprocess:convert_color_2]
}
void ppp_input_2(ov::preprocess::PrePostProcessor& ppp) {