Updated common migration pipeline (#8176)
* Updated common migration pipeline
* Fixed merge issue
* Added new model and extended example
* Fixed typo
* Added v10-v11 comparison
parent 9fc64ea726
commit 54c384db11
@@ -32,6 +32,8 @@ OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:read_model

The read model has the same structure as the example in the [OpenVINO™ Graph Construction](@ref ov_graph_construction) guide.
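
For orientation, here is a minimal sketch of the step this snippet covers, assuming the `ov::runtime::Core` API used in `ov_common.cpp` later in this commit (the model path is a placeholder):

```cpp
#include <openvino/core/core.hpp>
#include <openvino/runtime/runtime.hpp>

int main() {
    ov::runtime::Core core;
    // Read a model from disk; "model.xml" is a placeholder path
    std::shared_ptr<ov::Function> network = core.read_model("model.xml");
    return 0;
}
```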

### 2.1 Configure Input and Output of the Model

Inference Engine API:

@@ -52,4 +54,108 @@ OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:compile_model
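
The key call, shown in the `ov_common.cpp` snippet later in this commit, replaces the Inference Engine `Core::LoadNetwork`:

```cpp
// "CPU" selects the target device; network is the model read above
ov::runtime::ExecutableNetwork exec_network = core.compile_model(network, "CPU");
```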

## 5. TBD

## 4. Create an Inference Request

Inference Engine API:

@snippet snippets/ie_common.cpp ie:create_infer_request

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:create_infer_request
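
Both snippets boil down to a single call on the compiled network; only the naming changes:

```cpp
// Inference Engine:  infer_request = exec_network.CreateInferRequest();
// OpenVINO 2.0:
ov::runtime::InferRequest infer_request = exec_network.create_infer_request();
```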

## 5. Prepare Input

### IR v10

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_input_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_v10

### IR v11

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_input_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned

### ONNX

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_input_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned

### From Function

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_input_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_input_tensor_aligned
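
To summarize the four cases above: with IR v10 the runtime exposes converted precisions, while for IR v11, ONNX, and `ov::Function` inputs the element type matches the original framework. A sketch based on the `inputs_v10`/`inputs_aligned` snippets later in this commit:

```cpp
ov::runtime::Tensor input_tensor = infer_request.get_input_tensor(0);
// IR v10: the original i64 precision was converted to i32
auto data_v10 = input_tensor.data<int32_t>();
// IR v11 / ONNX / ov::Function: element types are aligned with the framework,
// so the same input would be read as i64 instead:
// auto data_aligned = input_tensor.data<int64_t>();
```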

## 6. Start Inference

Inference Engine API:

@snippet snippets/ie_common.cpp ie:inference

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:inference
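
The synchronous call is a one-liner in both APIs; only the casing changes:

```cpp
// Inference Engine:  infer_request.Infer();
// OpenVINO 2.0:
infer_request.infer();
```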

## 7. Process the Inference Results

### IR v10

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_output_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_v10

### IR v11

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_output_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned

### ONNX

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_output_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned

### From Function

Inference Engine API:

@snippet snippets/ie_common.cpp ie:get_output_tensor

OpenVINO™ 2.0 API:

@snippet snippets/ov_common.cpp ov_api_2_0:get_output_tensor_aligned
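
As with inputs, the only difference across these cases is the element type used to read the result. A sketch based on the `outputs_v10`/`outputs_aligned` snippets later in this commit:

```cpp
// The model has only one output
ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
// IR v10: i64 results come back converted to i32
auto out_v10 = output_tensor.data<int32_t>();
// IR v11 / ONNX / ov::Function: the framework precision (i64) is preserved:
// auto out_aligned = output_tensor.data<int64_t>();
```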

docs/migration_ov_2_0/docs/graph_construction.md (new file, +12)

@@ -0,0 +1,12 @@

# OpenVINO™ Graph Construction {#ov_graph_construction}

OpenVINO™ 2.0 includes the nGraph engine as a common part, and the `ngraph` namespace was renamed to `ov`.
The code snippets below show how application code should be changed to migrate to OpenVINO™ 2.0.
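
The rename is mechanical. For example (using the opset8 types from the snippets below):

```cpp
// nGraph:
// auto data1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 2, 2});
// OpenVINO 2.0: the same call in the ov namespace
auto data1 = std::make_shared<ov::opset8::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
```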

nGraph API:

@snippet snippets/ngraph.cpp ngraph:graph

OpenVINO™ 2.0 API:

@snippet snippets/ov_graph.cpp ov:graph

@@ -9,4 +9,5 @@ The list with differences between APIs below:

- Namespaces were aligned between components.

Please refer to the following transition guides to learn how to migrate your own application to OpenVINO™ API 2.0:
- [OpenVINO™ Graph Construction](@ref ov_graph_construction)
- [OpenVINO™ Common Inference pipeline](@ref ov_inference_pipeline)

@@ -27,8 +27,29 @@ int main() {
    //! [ie:create_infer_request]

    //! [ie:get_input_tensor]
    InferenceEngine::Blob::Ptr input_blob = infer_request.GetBlob(inputs.begin()->first);
    // fill input blob
    InferenceEngine::Blob::Ptr input_blob1 = infer_request.GetBlob(inputs.begin()->first);
    // fill first blob
    InferenceEngine::SizeVector dims1 = input_blob1->getTensorDesc().getDims();
    InferenceEngine::MemoryBlob::Ptr minput1 = InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob1);
    if (minput1) {
        // The locked memory holder should stay alive while its buffer is accessed
        auto minputHolder = minput1->wmap();
        // Original I64 precision was converted to I32
        auto data = minputHolder.as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>();
        // Fill data ...
    }
    InferenceEngine::Blob::Ptr input_blob2 = infer_request.GetBlob("data2");
    // fill second blob
    InferenceEngine::MemoryBlob::Ptr minput2 = InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob2);
    if (minput2) {
        // The locked memory holder should stay alive while its buffer is accessed
        auto minputHolder = minput2->wmap();
        // Original I64 precision was converted to I32
        auto data = minputHolder.as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>();
        // Fill data ...
    }
    //! [ie:get_input_tensor]

    //! [ie:inference]

@@ -37,7 +58,16 @@ int main() {
    //! [ie:get_output_tensor]
    InferenceEngine::Blob::Ptr output_blob = infer_request.GetBlob(outputs.begin()->first);
    // process output data
    InferenceEngine::MemoryBlob::Ptr moutput = InferenceEngine::as<InferenceEngine::MemoryBlob>(output_blob);
    if (moutput) {
        // The locked memory holder should stay alive while its buffer is accessed
        auto moutputHolder = moutput->rmap();
        // Original I64 precision was converted to I32
        auto data =
            moutputHolder.as<const InferenceEngine::PrecisionTrait<InferenceEngine::Precision::I32>::value_type*>();
        // process output data
    }
    //! [ie:get_output_tensor]
    return 0;
}

docs/snippets/ngraph.cpp (new file, +48)

@@ -0,0 +1,48 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <ngraph/ngraph.hpp>
#include <ngraph/opsets/opset8.hpp>

int main() {
    //! [ngraph:graph]
    //  _____________           _____________
    // |  Parameter  |         |  Parameter  |
    // |    data1    |         |    data2    |
    // |_____________|         |_____________|
    //        |                       |
    //        | data1_t               | data2_t
    //         \                     /
    //          \                   /
    //           \                 /
    //         ___\_______________/___
    //        |        Concat         |
    //        |        concat         |
    //        |_______________________|
    //                    |
    //                    | concat_t
    //                    |
    //             _______|_______
    //            |    Result     |
    //            |    result     |
    //            |_______________|
    auto data1 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 3, 2, 2});
    data1->set_friendly_name("data1");        // operation name
    data1->output(0).set_names({"data1_t"});  // tensor names
    auto data2 = std::make_shared<ngraph::opset8::Parameter>(ngraph::element::i64, ngraph::Shape{1, 2, 2, 2});
    data2->set_friendly_name("data2");        // operation name
    data2->output(0).set_names({"data2_t"});  // tensor names

    auto concat = std::make_shared<ngraph::opset8::Concat>(ngraph::OutputVector{data1, data2}, 1);
    concat->set_friendly_name("concat");        // operation name
    concat->output(0).set_names({"concat_t"});  // tensor name

    auto result = std::make_shared<ngraph::opset8::Result>(concat);
    result->set_friendly_name("result");  // operation name

    auto f = std::make_shared<ngraph::Function>(ngraph::ResultVector{result},
                                                ngraph::ParameterVector{data1, data2},
                                                "function_name");
    //! [ngraph:graph]
    return 0;
}
@@ -1,9 +1,61 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <openvino/core/function.hpp>
#include <openvino/core/core.hpp>
#include <openvino/runtime/runtime.hpp>

void inputs_v10(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_input_tensor_v10]
    // Get input tensor by index
    ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
    // IR v10 works with converted precisions (i64 -> i32)
    auto data1 = input_tensor1.data<int32_t>();
    // Fill first data ...

    // Get input tensor by tensor name
    ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
    // IR v10 works with converted precisions (i64 -> i32)
    auto data2 = input_tensor2.data<int32_t>();
    // Fill second data ...
    //! [ov_api_2_0:get_input_tensor_v10]
}

void inputs_aligned(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_input_tensor_aligned]
    // Get input tensor by index
    ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
    // Element types, names and layouts are aligned with framework
    auto data1 = input_tensor1.data<int64_t>();
    // Fill first data ...

    // Get input tensor by tensor name
    ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
    // Element types, names and layouts are aligned with framework
    auto data2 = input_tensor2.data<int64_t>();
    // Fill second data ...
    //! [ov_api_2_0:get_input_tensor_aligned]
}

void outputs_v10(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_output_tensor_v10]
    // model has only one output
    ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
    // IR v10 works with converted precisions (i64 -> i32)
    auto out_data = output_tensor.data<int32_t>();
    // process output data
    //! [ov_api_2_0:get_output_tensor_v10]
}

void outputs_aligned(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_output_tensor_aligned]
    // model has only one output
    ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
    // Element types, names and layouts are aligned with framework
    auto out_data = output_tensor.data<int64_t>();
    // process output data
    //! [ov_api_2_0:get_output_tensor_aligned]
}

int main() {
    //! [ov_api_2_0:create_core]
    ov::runtime::Core core;

@@ -14,21 +66,24 @@ int main() {
    //! [ov_api_2_0:read_model]

    //! [ov_api_2_0:get_inputs_outputs]
    ov::ParameterVector inputs = network->get_parameters();
    ov::ResultVector outputs = network->get_results();
    std::vector<ov::Output<ov::Node>> inputs = network->inputs();
    std::vector<ov::Output<ov::Node>> outputs = network->outputs();
    //! [ov_api_2_0:get_inputs_outputs]

    //! [ov_api_2_0:compile_model]
    ov::runtime::ExecutableNetwork exec_network = core.compile_model(network, "CPU");
    //! [ov_api_2_0:compile_model]

    //! [ov_api_2_0:create_infer_request]
    ov::runtime::InferRequest infer_request = exec_network.create_infer_request();
    //! [ov_api_2_0:create_infer_request]

    inputs_aligned(infer_request);
    //! [ov_api_2_0:inference]
    infer_request.infer();
    //! [ov_api_2_0:inference]

    outputs_aligned(infer_request);

    return 0;
}

docs/snippets/ov_graph.cpp (new file, +47)

@@ -0,0 +1,47 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <openvino/core/core.hpp>
#include <openvino/opsets/opset8.hpp>

int main() {
    //! [ov:graph]
    //  _____________           _____________
    // |  Parameter  |         |  Parameter  |
    // |    data1    |         |    data2    |
    // |_____________|         |_____________|
    //        |                       |
    //        | data1_t               | data2_t
    //         \                     /
    //          \                   /
    //           \                 /
    //         ___\_______________/___
    //        |        Concat         |
    //        |        concat         |
    //        |_______________________|
    //                    |
    //                    | concat_t
    //                    |
    //             _______|_______
    //            |    Result     |
    //            |    result     |
    //            |_______________|
    auto data1 = std::make_shared<ov::opset8::Parameter>(ov::element::i64, ov::Shape{1, 3, 2, 2});
    data1->set_friendly_name("data1");        // operation name
    data1->output(0).set_names({"data1_t"});  // tensor names
    auto data2 = std::make_shared<ov::opset8::Parameter>(ov::element::i64, ov::Shape{1, 2, 2, 2});
    data2->set_friendly_name("data2");        // operation name
    data2->output(0).set_names({"data2_t"});  // tensor names

    auto concat = std::make_shared<ov::opset8::Concat>(ov::OutputVector{data1, data2}, 1);
    concat->set_friendly_name("concat");        // operation name
    concat->output(0).set_names({"concat_t"});  // tensor name

    auto result = std::make_shared<ov::opset8::Result>(concat);
    result->set_friendly_name("result");  // operation name

    auto f =
        std::make_shared<ov::Function>(ov::ResultVector{result}, ov::ParameterVector{data1, data2}, "function_name");
    //! [ov:graph]
    return 0;
}

@@ -1,7 +1,6 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "openvino/core/core_visibility.hpp"