// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include <openvino/core/core.hpp>
#include <openvino/runtime/runtime.hpp>

void inputs_v10(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_input_tensor_v10]
    // Get input tensor by index
    ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
    // IR v10 works with converted precisions (i64 -> i32)
    auto data1 = input_tensor1.data<int32_t>();
    // Fill first data ...
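    // As an illustration only (a sketch, not part of the original sample):
    // zero-fill the converted i32 buffer; this assumes a dense tensor whose
    // element count is reported by Tensor::get_size()
    for (size_t i = 0; i < input_tensor1.get_size(); ++i)
        data1[i] = 0;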

    // Get input tensor by tensor name
    ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
    // IR v10 works with converted precisions (i64 -> i32)
    auto data2 = input_tensor2.data<int32_t>();
    // Fill second data ...
    //! [ov_api_2_0:get_input_tensor_v10]
}

void inputs_aligned(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_input_tensor_aligned]
    // Get input tensor by index
    ov::runtime::Tensor input_tensor1 = infer_request.get_input_tensor(0);
    // Element types, names and layouts are aligned with framework
    auto data1 = input_tensor1.data<int64_t>();
    // Fill first data ...
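    // A sketch (assuming a dense tensor): since the element type is aligned
    // with the original framework, int64_t values can be written directly
    for (size_t i = 0; i < input_tensor1.get_size(); ++i)
        data1[i] = static_cast<int64_t>(i);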

    // Get input tensor by tensor name
    ov::runtime::Tensor input_tensor2 = infer_request.get_tensor("data2_t");
    // Element types, names and layouts are aligned with framework
    auto data2 = input_tensor2.data<int64_t>();
    // Fill second data ...
    //! [ov_api_2_0:get_input_tensor_aligned]
}

void outputs_v10(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_output_tensor_v10]
    // Model has only one output
    ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
    // IR v10 works with converted precisions (i64 -> i32)
    auto out_data = output_tensor.data<int32_t>();
    // Process output data ...
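    // A hypothetical post-processing step (a sketch, assuming the output is a
    // flat vector of scores): pick the index of the highest score
    size_t top_id = 0;
    for (size_t i = 1; i < output_tensor.get_size(); ++i) {
        if (out_data[i] > out_data[top_id])
            top_id = i;
    }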
    //! [ov_api_2_0:get_output_tensor_v10]
}

void outputs_aligned(ov::runtime::InferRequest& infer_request) {
    //! [ov_api_2_0:get_output_tensor_aligned]
    // Model has only one output
    ov::runtime::Tensor output_tensor = infer_request.get_output_tensor();
    // Element types, names and layouts are aligned with framework
    auto out_data = output_tensor.data<int64_t>();
    // Process output data ...
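    // A sketch of consuming the aligned i64 output (not part of the original
    // sample), e.g. accumulating a simple checksum over all elements
    int64_t sum = 0;
    for (size_t i = 0; i < output_tensor.get_size(); ++i)
        sum += out_data[i];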
    //! [ov_api_2_0:get_output_tensor_aligned]
}

int main() {
    //! [ov_api_2_0:create_core]
    ov::runtime::Core core;
    //! [ov_api_2_0:create_core]

    //! [ov_api_2_0:read_model]
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");
    //! [ov_api_2_0:read_model]

    //! [ov_api_2_0:get_inputs_outputs]
    std::vector<ov::Output<ov::Node>> inputs = model->inputs();
    std::vector<ov::Output<ov::Node>> outputs = model->outputs();
    //! [ov_api_2_0:get_inputs_outputs]
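    // For illustration (a sketch): each port can be queried for its tensor
    // name and shape; get_shape() assumes the model has static shapes
    for (const auto& input : inputs) {
        auto name = input.get_any_name();
        auto shape = input.get_shape();
    }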

    //! [ov_api_2_0:compile_model]
    ov::runtime::ExecutableNetwork exec_network = core.compile_model(model, "CPU");
    //! [ov_api_2_0:compile_model]

    //! [ov_api_2_0:create_infer_request]
    ov::runtime::InferRequest infer_request = exec_network.create_infer_request();
    //! [ov_api_2_0:create_infer_request]

    inputs_aligned(infer_request);
    //! [ov_api_2_0:inference]
    infer_request.infer();
    //! [ov_api_2_0:inference]

    outputs_aligned(infer_request);

    return 0;
}