Removed ov::runtime namespace (#9781)
* Removed ov::runtime namespace
* clang-format
* Fixes
* template reference
* Fixes
@@ -6,10 +6,10 @@ Hello Classification C++ sample application demonstrates how to use the followin
 | Feature | API | Description |
 |:--- |:--- |:---
-| Basic Infer Flow | `ov::runtime::Core::read_model`, `ov::runtime::Core::compile_model`, `ov::runtime::CompiledModel::create_infer_request`, `ov::runtime::InferRequest::get_input_tensor`, `ov::runtime::InferRequest::set_input_tensor`, `ov::runtime::InferRequest::get_output_tensor` | Common API to do inference: configure input and output tensors, reading model, create infer request
-| Synchronous Infer | `ov::runtime::InferRequest::infer` | Do synchronous inference
+| Basic Infer Flow | `ov::Core::read_model`, `ov::Core::compile_model`, `ov::CompiledModel::create_infer_request`, `ov::InferRequest::get_input_tensor`, `ov::InferRequest::set_input_tensor`, `ov::InferRequest::get_output_tensor` | Common API to do inference: configure input and output tensors, reading model, create infer request
+| Synchronous Infer | `ov::InferRequest::infer` | Do synchronous inference
 | Model Operations | `ov::Model::inputs`, `ov::Model::outputs` | Managing of model
-| Tensor Operations| `ov::runtime::Tensor::get_element_type`, `ov::runtime::Tensor::get_shape`, `ov::runtime::Tensor::data` | Work with storing inputs, outputs of the model, weights and biases of the layers
+| Tensor Operations| `ov::Tensor::get_element_type`, `ov::Tensor::get_shape`, `ov::Tensor::data` | Work with storing inputs, outputs of the model, weights and biases of the layers
 | Input auto-resize | `ov::preprocess::PreProcessSteps::resize`, `ov::preprocess::InputInfo::model::set_layout` | Set image of the original size as input for a model with other input size. Resize and layout conversions will be performed automatically by the corresponding plugin just before inference
 
 | Options | Values |
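For readers migrating their own code, here is a minimal end-to-end sketch of the renamed "Basic Infer Flow" entry points from the table above. This is a hypothetical illustration, not code from this commit: the model path, the "CPU" device string, and the untyped input handling are placeholders.

```cpp
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;                                                  // was ov::runtime::Core
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");
    ov::CompiledModel compiled = core.compile_model(model, "CPU");  // was ov::runtime::CompiledModel
    ov::InferRequest request = compiled.create_infer_request();     // was ov::runtime::InferRequest
    ov::Tensor input = request.get_input_tensor();                  // was ov::runtime::Tensor
    // ... fill input.data() with preprocessed image bytes here ...
    request.infer();
    ov::Tensor output = request.get_output_tensor();
    return 0;
}
```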
@@ -38,7 +38,7 @@ int tmain(int argc, tchar* argv[]) {
         const std::string device_name = TSTRING2STRING(argv[3]);
 
         // -------- Step 1. Initialize OpenVINO Runtime Core --------
-        ov::runtime::Core core;
+        ov::Core core;
 
         // -------- Step 2. Read a model --------
         slog::info << "Loading model files: " << model_path << slog::endl;
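The `Model Operations` row of the feature table is untouched by the rename; for context, a hedged sketch of enumerating a freshly read model's inputs and outputs ("model.xml" is a placeholder path, and `get_any_name` throws for ports that carry no tensor names):

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    std::shared_ptr<ov::Model> model = core.read_model("model.xml");
    // ov::Model::inputs / ov::Model::outputs from the feature table.
    for (const ov::Output<ov::Node>& input : model->inputs())
        std::cout << "input:  " << input.get_any_name() << std::endl;
    for (const ov::Output<ov::Node>& output : model->outputs())
        std::cout << "output: " << output.get_any_name() << std::endl;
    return 0;
}
```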
@@ -63,8 +63,8 @@ int tmain(int argc, tchar* argv[]) {
         ov::Shape input_shape = {1, reader->height(), reader->width(), 3};
         std::shared_ptr<unsigned char> input_data = reader->getData();
 
-        // just wrap image data by ov::runtime::Tensor without allocating of new memory
-        ov::runtime::Tensor input_tensor = ov::runtime::Tensor(input_type, input_shape, input_data.get());
+        // just wrap image data by ov::Tensor without allocating of new memory
+        ov::Tensor input_tensor = ov::Tensor(input_type, input_shape, input_data.get());
 
         const ov::Shape tensor_shape = input_tensor.get_shape();
         const ov::Layout tensor_layout{"NHWC"};
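The wrap-without-copy idiom in this hunk only changes its type name. A self-contained sketch under assumed dimensions (u8 NHWC 1x224x224x3 instead of the sample's image reader; the backing buffer must outlive the tensor):

```cpp
#include <openvino/openvino.hpp>
#include <vector>

int main() {
    std::vector<unsigned char> image(1 * 224 * 224 * 3);  // stand-in for reader->getData()
    // Wraps the existing buffer; no new memory is allocated.
    ov::Tensor input_tensor(ov::element::u8, ov::Shape{1, 224, 224, 3}, image.data());
    return 0;
}
```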
@@ -98,10 +98,10 @@ int tmain(int argc, tchar* argv[]) {
         model = ppp.build();
 
         // -------- Step 5. Loading a model to the device --------
-        ov::runtime::CompiledModel compiled_model = core.compile_model(model, device_name);
+        ov::CompiledModel compiled_model = core.compile_model(model, device_name);
 
         // -------- Step 6. Create an infer request --------
-        ov::runtime::InferRequest infer_request = compiled_model.create_infer_request();
+        ov::InferRequest infer_request = compiled_model.create_infer_request();
         // -----------------------------------------------------------------------------------------------------
 
         // -------- Step 7. Prepare input --------
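Steps 5-7 with the post-commit spellings, condensed into a hedged helper. The "CPU" device string is an example; `core`, `model`, and `input_tensor` correspond to the objects built in the sample's earlier steps:

```cpp
#include <openvino/openvino.hpp>

// Compile the model, create a request, and attach the prepared input tensor.
ov::InferRequest make_request(ov::Core& core,
                              const std::shared_ptr<ov::Model>& model,
                              const ov::Tensor& input_tensor) {
    ov::CompiledModel compiled_model = core.compile_model(model, "CPU");
    ov::InferRequest infer_request = compiled_model.create_infer_request();
    infer_request.set_input_tensor(input_tensor);  // Step 7: hand over the wrapped image
    return infer_request;
}
```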
@@ -111,7 +111,7 @@ int tmain(int argc, tchar* argv[]) {
         infer_request.infer();
 
         // -------- Step 9. Process output
-        const ov::runtime::Tensor& output_tensor = infer_request.get_output_tensor();
+        const ov::Tensor& output_tensor = infer_request.get_output_tensor();
 
         // Print classification results
         ClassificationResult classification_result(output_tensor, {image_path});
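The sample delegates Step 9 to its `ClassificationResult` helper. For completeness, a hedged sketch of reading the scores directly from the renamed tensor type; it assumes a single f32 output laid out as one score per class:

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

// Find and print the highest-scoring class index.
void print_top_class(const ov::Tensor& output_tensor) {
    const float* scores = output_tensor.data<const float>();  // assumes f32 element type
    size_t best = 0;
    for (size_t i = 1; i < output_tensor.get_size(); ++i)
        if (scores[i] > scores[best])
            best = i;
    std::cout << "top class: " << best << " (score " << scores[best] << ")" << std::endl;
}
```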