#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse
import logging as log
import sys

import cv2
import numpy as np
from openvino.preprocess import PrePostProcessor
from openvino.runtime import AsyncInferQueue, Core, InferRequest, Layout, Type


def parse_args() -> argparse.Namespace:
    """Parse and return command line arguments."""
    parser = argparse.ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    # fmt: off
    args.add_argument('-h', '--help', action='help',
                      help='Show this help message and exit.')
    args.add_argument('-m', '--model', type=str, required=True,
                      help='Required. Path to an .xml or .onnx file with a trained model.')
    args.add_argument('-i', '--input', type=str, required=True, nargs='+',
                      help='Required. Path to one or more image files.')
    args.add_argument('-d', '--device', type=str, default='CPU',
                      help='Optional. Specify the target device to infer on; CPU, GPU, MYRIAD, HDDL or HETERO '
                           'is acceptable. The sample will look for a suitable plugin for the specified device. '
                           'Default value is CPU.')
    # fmt: on
    return parser.parse_args()
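

# A typical invocation of this sample (file names are illustrative; any image
# format readable by OpenCV works):
#
#   python classification_sample_async.py -m model.xml -i banana.jpg car.bmp -d CPU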


def completion_callback(infer_request: InferRequest, image_path: str) -> None:
    predictions = next(iter(infer_request.results.values()))

    # Flatten the results array to one dimension
    probs = predictions.reshape(-1)

    # Get an array of 10 class IDs in descending order of probability
    top_10 = np.argsort(probs)[-10:][::-1]

    header = 'class_id probability'

    log.info(f'Image path: {image_path}')
    log.info('Top 10 results: ')
    log.info(header)
    log.info('-' * len(header))

    for class_id in top_10:
        probability_indent = ' ' * (len('class_id') - len(str(class_id)) + 1)
        log.info(f'{class_id}{probability_indent}{probs[class_id]:.7f}')

    log.info('')
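

# Example of the per-image report the callback produces (class ID and
# probability values are illustrative):
#
#   [ INFO ] Image path: banana.jpg
#   [ INFO ] Top 10 results:
#   [ INFO ] class_id probability
#   [ INFO ] --------------------
#   [ INFO ] 954      0.9707602
#   ...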


def main() -> int:
    log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
    args = parse_args()

    # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
    log.info('Creating OpenVINO Runtime Core')
    core = Core()
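
    # core.available_devices lists the devices visible to this runtime, which
    # can help verify the -d argument before compiling the model.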

    # --------------------------- Step 2. Read a model --------------------------------------------------------------------
    log.info(f'Reading the model: {args.model}')
    # (.xml and .bin files) or (.onnx file)
    model = core.read_model(args.model)

    if len(model.inputs) != 1:
        log.error('Sample supports only single input topologies')
        return -1

    if len(model.outputs) != 1:
        log.error('Sample supports only single output topologies')
        return -1

    # --------------------------- Step 3. Set up input --------------------------------------------------------------------
    # Read input images
    images = [cv2.imread(image_path) for image_path in args.input]

    # Resize images to model input dims
    _, _, h, w = model.input().shape
    resized_images = [cv2.resize(image, (w, h)) for image in images]

    # Add N dimension
    input_tensors = [np.expand_dims(image, 0) for image in resized_images]
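
    # Note: cv2.imread() returns HWC, BGR-ordered uint8 data. No color
    # conversion is done here, so the model is assumed to take BGR input
    # (or to have channel reversal folded in during model conversion).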

    # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
    ppp = PrePostProcessor(model)

    # 1) Set input tensor information:
    # - input() provides information about a single model input
    # - precision of tensor is supposed to be 'u8'
    # - layout of data is 'NHWC'
    ppp.input().tensor() \
        .set_element_type(Type.u8) \
        .set_layout(Layout('NHWC'))  # noqa: N400

    # 2) Here we suppose the model has 'NCHW' layout for input
    ppp.input().model().set_layout(Layout('NCHW'))

    # 3) Set output tensor information:
    # - precision of tensor is supposed to be 'f32'
    ppp.output().tensor().set_element_type(Type.f32)

    # 4) Apply preprocessing, modifying the original 'model'
    model = ppp.build()
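
    # After build(), the u8->f32 conversion and the NHWC->NCHW transpose are
    # embedded in the model graph itself, so the raw image tensors prepared in
    # Step 3 can be fed to the compiled model directly.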

    # --------------------------- Step 5. Load the model to the device ----------------------------------------------------
    log.info('Loading the model to the plugin')
    compiled_model = core.compile_model(model, args.device)
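
    # The device string passed here is the -d value; plugin names such as
    # 'CPU', 'GPU', or combined modes like 'HETERO:GPU,CPU' are resolved by
    # the Core at compile time.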

    # --------------------------- Step 6. Create infer request queue ------------------------------------------------------
    log.info('Starting inference in asynchronous mode')
    infer_queue = AsyncInferQueue(compiled_model, len(input_tensors))
    infer_queue.set_callback(completion_callback)
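
    # AsyncInferQueue manages a pool of InferRequest objects (one per input
    # here). Each job submitted with start_async() runs on an idle request,
    # and completion_callback is invoked with the finished request and the
    # userdata passed to start_async(), in this sample the image path.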

    # --------------------------- Step 7. Do inference --------------------------------------------------------------------
    for i, input_tensor in enumerate(input_tensors):
        infer_queue.start_async({0: input_tensor}, args.input[i])

    infer_queue.wait_all()
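
    # For comparison, a synchronous version of the loop above might look like
    # the following sketch (kept as a comment so the sample stays async):
    #
    #   request = compiled_model.create_infer_request()
    #   for i, input_tensor in enumerate(input_tensors):
    #       request.infer({0: input_tensor})
    #       completion_callback(request, args.input[i])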

    # ----------------------------------------------------------------------------------------------------------------------
    log.info('This sample is an API example, for any performance measurements please use the dedicated benchmark_app tool\n')
    return 0


if __name__ == '__main__':
    sys.exit(main())