openvino/docs/snippets/ie_common.py
Anastasia Kuporosova 2bf8d910f6 [Docs][PyOV] update python snippets (#19367)
* [Docs][PyOV] update python snippets

* first snippet

* Fix samples debug

* Fix linter

* part1

* Fix speech sample

* update model state snippet

* add serialize

* add temp dir

* CPU snippets update (#134)

* snippets CPU 1/6

* snippets CPU 2/6

* snippets CPU 3/6

* snippets CPU 4/6

* snippets CPU 5/6

* snippets CPU 6/6

* make module TODO: REMEMBER ABOUT EXPORTING PYTHONPATH ON CIs ETC

* Add static model creation in snippets for CPU

* export_comp_model done

* leftovers

* apply comments

* apply comments -- properties

* small fixes

* remove debug info

* return IENetwork instead of Function

* apply comments

* revert precision change in common snippets

* update opset

* [PyOV] Edit docs for the rest of plugins (#136)

* modify main.py

* GNA snippets

* GPU snippets

* AUTO snippets

* MULTI snippets

* HETERO snippets

* Added properties

* update gna

* more samples

* Update docs/OV_Runtime_UG/model_state_intro.md

* Update docs/OV_Runtime_UG/model_state_intro.md

* attempt1 fix ci

* new approach to test

* temporary remove some files from run

* revert cmake changes

* fix ci

* fix snippet

* fix py_exclusive snippet

* fix preprocessing snippet

* clean-up main

* remove numpy installation in gha

* check for GPU

* add logger

* exclude main

* main update

* temp

* Temp2

* Temp2

* temp

* Revert temp

* add property execution devices

* hide output from samples

---------

Co-authored-by: p-wysocki <przemyslaw.wysocki@intel.com>
Co-authored-by: Jan Iwaszkiewicz <jan.iwaszkiewicz@intel.com>
Co-authored-by: Karol Blaszczak <karol.blaszczak@intel.com>
2023-09-13 21:05:24 +02:00


# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from utils import get_path_to_model, get_image, get_path_to_extension_library
#! [ie:create_core]
import numpy as np
import openvino.inference_engine as ie
core = ie.IECore()
#! [ie:create_core]
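# Illustrative addition (not part of the original snippet): the created core
# can report which devices are available for loading networks.
print(core.available_devices)  # e.g. ['CPU', 'GPU']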
model_path = get_path_to_model(True)
#! [ie:read_model]
network = core.read_network(model_path)
#! [ie:read_model]
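# Illustrative addition (not part of the original snippet): the parsed network
# exposes its inputs and outputs, useful for validating shapes before loading.
for input_name, input_info in network.input_info.items():
    print(input_name, input_info.input_data.shape)
for output_name, output_data in network.outputs.items():
    print(output_name, output_data.shape)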
#! [ie:compile_model]
# Load network to the device and create infer requests
exec_network = core.load_network(network, "CPU", num_requests=4)
#! [ie:compile_model]
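# Illustrative addition (not part of the original snippet): a loaded network
# can be queried for device-side metrics, e.g. the number of requests that
# would saturate the device.
optimal_requests = exec_network.get_metric("OPTIMAL_NUMBER_OF_INFER_REQUESTS")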
#! [ie:create_infer_request]
# Done in the previous step
#! [ie:create_infer_request]
#! [ie:get_input_tensor]
infer_request = exec_network.requests[0]
# Get input blobs mapped to input layer names
input_blobs = infer_request.input_blobs
data = input_blobs["data"].buffer
# Original I64 precision was converted to I32
assert data.dtype == np.int32
# Fill the first blob ...
#! [ie:get_input_tensor]
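# Illustrative addition (not part of the original snippet): `buffer` is a
# writable numpy view, so the blob can be filled in place with plain
# numpy assignment.
data[:] = np.zeros(data.shape, dtype=np.int32)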
#! [ie:inference]
results = infer_request.infer()
#! [ie:inference]
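# Illustrative addition (not part of the original snippet): with the legacy
# API, the request's output blobs hold the results once a synchronous
# inference completes.
outputs = {name: blob.buffer for name, blob in infer_request.output_blobs.items()}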
input_data = get_image()
def process_results(results, frame_id):
    pass
#! [ie:start_async_and_wait]
# Start async inference on a single infer request
infer_request.async_infer()
# Wait for up to 1 millisecond
infer_request.wait(1)
# Wait for inference completion
infer_request.wait()
# Demonstrates an async pipeline using ExecutableNetwork
results = []
# Callback to process inference results
def callback(output_blobs, _):
    # Copy the data from output blobs to a numpy array
    results_copy = {out_name: out_blob.buffer[:] for out_name, out_blob in output_blobs.items()}
    results.append(process_results(results_copy, 0))  # 0 is a placeholder frame id
# Set the completion callback on each infer request
for infer_request in exec_network.requests:
    infer_request.set_completion_callback(callback, py_data=infer_request.output_blobs)
# The async pipeline is managed by the ExecutableNetwork
total_frames = 100
for _ in range(total_frames):
    # Wait for at least one free request
    exec_network.wait(num_requests=1)
    # Get the id of an idle request
    idle_id = exec_network.get_idle_request_id()
    # Start asynchronous inference on the idle request
    exec_network.start_async(request_id=idle_id, inputs={"data": input_data})
# Wait for all requests to complete
exec_network.wait()
#! [ie:start_async_and_wait]
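# Illustrative addition (not part of the original snippet): a non-blocking
# status poll on a single request; wait(0) returns a StatusCode immediately
# instead of blocking until completion.
status = exec_network.requests[0].wait(0)
if status == ie.StatusCode.OK:
    pass  # results for this request are ready in its output_blobs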
#! [ie:get_output_tensor]
# Get output blobs mapped to output layer names
output_blobs = infer_request.output_blobs
data = output_blobs["relu"].buffer
# Original I64 precision was converted to I32
assert data.dtype == np.int32
# Process output data
#! [ie:get_output_tensor]
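# Illustrative addition (not part of the original snippet): a typical
# classification post-processing step, assuming the "relu" output holds
# per-class scores.
top_class_id = int(np.argmax(data))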
path_to_extension_library = get_path_to_extension_library()
#! [ie:load_old_extension]
core.add_extension(path_to_extension_library, "CPU")
#! [ie:load_old_extension]
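# Illustrative addition (not part of the original snippet): once the
# extension is registered, a model using the custom layers it implements
# could be read as usual, e.g.:
# network_with_custom_ops = core.read_network(path_to_custom_model)  # hypothetical path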