# Documentation code snippets: working with dynamic shapes in the OpenVINO Python API.
# Regions delimited by "#! [tag]" markers are extracted verbatim into the docs.
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np

# Project-local helper; presumably returns an ov.Model with dynamic
# input dimensions (see utils module) -- used by all snippets below.
from utils import get_dynamic_model

#! [import]
import openvino as ov
#! [import]

model = get_dynamic_model()
|
|
|
|
#! [reshape_undefined]
core = ov.Core()

# Set first dimension to be dynamic while keeping others static.
# -1 marks a dimension as dynamic with no bounds.
model.reshape([-1, 3, 224, 224])

# Or, set third and fourth dimensions as dynamic
model.reshape([1, 3, -1, -1])
#! [reshape_undefined]
|
|
|
|
#! [reshape_bounds]
# Example 1 - set first dimension as dynamic (no bounds) and third and fourth dimensions to range of 112..448
model.reshape([-1, 3, (112, 448), (112, 448)])

# Example 2 - Set first dimension to a range of 1..8 and third and fourth dimensions to range of 112..448
# A (min, max) tuple declares a dynamic dimension with inclusive bounds.
model.reshape([(1, 8), 3, (112, 448), (112, 448)])
#! [reshape_bounds]

# Start from a fresh dynamic model for the next snippet
model = get_dynamic_model()
|
|
|
|
#! [print_dynamic]
# Print output partial shape
print(model.output().partial_shape)

# Print input partial shape
print(model.input().partial_shape)
#! [print_dynamic]

# Start from a fresh dynamic model for the next snippet
model = get_dynamic_model()
|
|
|
|
#! [detect_dynamic]
if model.input(0).partial_shape.is_dynamic:
    # input is dynamic
    pass

if model.output(0).partial_shape.is_dynamic:
    # output is dynamic
    pass

if model.output(0).partial_shape[1].is_dynamic:
    # dimension at index 1 (the second dimension) of output is dynamic
    pass
#! [detect_dynamic]
|
|
|
|
# Compile with no explicit device argument; device selection is left to the runtime.
executable = core.compile_model(model)
infer_request = executable.create_infer_request()
# NOTE(review): assumes the model's input tensor is named "input" --
# confirm against utils.get_dynamic_model.
input_tensor_name = "input"

#! [set_input_tensor]
# For first inference call, prepare an input tensor with 1x128 shape and run inference request
input_data1 = np.ones(shape=[1,128])
infer_request.infer({input_tensor_name: input_data1})

# Get resulting outputs
output_tensor1 = infer_request.get_output_tensor()
# Slice to read the output data; note that [:] on the underlying
# buffer is a view, not necessarily an independent copy.
output_data1 = output_tensor1.data[:]

# For second inference call, prepare a 1x200 input tensor and run inference request
# (a different shape is accepted because the model's input is dynamic)
input_data2 = np.ones(shape=[1,200])
infer_request.infer({input_tensor_name: input_data2})

# Get resulting outputs
output_tensor2 = infer_request.get_output_tensor()
output_data2 = output_tensor2.data[:]
#! [set_input_tensor]
|
|
|
|
# Fresh request for the pre-allocated-tensor workflow below.
infer_request = executable.create_infer_request()

#! [get_input_tensor]
# Get the tensor, shape is not initialized
input_tensor = infer_request.get_input_tensor()

# Set shape is required
input_tensor.shape = [1, 128]
# ... write values to input_tensor

infer_request.infer()
output_tensor = infer_request.get_output_tensor()
data1 = output_tensor.data[:]

# The second inference call, repeat steps:

# Set a new shape, may reallocate tensor memory
input_tensor.shape = [1, 200]
# ... write values to input_tensor

infer_request.infer()
# NOTE(review): output_tensor obtained after the first infer() is reused
# here after the reshape; the snippet presumes it stays valid -- confirm.
data2 = output_tensor.data[:]
#! [get_input_tensor]
|
|
|
|
# Start from a fresh dynamic model for the next snippet
model = get_dynamic_model()

#! [check_inputs]
# Print model input layer info
for input_layer in model.inputs:
    print(input_layer.names, input_layer.partial_shape)
#! [check_inputs]
|
|
|
|
#! [reshape_multiple_inputs]
# Assign dynamic shapes to second dimension in every input layer
shapes = {}
for input_layer in model.inputs:
    # Start from the current partial shape, then mark the dimension
    # at index 1 as dynamic (-1).
    shapes[input_layer] = input_layer.partial_shape
    shapes[input_layer][1] = -1
model.reshape(shapes)
#! [reshape_multiple_inputs]