From 68ba8873a2f87939616cb4ae99c65d28f22a4e29 Mon Sep 17 00:00:00 2001
From: Anastasia Kuporosova
Date: Fri, 15 Sep 2023 14:19:44 +0200
Subject: [PATCH] [Docs] Update python snippets with new properties imports
 (#19872)

---
 docs/snippets/cpu/Bfloat16Inference.py     | 10 +++--
 docs/snippets/cpu/ov_execution_mode.py     |  7 ++--
 .../cpu/ov_sparse_weights_decompression.py |  4 +-
 docs/snippets/gpu/compile_model.py         |  4 +-
 docs/snippets/ov_auto.py                   | 42 +++++++++++--------
 docs/snippets/ov_auto_batching.py          |  4 +-
 docs/snippets/ov_caching.py                |  6 ++-
 docs/snippets/ov_denormals.py              |  4 +-
 docs/snippets/ov_hetero.py                 | 13 ++++--
 docs/snippets/ov_multi.py                  | 20 +++++----
 docs/snippets/ov_properties_api.py         | 13 +++---
 docs/snippets/ov_properties_migration.py   | 14 ++++---
 12 files changed, 86 insertions(+), 55 deletions(-)

diff --git a/docs/snippets/cpu/Bfloat16Inference.py b/docs/snippets/cpu/Bfloat16Inference.py
index 2d3fabfe6c4..7d6516ea7d4 100644
--- a/docs/snippets/cpu/Bfloat16Inference.py
+++ b/docs/snippets/cpu/Bfloat16Inference.py
@@ -1,8 +1,10 @@
-# Copyright (C) 2022 Intel Corporation
+# Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
 import openvino as ov
+import openvino.properties.device as device
+import openvino.properties.hint as hints
 
 from snippets import get_model
 
@@ -10,16 +12,16 @@ model = get_model()
 
 #! [part0]
 core = ov.Core()
-cpu_optimization_capabilities = core.get_property("CPU", ov.properties.device.capabilities())
+cpu_optimization_capabilities = core.get_property("CPU", device.capabilities())
 #! [part0]
 
 #! [part1]
 core = ov.Core()
 compiled_model = core.compile_model(model, "CPU")
-inference_precision = core.get_property("CPU", ov.properties.hint.inference_precision())
+inference_precision = core.get_property("CPU", hints.inference_precision())
 #! [part1]
 
 #! [part2]
 core = ov.Core()
-core.set_property("CPU", {ov.properties.hint.inference_precision(): ov.Type.f32})
+core.set_property("CPU", {hints.inference_precision(): ov.Type.f32})
 #! [part2]
diff --git a/docs/snippets/cpu/ov_execution_mode.py b/docs/snippets/cpu/ov_execution_mode.py
index 2feff2777f7..5476426e77a 100644
--- a/docs/snippets/cpu/ov_execution_mode.py
+++ b/docs/snippets/cpu/ov_execution_mode.py
@@ -1,18 +1,19 @@
-# Copyright (C) 2023 Intel Corporation
+# Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
 #! [ov:execution_mode:part0]
 import openvino as ov
+import openvino.properties.hint as hints
 
 core = ov.Core()
 # in case of Accuracy
 core.set_property(
     "CPU",
-    {ov.properties.hint.execution_mode(): ov.properties.hint.ExecutionMode.ACCURACY},
+    {hints.execution_mode(): hints.ExecutionMode.ACCURACY},
 )
 # in case of Performance
 core.set_property(
     "CPU",
-    {ov.properties.hint.execution_mode(): ov.properties.hint.ExecutionMode.PERFORMANCE},
+    {hints.execution_mode(): hints.ExecutionMode.PERFORMANCE},
 )
 #! [ov:execution_mode:part0]
diff --git a/docs/snippets/cpu/ov_sparse_weights_decompression.py b/docs/snippets/cpu/ov_sparse_weights_decompression.py
index 76de90311d2..9a2a0f14808 100644
--- a/docs/snippets/cpu/ov_sparse_weights_decompression.py
+++ b/docs/snippets/cpu/ov_sparse_weights_decompression.py
@@ -10,8 +10,10 @@ model = get_model()
 device_name = "CPU"
 xml_path = "model.xml"
 # ! [ov:intel_cpu:sparse_weights_decompression:part0]
+import openvino.properties.intel_cpu as intel_cpu
+
 core = ov.Core()
-core.set_property("CPU", ov.properties.intel_cpu.sparse_weights_decompression_rate(0.8))
+core.set_property("CPU", intel_cpu.sparse_weights_decompression_rate(0.8))
 compiled_model = core.compile_model(model=model, device_name=device_name)
 # ! [ov:intel_cpu:sparse_weights_decompression:part0]
 assert compiled_model
diff --git a/docs/snippets/gpu/compile_model.py b/docs/snippets/gpu/compile_model.py
index 15e67d41ab0..152de661c1f 100644
--- a/docs/snippets/gpu/compile_model.py
+++ b/docs/snippets/gpu/compile_model.py
@@ -39,12 +39,14 @@ def main():
     #! [compile_model_batch_plugin]
 
     #! [compile_model_auto_batch]
+    import openvino.properties.hint as hints
+
     core = ov.Core()
     compiled_model = core.compile_model(
         model,
         "GPU",
         {
-            ov.properties.hint.performance_mode(): ov.properties.hint.PerformanceMode.THROUGHPUT,
+            hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
         },
     )
     #! [compile_model_auto_batch]
diff --git a/docs/snippets/ov_auto.py b/docs/snippets/ov_auto.py
index e0dc58587a4..35454e0527b 100644
--- a/docs/snippets/ov_auto.py
+++ b/docs/snippets/ov_auto.py
@@ -1,5 +1,13 @@
-import sys
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
 import openvino as ov
+import openvino.properties as properties
+import openvino.properties.device as device
+import openvino.properties.hint as hints
+import openvino.properties.log as log
+
 from openvino.inference_engine import IECore
 
 from utils import get_model, get_ngraph_model
@@ -23,13 +31,13 @@ def part0():
     compiled_model = core.compile_model(
         model=model,
         device_name="AUTO",
-        config={ov.properties.device.priorities(): "GPU,CPU"},
+        config={device.priorities(): "GPU,CPU"},
     )
 
     # Optional
     # the AUTO plugin is pre-configured (globally) with the explicit option:
     core.set_property(
-        device_name="AUTO", properties={ov.properties.device.priorities(): "GPU,CPU"}
+        device_name="AUTO", properties={device.priorities(): "GPU,CPU"}
     )
     #! [part0]
 
@@ -52,13 +60,13 @@ def part1():
     exec_net = ie.load_network(
         network=net,
         device_name="AUTO",
-        config={ov.properties.device.priorities(): "GPU,CPU"},
+        config={device.priorities(): "GPU,CPU"},
     )
 
     # Optional
     # the AUTO plugin is pre-configured (globally) with the explicit option:
     ie.set_config(
-        config={ov.properties.device.priorities(): "GPU,CPU"}, device_name="AUTO"
+        config={device.priorities(): "GPU,CPU"}, device_name="AUTO"
     )
     #! [part1]
 
@@ -73,7 +81,7 @@ def part3():
         model=model,
         device_name="AUTO",
         config={
-            ov.properties.hint.performance_mode(): ov.properties.hint.PerformanceMode.THROUGHPUT
+            hints.performance_mode(): hints.PerformanceMode.THROUGHPUT
         },
     )
     # To use the “LATENCY” mode:
     compiled_model = core.compile_model(
         model=model,
         device_name="AUTO",
         config={
-            ov.properties.hint.performance_mode(): ov.properties.hint.PerformanceMode.LATENCY
+            hints.performance_mode(): hints.PerformanceMode.LATENCY
         },
     )
     # To use the “CUMULATIVE_THROUGHPUT” mode:
     compiled_model = core.compile_model(
         model=model,
         device_name="AUTO",
         config={
-            ov.properties.hint.performance_mode(): ov.properties.hint.PerformanceMode.CUMULATIVE_THROUGHPUT
+            hints.performance_mode(): hints.PerformanceMode.CUMULATIVE_THROUGHPUT
         },
     )
     #! [part3]
@@ -103,19 +111,19 @@ def part4():
     compiled_model0 = core.compile_model(
         model=model,
         device_name="AUTO",
-        config={ov.properties.hint.model_priority(): ov.properties.hint.Priority.HIGH},
+        config={hints.model_priority(): hints.Priority.HIGH},
     )
     compiled_model1 = core.compile_model(
         model=model,
         device_name="AUTO",
         config={
-            ov.properties.hint.model_priority(): ov.properties.hint.Priority.MEDIUM
+            hints.model_priority(): hints.Priority.MEDIUM
         },
     )
     compiled_model2 = core.compile_model(
         model=model,
         device_name="AUTO",
-        config={ov.properties.hint.model_priority(): ov.properties.hint.Priority.LOW},
+        config={hints.model_priority(): hints.Priority.LOW},
     )
     # Assume that all the devices (CPU and GPUs) can support all the networks.
     # Result: compiled_model0 will use GPU.1, compiled_model1 will use GPU.0, compiled_model2 will use CPU.
@@ -124,19 +132,19 @@ def part4():
     compiled_model3 = core.compile_model(
         model=model,
         device_name="AUTO",
-        config={ov.properties.hint.model_priority(): ov.properties.hint.Priority.HIGH},
+        config={hints.model_priority(): hints.Priority.HIGH},
     )
     compiled_model4 = core.compile_model(
         model=model,
         device_name="AUTO",
         config={
-            ov.properties.hint.model_priority(): ov.properties.hint.Priority.MEDIUM
+            hints.model_priority(): hints.Priority.MEDIUM
         },
     )
     compiled_model5 = core.compile_model(
         model=model,
         device_name="AUTO",
-        config={ov.properties.hint.model_priority(): ov.properties.hint.Priority.LOW},
+        config={hints.model_priority(): hints.Priority.LOW},
     )
     # Assume that all the devices (CPU ang GPUs) can support all the networks.
     # Result: compiled_model3 will use GPU.1, compiled_model4 will use GPU.1, compiled_model5 will use GPU.0.
@@ -161,12 +169,12 @@ def part6():
     compiled_model = core.compile_model(
         model=model,
         device_name="AUTO",
-        config={ov.properties.log.level(): ov.properties.log.Level.DEBUG},
+        config={log.level(): log.Level.DEBUG},
     )
 
     # set log level with set_property and compile model
     core.set_property(
         device_name="AUTO",
-        properties={ov.properties.log.level(): ov.properties.log.Level.DEBUG},
+        properties={log.level(): log.Level.DEBUG},
     )
     compiled_model = core.compile_model(model=model, device_name="AUTO")
     #! [part6]
@@ -179,7 +187,7 @@ def part7():
     # compile a model on AUTO and set log level to debug
     compiled_model = core.compile_model(model=model, device_name="AUTO")
     # query the runtime target devices on which the inferences are being executed
-    execution_devices = compiled_model.get_property(ov.properties.execution_devices())
+    execution_devices = compiled_model.get_property(properties.execution_devices())
     #! [part7]
 
diff --git a/docs/snippets/ov_auto_batching.py b/docs/snippets/ov_auto_batching.py
index 5e4f3032f7d..7508c90337c 100644
--- a/docs/snippets/ov_auto_batching.py
+++ b/docs/snippets/ov_auto_batching.py
@@ -15,8 +15,8 @@ def main():
         return 0
 
     # [compile_model]
-    import openvino.runtime.properties as props
-    import openvino.runtime.properties.hint as hints
+    import openvino.properties as props
+    import openvino.properties.hint as hints
 
     config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT}
     compiled_model = core.compile_model(model, "GPU", config)
diff --git a/docs/snippets/ov_caching.py b/docs/snippets/ov_caching.py
index ef3fd0b4861..87d9907a8b9 100644
--- a/docs/snippets/ov_caching.py
+++ b/docs/snippets/ov_caching.py
@@ -5,7 +5,7 @@
 from utils import get_path_to_model, get_temp_dir
 
 import openvino as ov
-import openvino.runtime.properties as props
+import openvino.properties as props
 
 device_name = 'GNA'
 model_path = get_path_to_model()
@@ -35,6 +35,8 @@ compiled_model = core.compile_model(model=model_path, device_name=device_name)
 assert compiled_model
 # ! [ov:caching:part3]
+import openvino.properties.device as device
+
 # Find 'EXPORT_IMPORT' capability in supported capabilities
-caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, props.device.capabilities())
+caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, device.capabilities())
 # ! [ov:caching:part3]
diff --git a/docs/snippets/ov_denormals.py b/docs/snippets/ov_denormals.py
index 45b77483fcc..fe7cc8f797b 100644
--- a/docs/snippets/ov_denormals.py
+++ b/docs/snippets/ov_denormals.py
@@ -9,8 +9,10 @@ device_name = 'CPU'
 model = get_model()
 
 # ! [ov:intel_cpu:denormals_optimization:part0]
+import openvino.properties.intel_cpu as intel_cpu
+
 core = ov.Core()
-core.set_property("CPU", ov.properties.intel_cpu.denormals_optimization(True))
+core.set_property("CPU", intel_cpu.denormals_optimization(True))
 compiled_model = core.compile_model(model=model, device_name=device_name)
 # ! [ov:intel_cpu:denormals_optimization:part0]
 assert compiled_model
diff --git a/docs/snippets/ov_hetero.py b/docs/snippets/ov_hetero.py
index 95623a1f74a..aa0eac7784f 100644
--- a/docs/snippets/ov_hetero.py
+++ b/docs/snippets/ov_hetero.py
@@ -1,4 +1,5 @@
 import openvino as ov
+import openvino.properties as properties
 from utils import get_model
 
 def main():
@@ -35,16 +36,20 @@ def main():
     #! [fix_automatic_affinities]
 
     #! [compile_model]
+    import openvino.properties.device as device
+
     compiled_model = core.compile_model(model, device_name="HETERO:GPU,CPU")
     # device priorities via configuration property
     compiled_model = core.compile_model(
-        model, device_name="HETERO", config={ov.properties.device.priorities(): "GPU,CPU"}
+        model, device_name="HETERO", config={device.priorities(): "GPU,CPU"}
     )
     #! [compile_model]
 
     #! [configure_fallback_devices]
-    core.set_property("HETERO", {ov.properties.device.priorities(): "GPU,CPU"})
-    core.set_property("GPU", {ov.properties.enable_profiling(): True})
-    core.set_property("CPU", {ov.properties.hint.inference_precision(): ov.Type.f32})
+    import openvino.properties.hint as hints
+
+    core.set_property("HETERO", {device.priorities(): "GPU,CPU"})
+    core.set_property("GPU", {properties.enable_profiling(): True})
+    core.set_property("CPU", {hints.inference_precision(): ov.Type.f32})
     compiled_model = core.compile_model(model=model, device_name="HETERO")
     #! [configure_fallback_devices]
diff --git a/docs/snippets/ov_multi.py b/docs/snippets/ov_multi.py
index 19ec07d8f9a..bae82aa3d47 100644
--- a/docs/snippets/ov_multi.py
+++ b/docs/snippets/ov_multi.py
@@ -1,5 +1,9 @@
-import sys
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
 import openvino as ov
+import openvino.properties as properties
+import openvino.properties.device as device
 from utils import get_model
 
 model = get_model()
@@ -13,7 +17,7 @@ def MULTI_0():
     # Pre-configure MULTI globally with explicitly defined devices,
     # and compile the model on MULTI using the newly specified default device list.
     core.set_property(
-        device_name="MULTI", properties={ov.properties.device.priorities(): "GPU,CPU"}
+        device_name="MULTI", properties={device.priorities(): "GPU,CPU"}
     )
     compiled_model = core.compile_model(model=model, device_name="MULTI")
 
@@ -24,7 +28,7 @@ def MULTI_0():
     compiled_model = core.compile_model(
         model=model,
         device_name="MULTI",
-        config={ov.properties.device.priorities(): "GPU,CPU"},
+        config={device.priorities(): "GPU,CPU"},
     )
     #! [MULTI_0]
 
@@ -34,22 +38,22 @@ def MULTI_1():
     core = ov.Core()
     core.set_property(
-        device_name="MULTI", properties={ov.properties.device.priorities(): "CPU,GPU"}
+        device_name="MULTI", properties={device.priorities(): "CPU,GPU"}
     )
 
     # Once the priority list is set, you can alter it on the fly:
     # reverse the order of priorities
     core.set_property(
-        device_name="MULTI", properties={ov.properties.device.priorities(): "GPU,CPU"}
+        device_name="MULTI", properties={device.priorities(): "GPU,CPU"}
     )
 
     # exclude some devices (in this case, CPU)
     core.set_property(
-        device_name="MULTI", properties={ov.properties.device.priorities(): "GPU"}
+        device_name="MULTI", properties={device.priorities(): "GPU"}
     )
 
     # bring back the excluded devices
     core.set_property(
-        device_name="MULTI", properties={ov.properties.device.priorities(): "GPU,CPU"}
+        device_name="MULTI", properties={device.priorities(): "GPU,CPU"}
     )
 
     # You cannot add new devices on the fly!
@@ -105,7 +109,7 @@ def MULTI_4():
 
     # Optionally, query the optimal number of requests:
     nireq = compiled_model.get_property(
-        ov.properties.optimal_number_of_infer_requests()
+        properties.optimal_number_of_infer_requests()
     )
     #! [MULTI_4]
diff --git a/docs/snippets/ov_properties_api.py b/docs/snippets/ov_properties_api.py
index cc9645a7682..369e88ad572 100644
--- a/docs/snippets/ov_properties_api.py
+++ b/docs/snippets/ov_properties_api.py
@@ -3,8 +3,9 @@
 #
 
 import openvino as ov
-import openvino.runtime.properties as props
-import openvino.runtime.properties.hint as hints
+import openvino.properties as props
+import openvino.properties.hint as hints
+import openvino.properties.device as device
 
 from utils import get_model
 
@@ -16,11 +17,11 @@ def main():
     # [get_available_devices]
 
     # [hetero_priorities]
-    device_priorites = core.get_property("HETERO", props.device.priorities())
+    device_priorites = core.get_property("HETERO", device.priorities())
     # [hetero_priorities]
 
     # [cpu_device_name]
-    cpu_device_name = core.get_property("CPU", props.device.full_name())
+    cpu_device_name = core.get_property("CPU", device.full_name())
     # [cpu_device_name]
 
     model = get_model()
@@ -56,8 +57,8 @@ def main():
         return 0
 
     # [multi_device]
-    config = {props.device.priorities(): "CPU,GPU"}
+    config = {device.priorities(): "CPU,GPU"}
     compiled_model = core.compile_model(model, "MULTI", config)
     # change the order of priorities
-    compiled_model.set_property({props.device.priorities(): "GPU,CPU"})
+    compiled_model.set_property({device.priorities(): "GPU,CPU"})
     # [multi_device]
diff --git a/docs/snippets/ov_properties_migration.py b/docs/snippets/ov_properties_migration.py
index a590e3053e8..88addb6a72d 100644
--- a/docs/snippets/ov_properties_migration.py
+++ b/docs/snippets/ov_properties_migration.py
@@ -3,8 +3,10 @@
 #
 
 import openvino as ov
-import openvino.runtime.properties as props
-import openvino.runtime.properties.hint as hints
+import openvino.properties as props
+import openvino.properties.hint as hints
+import openvino.properties.device as device
+import openvino.properties.streams as streams
 
 from utils import get_model
 
@@ -23,7 +25,7 @@ def main():
     # ! [core_compile_model]
     compiled_model = core.compile_model(model=model, device_name="MULTI", config=
         {
-            props.device.priorities(): "GPU,CPU",
+            device.priorities(): "GPU,CPU",
             hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
             hints.inference_precision(): ov.Type.f32
         })
     # ! [core_compile_model]
 
     # ! [compiled_model_set_property]
     # turn CPU off for multi-device execution
-    compiled_model.set_property(properties={props.device.priorities(): "GPU"})
+    compiled_model.set_property(properties={device.priorities(): "GPU"})
     # ! [compiled_model_set_property]
 
     # ! [core_get_rw_property]
-    num_streams = core.get_property("CPU", props.streams.num())
+    num_streams = core.get_property("CPU", streams.num())
     # ! [core_get_rw_property]
 
     # ! [core_get_ro_property]
-    full_device_name = core.get_property("CPU", props.device.full_name())
+    full_device_name = core.get_property("CPU", device.full_name())
     # ! [core_get_ro_property]
 
     # ! [compiled_model_get_rw_property]
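
Note: a minimal standalone sketch of the import pattern this patch applies across the snippets, for quick reference. The model path and device list below are illustrative placeholders, not values taken from the patch.

import openvino as ov
import openvino.properties.device as device
import openvino.properties.hint as hints

core = ov.Core()
model = core.read_model("model.xml")  # placeholder model path

# Properties are addressed through the dedicated openvino.properties
# submodules instead of the former ov.properties.* attribute chain.
compiled_model = core.compile_model(
    model,
    "AUTO",
    {
        device.priorities(): "GPU,CPU",  # illustrative device list
        hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
    },
)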