[PyOV][Docs] Update docs with new way of using properties (#20116)

Anastasia Kuporosova 2023-10-03 06:59:57 +02:00 committed by Alexander Nesterov
parent 64b6de4c43
commit df9fa10312
10 changed files with 64 additions and 64 deletions
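
The pattern is the same across all ten files: OpenVINO Python properties are no longer called as functions to produce configuration keys; the property objects themselves are now used directly as dictionary keys and as arguments to get_property/set_property. A minimal before/after sketch of the new usage (imports and aliases match the snippets below; the "CPU" device and the THROUGHPUT hint are just illustrative values):

import openvino as ov
import openvino.properties.hint as hints

core = ov.Core()

# old style: the property was called to obtain its key
# config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT}

# new style: the property object itself is the key
config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT}

# reading a property follows the same pattern
inference_precision = core.get_property("CPU", hints.inference_precision)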

View File

@@ -12,16 +12,16 @@ model = get_model()
#! [part0]
core = ov.Core()
-cpu_optimization_capabilities = core.get_property("CPU", device.capabilities())
+cpu_optimization_capabilities = core.get_property("CPU", device.capabilities)
#! [part0]
#! [part1]
core = ov.Core()
compiled_model = core.compile_model(model, "CPU")
-inference_precision = core.get_property("CPU", hints.inference_precision())
+inference_precision = core.get_property("CPU", hints.inference_precision)
#! [part1]
#! [part2]
core = ov.Core()
-core.set_property("CPU", {hints.inference_precision(): ov.Type.f32})
+core.set_property("CPU", {hints.inference_precision: ov.Type.f32})
#! [part2]

View File

@@ -9,11 +9,11 @@ core = ov.Core()
# in case of Accuracy
core.set_property(
    "CPU",
-    {hints.execution_mode(): hints.ExecutionMode.ACCURACY},
+    {hints.execution_mode: hints.ExecutionMode.ACCURACY},
)
# in case of Performance
core.set_property(
    "CPU",
-    {hints.execution_mode(): hints.ExecutionMode.PERFORMANCE},
+    {hints.execution_mode: hints.ExecutionMode.PERFORMANCE},
)
#! [ov:execution_mode:part0]

View File

@@ -46,7 +46,7 @@ def main():
        model,
        "GPU",
        {
-            hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
+            hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
        },
    )
    #! [compile_model_auto_batch]

View File

@@ -31,13 +31,13 @@ def part0():
    compiled_model = core.compile_model(
        model=model,
        device_name="AUTO",
-        config={device.priorities(): "GPU,CPU"},
+        config={device.priorities: "GPU,CPU"},
    )
    # Optional
    # the AUTO plugin is pre-configured (globally) with the explicit option:
    core.set_property(
-        device_name="AUTO", properties={device.priorities(): "GPU,CPU"}
+        device_name="AUTO", properties={device.priorities: "GPU,CPU"}
    )
    #! [part0]
@@ -60,13 +60,13 @@ def part1():
    exec_net = ie.load_network(
        network=net,
        device_name="AUTO",
-        config={device.priorities(): "GPU,CPU"},
+        config={"MULTI_DEVICE_PRIORITIES": "GPU,CPU"},
    )
    # Optional
    # the AUTO plugin is pre-configured (globally) with the explicit option:
    ie.set_config(
-        config={device.priorities(): "GPU,CPU"}, device_name="AUTO"
+        config={"MULTI_DEVICE_PRIORITIES": "GPU,CPU"}, device_name="AUTO"
    )
    #! [part1]
@@ -81,7 +81,7 @@ def part3():
        model=model,
        device_name="AUTO",
        config={
-            hints.performance_mode(): hints.PerformanceMode.THROUGHPUT
+            hints.performance_mode: hints.PerformanceMode.THROUGHPUT
        },
    )
    # To use the “LATENCY” mode:
@@ -89,7 +89,7 @@ def part3():
        model=model,
        device_name="AUTO",
        config={
-            hints.performance_mode(): hints.PerformanceMode.LATENCY
+            hints.performance_mode: hints.PerformanceMode.LATENCY
        },
    )
    # To use the “CUMULATIVE_THROUGHPUT” mode:
@@ -97,7 +97,7 @@ def part3():
        model=model,
        device_name="AUTO",
        config={
-            hints.performance_mode(): hints.PerformanceMode.CUMULATIVE_THROUGHPUT
+            hints.performance_mode: hints.PerformanceMode.CUMULATIVE_THROUGHPUT
        },
    )
    #! [part3]
@@ -111,19 +111,19 @@ def part4():
    compiled_model0 = core.compile_model(
        model=model,
        device_name="AUTO",
-        config={hints.model_priority(): hints.Priority.HIGH},
+        config={hints.model_priority: hints.Priority.HIGH},
    )
    compiled_model1 = core.compile_model(
        model=model,
        device_name="AUTO",
        config={
-            hints.model_priority(): hints.Priority.MEDIUM
+            hints.model_priority: hints.Priority.MEDIUM
        },
    )
    compiled_model2 = core.compile_model(
        model=model,
        device_name="AUTO",
-        config={hints.model_priority(): hints.Priority.LOW},
+        config={hints.model_priority: hints.Priority.LOW},
    )
    # Assume that all the devices (CPU and GPUs) can support all the networks.
    # Result: compiled_model0 will use GPU.1, compiled_model1 will use GPU.0, compiled_model2 will use CPU.
@@ -132,19 +132,19 @@ def part4():
    compiled_model3 = core.compile_model(
        model=model,
        device_name="AUTO",
-        config={hints.model_priority(): hints.Priority.HIGH},
+        config={hints.model_priority: hints.Priority.HIGH},
    )
    compiled_model4 = core.compile_model(
        model=model,
        device_name="AUTO",
        config={
-            hints.model_priority(): hints.Priority.MEDIUM
+            hints.model_priority: hints.Priority.MEDIUM
        },
    )
    compiled_model5 = core.compile_model(
        model=model,
        device_name="AUTO",
-        config={hints.model_priority(): hints.Priority.LOW},
+        config={hints.model_priority: hints.Priority.LOW},
    )
    # Assume that all the devices (CPU and GPUs) can support all the networks.
    # Result: compiled_model3 will use GPU.1, compiled_model4 will use GPU.1, compiled_model5 will use GPU.0.
@@ -169,12 +169,12 @@ def part6():
    compiled_model = core.compile_model(
        model=model,
        device_name="AUTO",
-        config={log.level(): log.Level.DEBUG},
+        config={log.level: log.Level.DEBUG},
    )
    # set log level with set_property and compile model
    core.set_property(
        device_name="AUTO",
-        properties={log.level(): log.Level.DEBUG},
+        properties={log.level: log.Level.DEBUG},
    )
    compiled_model = core.compile_model(model=model, device_name="AUTO")
    #! [part6]
@@ -187,7 +187,7 @@ def part7():
    # compile a model on AUTO and set log level to debug
    compiled_model = core.compile_model(model=model, device_name="AUTO")
    # query the runtime target devices on which the inferences are being executed
-    execution_devices = compiled_model.get_property(properties.execution_devices())
+    execution_devices = compiled_model.get_property(properties.execution_devices)
    #! [part7]

View File

@@ -18,37 +18,37 @@ def main():
    import openvino.properties as props
    import openvino.properties.hint as hints
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT}
    compiled_model = core.compile_model(model, "GPU", config)
    # [compile_model]
    # [compile_model_no_auto_batching]
    # disabling the automatic batching
    # leaving intact other configuration options that the device selects for the 'throughput' hint
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
-              hints.allow_auto_batching(): False}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
+              hints.allow_auto_batching: False}
    compiled_model = core.compile_model(model, "GPU", config)
    # [compile_model_no_auto_batching]
    # [query_optimal_num_requests]
    # when the batch size is automatically selected by the implementation
    # it is important to query/create and run a sufficient number of requests
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT}
    compiled_model = core.compile_model(model, "GPU", config)
-    num_requests = compiled_model.get_property(props.optimal_number_of_infer_requests())
+    num_requests = compiled_model.get_property(props.optimal_number_of_infer_requests)
    # [query_optimal_num_requests]
    # [hint_num_requests]
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
-              hints.num_requests(): "4"}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
+              hints.num_requests: "4"}
    # limiting the available parallel slack for the 'throughput'
    # so that certain parameters (like selected batch size) are automatically accommodated accordingly
    compiled_model = core.compile_model(model, "GPU", config)
    # [hint_num_requests]
    # [hint_plus_low_level]
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
-              props.inference_num_threads(): "4"}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
+              props.inference_num_threads: "4"}
    # limiting the available parallel slack for the 'throughput'
    # so that certain parameters (like selected batch size) are automatically accommodated accordingly
    compiled_model = core.compile_model(model, "CPU", config)

View File

@@ -12,7 +12,7 @@ model_path = get_path_to_model()
path_to_cache_dir = get_temp_dir()
# ! [ov:caching:part0]
core = ov.Core()
-core.set_property({props.cache_dir(): path_to_cache_dir})
+core.set_property({props.cache_dir: path_to_cache_dir})
model = core.read_model(model=model_path)
compiled_model = core.compile_model(model=model, device_name=device_name)
# ! [ov:caching:part0]
@@ -28,7 +28,7 @@ assert compiled_model
# ! [ov:caching:part2]
core = ov.Core()
-core.set_property({props.cache_dir(): path_to_cache_dir})
+core.set_property({props.cache_dir: path_to_cache_dir})
compiled_model = core.compile_model(model=model_path, device_name=device_name)
# ! [ov:caching:part2]
@@ -38,5 +38,5 @@ assert compiled_model
import openvino.properties.device as device
# Find 'EXPORT_IMPORT' capability in supported capabilities
-caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, device.capabilities())
+caching_supported = 'EXPORT_IMPORT' in core.get_property(device_name, device.capabilities)
# ! [ov:caching:part3]

View File

@@ -41,15 +41,15 @@ def main():
    compiled_model = core.compile_model(model, device_name="HETERO:GPU,CPU")
    # device priorities via configuration property
    compiled_model = core.compile_model(
-        model, device_name="HETERO", config={device.priorities(): "GPU,CPU"}
+        model, device_name="HETERO", config={device.priorities: "GPU,CPU"}
    )
    #! [compile_model]
    #! [configure_fallback_devices]
    import openvino.properties.hint as hints
-    core.set_property("HETERO", {device.priorities(): "GPU,CPU"})
-    core.set_property("GPU", {properties.enable_profiling(): True})
-    core.set_property("CPU", {hints.inference_precision(): ov.Type.f32})
+    core.set_property("HETERO", {device.priorities: "GPU,CPU"})
+    core.set_property("GPU", {properties.enable_profiling: True})
+    core.set_property("CPU", {hints.inference_precision: ov.Type.f32})
    compiled_model = core.compile_model(model=model, device_name="HETERO")
    #! [configure_fallback_devices]

View File

@@ -17,7 +17,7 @@ def MULTI_0():
    # Pre-configure MULTI globally with explicitly defined devices,
    # and compile the model on MULTI using the newly specified default device list.
    core.set_property(
-        device_name="MULTI", properties={device.priorities(): "GPU,CPU"}
+        device_name="MULTI", properties={device.priorities: "GPU,CPU"}
    )
    compiled_model = core.compile_model(model=model, device_name="MULTI")
@@ -28,7 +28,7 @@ def MULTI_0():
    compiled_model = core.compile_model(
        model=model,
        device_name="MULTI",
-        config={device.priorities(): "GPU,CPU"},
+        config={device.priorities: "GPU,CPU"},
    )
    #! [MULTI_0]
@@ -38,22 +38,22 @@ def MULTI_1():
    core = ov.Core()
    core.set_property(
-        device_name="MULTI", properties={device.priorities(): "CPU,GPU"}
+        device_name="MULTI", properties={device.priorities: "CPU,GPU"}
    )
    # Once the priority list is set, you can alter it on the fly:
    # reverse the order of priorities
    core.set_property(
-        device_name="MULTI", properties={device.priorities(): "GPU,CPU"}
+        device_name="MULTI", properties={device.priorities: "GPU,CPU"}
    )
    # exclude some devices (in this case, CPU)
    core.set_property(
-        device_name="MULTI", properties={device.priorities(): "GPU"}
+        device_name="MULTI", properties={device.priorities: "GPU"}
    )
    # bring back the excluded devices
    core.set_property(
-        device_name="MULTI", properties={device.priorities(): "GPU,CPU"}
+        device_name="MULTI", properties={device.priorities: "GPU,CPU"}
    )
    # You cannot add new devices on the fly!
@@ -109,7 +109,7 @@ def MULTI_4():
    # Optionally, query the optimal number of requests:
    nireq = compiled_model.get_property(
-        properties.optimal_number_of_infer_requests()
+        properties.optimal_number_of_infer_requests
    )
    #! [MULTI_4]

View File

@@ -17,48 +17,48 @@ def main():
    # [get_available_devices]
    # [hetero_priorities]
-    device_priorites = core.get_property("HETERO", device.priorities())
+    device_priorites = core.get_property("HETERO", device.priorities)
    # [hetero_priorities]
    # [cpu_device_name]
-    cpu_device_name = core.get_property("CPU", device.full_name())
+    cpu_device_name = core.get_property("CPU", device.full_name)
    # [cpu_device_name]
    model = get_model()
    # [compile_model_with_property]
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
-              hints.inference_precision(): ov.Type.f32}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
+              hints.inference_precision: ov.Type.f32}
    compiled_model = core.compile_model(model, "CPU", config)
    # [compile_model_with_property]
    # [optimal_number_of_infer_requests]
    compiled_model = core.compile_model(model, "CPU")
-    nireq = compiled_model.get_property(props.optimal_number_of_infer_requests())
+    nireq = compiled_model.get_property(props.optimal_number_of_infer_requests)
    # [optimal_number_of_infer_requests]
    # [core_set_property_then_compile]
    # latency hint is the default for CPU
-    core.set_property("CPU", {hints.performance_mode(): hints.PerformanceMode.LATENCY})
+    core.set_property("CPU", {hints.performance_mode: hints.PerformanceMode.LATENCY})
    # compiled with latency configuration hint
    compiled_model_latency = core.compile_model(model, "CPU")
    # compiled with overridden performance hint value
-    config = {hints.performance_mode(): hints.PerformanceMode.THROUGHPUT}
+    config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT}
    compiled_model_thrp = core.compile_model(model, "CPU", config)
    # [core_set_property_then_compile]
    # [inference_num_threads]
    compiled_model = core.compile_model(model, "CPU")
-    nthreads = compiled_model.get_property(props.inference_num_threads())
+    nthreads = compiled_model.get_property(props.inference_num_threads)
    # [inference_num_threads]
    if "GPU" not in available_devices:
        return 0
    # [multi_device]
-    config = {device.priorities(): "CPU,GPU"}
+    config = {device.priorities: "CPU,GPU"}
    compiled_model = core.compile_model(model, "MULTI", config)
    # change the order of priorities
-    compiled_model.set_property({device.priorities(): "GPU,CPU"})
+    compiled_model.set_property({device.priorities: "GPU,CPU"})
    # [multi_device]

View File

@@ -14,7 +14,7 @@ def main():
    core = ov.Core()
    # ! [core_set_property]
-    core.set_property(device_name="CPU", properties={props.enable_profiling(): True})
+    core.set_property(device_name="CPU", properties={props.enable_profiling: True})
    # ! [core_set_property]
    model = get_model()
@@ -25,31 +25,31 @@ def main():
    # ! [core_compile_model]
    compiled_model = core.compile_model(model=model, device_name="MULTI", config=
    {
-        device.priorities(): "GPU,CPU",
-        hints.performance_mode(): hints.PerformanceMode.THROUGHPUT,
-        hints.inference_precision(): ov.Type.f32
+        device.priorities: "GPU,CPU",
+        hints.performance_mode: hints.PerformanceMode.THROUGHPUT,
+        hints.inference_precision: ov.Type.f32
    })
    # ! [core_compile_model]
    # ! [compiled_model_set_property]
    # turn CPU off for multi-device execution
-    compiled_model.set_property(properties={device.priorities(): "GPU"})
+    compiled_model.set_property(properties={device.priorities: "GPU"})
    # ! [compiled_model_set_property]
    # ! [core_get_rw_property]
-    num_streams = core.get_property("CPU", streams.num())
+    num_streams = core.get_property("CPU", streams.num)
    # ! [core_get_rw_property]
    # ! [core_get_ro_property]
-    full_device_name = core.get_property("CPU", device.full_name())
+    full_device_name = core.get_property("CPU", device.full_name)
    # ! [core_get_ro_property]
    # ! [compiled_model_get_rw_property]
-    perf_mode = compiled_model.get_property(hints.performance_mode())
+    perf_mode = compiled_model.get_property(hints.performance_mode)
    # ! [compiled_model_get_rw_property]
    # ! [compiled_model_get_ro_property]
-    nireq = compiled_model.get_property(props.optimal_number_of_infer_requests())
+    nireq = compiled_model.get_property(props.optimal_number_of_infer_requests)
    # ! [compiled_model_get_ro_property]
import ngraph as ng