[PyOV] Import properties from openvino (#19815)

Author: Anastasia Kuporosova, 2023-09-15 10:17:44 +02:00 (committed by GitHub)
parent 619c4bfce1
commit d62348337f
10 changed files with 328 additions and 182 deletions
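
In practice, this change lets property helpers be imported straight from openvino.properties and its submodules (the new package files listed below) instead of through the properties attribute re-exported by the openvino package. A minimal sketch of the usage the updated tests exercise, assuming an installed OpenVINO build with a CPU plugin; the cache directory and the configured values are illustrative:

import openvino.properties as props
import openvino.properties.hint as hints
import openvino.properties.streams as streams
from openvino import Core

core = Core()
# Called with no argument, a property helper returns its raw key string,
# so the helpers can be used directly as keys in a configuration dict.
core.set_property("CPU", {
    props.cache_dir(): "./cache",                              # "CACHE_DIR"
    hints.performance_mode(): hints.PerformanceMode.LATENCY,   # "PERFORMANCE_HINT"
    streams.num(): 5,                                          # "NUM_STREAMS"
})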

@@ -0,0 +1,36 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Enums
from openvino._pyopenvino.properties import Affinity
# Properties
from openvino._pyopenvino.properties import enable_profiling
from openvino._pyopenvino.properties import cache_dir
from openvino._pyopenvino.properties import auto_batch_timeout
from openvino._pyopenvino.properties import num_streams
from openvino._pyopenvino.properties import inference_num_threads
from openvino._pyopenvino.properties import compilation_num_threads
from openvino._pyopenvino.properties import affinity
from openvino._pyopenvino.properties import force_tbb_terminate
from openvino._pyopenvino.properties import enable_mmap
from openvino._pyopenvino.properties import supported_properties
from openvino._pyopenvino.properties import available_devices
from openvino._pyopenvino.properties import model_name
from openvino._pyopenvino.properties import optimal_number_of_infer_requests
from openvino._pyopenvino.properties import range_for_streams
from openvino._pyopenvino.properties import optimal_batch_size
from openvino._pyopenvino.properties import max_batch_size
from openvino._pyopenvino.properties import range_for_async_infer_requests
from openvino._pyopenvino.properties import execution_devices
from openvino._pyopenvino.properties import loaded_from_cache
# Submodules
from openvino.runtime.properties import hint
from openvino.runtime.properties import intel_cpu
from openvino.runtime.properties import intel_gpu
from openvino.runtime.properties import intel_auto
from openvino.runtime.properties import device
from openvino.runtime.properties import log
from openvino.runtime.properties import streams
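
Every helper re-exported here keeps its dual behaviour: called without arguments it yields the property key, called with a value it yields a (key, OVAny) pair. A small sketch mirroring test_properties_rw_base in the updated test file below:

import openvino.properties as props
from openvino import OVAny

# No argument: just the key string.
assert props.cache_dir() == "CACHE_DIR"

# With a value: a (key, OVAny(value)) tuple ready for set_property().
assert props.cache_dir("./test_dir") == ("CACHE_DIR", OVAny("./test_dir"))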

@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Enums
from openvino._pyopenvino.properties.device import Type
# Properties
from openvino._pyopenvino.properties.device import priorities
from openvino._pyopenvino.properties.device import id
from openvino._pyopenvino.properties.device import full_name
from openvino._pyopenvino.properties.device import architecture
from openvino._pyopenvino.properties.device import type
from openvino._pyopenvino.properties.device import gops
from openvino._pyopenvino.properties.device import thermal
from openvino._pyopenvino.properties.device import capabilities
from openvino._pyopenvino.properties.device import uuid
from openvino._pyopenvino.properties.device import luid
from openvino._pyopenvino.properties.device import properties
# Classes
from openvino._pyopenvino.properties.device import Capability
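
Besides the plain read-only keys, the device submodule exposes the composite helpers priorities() and properties(). A sketch mirroring the corresponding assertions in the updated test file below:

import openvino.properties.device as device
import openvino.properties.streams as streams
from openvino import OVAny

# priorities() accepts individual device names and joins them.
assert device.priorities("CPU", "GPU") == ("MULTI_DEVICE_PRIORITIES", OVAny("CPU,GPU,"))

# properties() wraps a nested per-device configuration.
assert device.properties({"CPU": {streams.num(): 2}}) == (
    "DEVICE_PROPERTIES", OVAny({"CPU": {"NUM_STREAMS": 2}}))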

@@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Enums
from openvino._pyopenvino.properties.hint import Priority
from openvino._pyopenvino.properties.hint import SchedulingCoreType
from openvino._pyopenvino.properties.hint import ExecutionMode
from openvino.runtime.properties.hint.overloads import PerformanceMode
# Properties
from openvino._pyopenvino.properties.hint import inference_precision
from openvino._pyopenvino.properties.hint import model_priority
from openvino._pyopenvino.properties.hint import performance_mode
from openvino._pyopenvino.properties.hint import enable_cpu_pinning
from openvino._pyopenvino.properties.hint import scheduling_core_type
from openvino._pyopenvino.properties.hint import enable_hyper_threading
from openvino._pyopenvino.properties.hint import execution_mode
from openvino._pyopenvino.properties.hint import num_requests
from openvino._pyopenvino.properties.hint import model
from openvino._pyopenvino.properties.hint import allow_auto_batching
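
Note that PerformanceMode is taken from openvino.runtime.properties.hint.overloads rather than from the binary module, so the deprecated UNDEFINED member still resolves but warns, as test_deprecation below checks. A sketch of that behaviour using the standard warnings module:

import warnings
import openvino.properties.hint as hints

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = hints.PerformanceMode.UNDEFINED  # deprecated alias kept for compatibility

assert any(issubclass(w.category, DeprecationWarning) for w in caught)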

@@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino._pyopenvino.properties.intel_auto import device_bind_buffer
from openvino._pyopenvino.properties.intel_auto import enable_startup_fallback
from openvino._pyopenvino.properties.intel_auto import enable_runtime_fallback

@@ -0,0 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino._pyopenvino.properties.intel_cpu import denormals_optimization
from openvino._pyopenvino.properties.intel_cpu import sparse_weights_decompression_rate

@@ -0,0 +1,15 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Properties
from openvino._pyopenvino.properties.intel_gpu import device_total_mem_size
from openvino._pyopenvino.properties.intel_gpu import uarch_version
from openvino._pyopenvino.properties.intel_gpu import execution_units_count
from openvino._pyopenvino.properties.intel_gpu import memory_statistics
from openvino._pyopenvino.properties.intel_gpu import enable_loop_unrolling
from openvino._pyopenvino.properties.intel_gpu import disable_winograd_convolution
# Classes
from openvino._pyopenvino.properties.intel_gpu import MemoryType
from openvino._pyopenvino.properties.intel_gpu import CapabilityGPU

@@ -0,0 +1,12 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Properties
from openvino._pyopenvino.properties.intel_gpu.hint import queue_throttle
from openvino._pyopenvino.properties.intel_gpu.hint import queue_priority
from openvino._pyopenvino.properties.intel_gpu.hint import host_task_priority
from openvino._pyopenvino.properties.intel_gpu.hint import available_device_mem
# Classes
from openvino._pyopenvino.properties.intel_gpu.hint import ThrottleLevel
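
ThrottleLevel shares its member values with the hint Priority enum, which is what the conflicting-enum test below verifies. A short sketch of building the GPU queue throttle property:

import openvino.properties.intel_gpu.hint as intel_gpu_hint

key, value = intel_gpu_hint.queue_throttle(intel_gpu_hint.ThrottleLevel.LOW)
assert key == "GPU_QUEUE_THROTTLE"   # value is an OVAny holding the chosen level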

@@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Enums
from openvino._pyopenvino.properties.log import Level
# Properties
from openvino._pyopenvino.properties.log import level

@@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
# Properties
from openvino._pyopenvino.properties.streams import num
# Classes
from openvino._pyopenvino.properties.streams import Num
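
The streams module pairs the num() helper with a small Num wrapper whose AUTO and NUMA sentinels map to -1 and -2. A sketch mirroring test_properties_streams in the updated test file below:

import openvino.properties.streams as streams

assert streams.Num.AUTO.to_integer() == -1
assert streams.Num.NUMA.to_integer() == -2
assert streams.Num(2).to_integer() == 2

key, value = streams.num(streams.Num.AUTO)
assert key == "NUM_STREAMS" and value.value == -1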

@@ -6,7 +6,16 @@ import pytest
import numpy as np
import os
from openvino import Core, Type, OVAny, properties
import openvino.properties as props
import openvino.properties.hint as hints
import openvino.properties.intel_cpu as intel_cpu
import openvino.properties.intel_auto as intel_auto
import openvino.properties.intel_gpu as intel_gpu
import openvino.properties.intel_gpu.hint as intel_gpu_hint
import openvino.properties.device as device
import openvino.properties.log as log
import openvino.properties.streams as streams
from openvino import Core, Type, OVAny
###
@@ -14,22 +23,22 @@ from openvino import Core, Type, OVAny, properties
###
def test_properties_ro_base():
with pytest.raises(TypeError) as e:
properties.supported_properties("something")
props.supported_properties("something")
assert "incompatible function arguments" in str(e.value)
def test_properties_rw_base():
assert properties.cache_dir() == "CACHE_DIR"
assert properties.cache_dir("./test_dir") == ("CACHE_DIR", OVAny("./test_dir"))
assert props.cache_dir() == "CACHE_DIR"
assert props.cache_dir("./test_dir") == ("CACHE_DIR", OVAny("./test_dir"))
with pytest.raises(TypeError) as e:
properties.cache_dir(6)
props.cache_dir(6)
assert "incompatible function arguments" in str(e.value)
def test_deprecation():
with pytest.warns(DeprecationWarning) as w:
_ = properties.hint.PerformanceMode.UNDEFINED
_ = hints.PerformanceMode.UNDEFINED
assert issubclass(w[0].category, DeprecationWarning)
assert "PerformanceMode.UNDEFINED is deprecated and will be removed" in str(w[0].message)
@@ -41,63 +50,63 @@ def test_deprecation():
("ov_enum", "expected_values"),
[
(
properties.Affinity,
props.Affinity,
(
(properties.Affinity.NONE, "Affinity.NONE", -1),
(properties.Affinity.CORE, "Affinity.CORE", 0),
(properties.Affinity.NUMA, "Affinity.NUMA", 1),
(properties.Affinity.HYBRID_AWARE, "Affinity.HYBRID_AWARE", 2),
(props.Affinity.NONE, "Affinity.NONE", -1),
(props.Affinity.CORE, "Affinity.CORE", 0),
(props.Affinity.NUMA, "Affinity.NUMA", 1),
(props.Affinity.HYBRID_AWARE, "Affinity.HYBRID_AWARE", 2),
),
),
(
properties.hint.Priority,
hints.Priority,
(
(properties.hint.Priority.LOW, "Priority.LOW", 0),
(properties.hint.Priority.MEDIUM, "Priority.MEDIUM", 1),
(properties.hint.Priority.HIGH, "Priority.HIGH", 2),
(properties.hint.Priority.DEFAULT, "Priority.MEDIUM", 1),
(hints.Priority.LOW, "Priority.LOW", 0),
(hints.Priority.MEDIUM, "Priority.MEDIUM", 1),
(hints.Priority.HIGH, "Priority.HIGH", 2),
(hints.Priority.DEFAULT, "Priority.MEDIUM", 1),
),
),
(
properties.hint.PerformanceMode,
hints.PerformanceMode,
(
(properties.hint.PerformanceMode.UNDEFINED, "PerformanceMode.UNDEFINED", -1),
(properties.hint.PerformanceMode.LATENCY, "PerformanceMode.LATENCY", 1),
(properties.hint.PerformanceMode.THROUGHPUT, "PerformanceMode.THROUGHPUT", 2),
(properties.hint.PerformanceMode.CUMULATIVE_THROUGHPUT, "PerformanceMode.CUMULATIVE_THROUGHPUT", 3),
(hints.PerformanceMode.UNDEFINED, "PerformanceMode.UNDEFINED", -1),
(hints.PerformanceMode.LATENCY, "PerformanceMode.LATENCY", 1),
(hints.PerformanceMode.THROUGHPUT, "PerformanceMode.THROUGHPUT", 2),
(hints.PerformanceMode.CUMULATIVE_THROUGHPUT, "PerformanceMode.CUMULATIVE_THROUGHPUT", 3),
),
),
(
properties.hint.SchedulingCoreType,
hints.SchedulingCoreType,
(
(properties.hint.SchedulingCoreType.ANY_CORE, "SchedulingCoreType.ANY_CORE", 0),
(properties.hint.SchedulingCoreType.PCORE_ONLY, "SchedulingCoreType.PCORE_ONLY", 1),
(properties.hint.SchedulingCoreType.ECORE_ONLY, "SchedulingCoreType.ECORE_ONLY", 2),
(hints.SchedulingCoreType.ANY_CORE, "SchedulingCoreType.ANY_CORE", 0),
(hints.SchedulingCoreType.PCORE_ONLY, "SchedulingCoreType.PCORE_ONLY", 1),
(hints.SchedulingCoreType.ECORE_ONLY, "SchedulingCoreType.ECORE_ONLY", 2),
),
),
(
properties.hint.ExecutionMode,
hints.ExecutionMode,
(
(properties.hint.ExecutionMode.PERFORMANCE, "ExecutionMode.PERFORMANCE", 1),
(properties.hint.ExecutionMode.ACCURACY, "ExecutionMode.ACCURACY", 2),
(hints.ExecutionMode.PERFORMANCE, "ExecutionMode.PERFORMANCE", 1),
(hints.ExecutionMode.ACCURACY, "ExecutionMode.ACCURACY", 2),
),
),
(
properties.device.Type,
device.Type,
(
(properties.device.Type.INTEGRATED, "Type.INTEGRATED", 0),
(properties.device.Type.DISCRETE, "Type.DISCRETE", 1),
(device.Type.INTEGRATED, "Type.INTEGRATED", 0),
(device.Type.DISCRETE, "Type.DISCRETE", 1),
),
),
(
properties.log.Level,
log.Level,
(
(properties.log.Level.NO, "Level.NO", -1),
(properties.log.Level.ERR, "Level.ERR", 0),
(properties.log.Level.WARNING, "Level.WARNING", 1),
(properties.log.Level.INFO, "Level.INFO", 2),
(properties.log.Level.DEBUG, "Level.DEBUG", 3),
(properties.log.Level.TRACE, "Level.TRACE", 4),
(log.Level.NO, "Level.NO", -1),
(log.Level.ERR, "Level.ERR", 0),
(log.Level.WARNING, "Level.WARNING", 1),
(log.Level.INFO, "Level.INFO", 2),
(log.Level.DEBUG, "Level.DEBUG", 3),
(log.Level.TRACE, "Level.TRACE", 4),
),
),
],
@@ -117,10 +126,10 @@ def test_properties_enums(ov_enum, expected_values):
[
(
(
properties.intel_gpu.hint.ThrottleLevel.LOW,
properties.intel_gpu.hint.ThrottleLevel.MEDIUM,
properties.intel_gpu.hint.ThrottleLevel.HIGH,
properties.intel_gpu.hint.ThrottleLevel.DEFAULT,
intel_gpu_hint.ThrottleLevel.LOW,
intel_gpu_hint.ThrottleLevel.MEDIUM,
intel_gpu_hint.ThrottleLevel.HIGH,
intel_gpu_hint.ThrottleLevel.DEFAULT,
),
(
("Priority.LOW", 0),
@@ -145,28 +154,28 @@ def test_conflicting_enum(proxy_enums, expected_values):
@pytest.mark.parametrize(
("ov_property_ro", "expected_value"),
[
(properties.supported_properties, "SUPPORTED_PROPERTIES"),
(properties.available_devices, "AVAILABLE_DEVICES"),
(properties.model_name, "NETWORK_NAME"),
(properties.optimal_number_of_infer_requests, "OPTIMAL_NUMBER_OF_INFER_REQUESTS"),
(properties.range_for_streams, "RANGE_FOR_STREAMS"),
(properties.optimal_batch_size, "OPTIMAL_BATCH_SIZE"),
(properties.max_batch_size, "MAX_BATCH_SIZE"),
(properties.range_for_async_infer_requests, "RANGE_FOR_ASYNC_INFER_REQUESTS"),
(properties.execution_devices, "EXECUTION_DEVICES"),
(properties.loaded_from_cache, "LOADED_FROM_CACHE"),
(properties.device.full_name, "FULL_DEVICE_NAME"),
(properties.device.architecture, "DEVICE_ARCHITECTURE"),
(properties.device.type, "DEVICE_TYPE"),
(properties.device.gops, "DEVICE_GOPS"),
(properties.device.thermal, "DEVICE_THERMAL"),
(properties.device.uuid, "DEVICE_UUID"),
(properties.device.luid, "DEVICE_LUID"),
(properties.device.capabilities, "OPTIMIZATION_CAPABILITIES"),
(properties.intel_gpu.device_total_mem_size, "GPU_DEVICE_TOTAL_MEM_SIZE"),
(properties.intel_gpu.uarch_version, "GPU_UARCH_VERSION"),
(properties.intel_gpu.execution_units_count, "GPU_EXECUTION_UNITS_COUNT"),
(properties.intel_gpu.memory_statistics, "GPU_MEMORY_STATISTICS"),
(props.supported_properties, "SUPPORTED_PROPERTIES"),
(props.available_devices, "AVAILABLE_DEVICES"),
(props.model_name, "NETWORK_NAME"),
(props.optimal_number_of_infer_requests, "OPTIMAL_NUMBER_OF_INFER_REQUESTS"),
(props.range_for_streams, "RANGE_FOR_STREAMS"),
(props.optimal_batch_size, "OPTIMAL_BATCH_SIZE"),
(props.max_batch_size, "MAX_BATCH_SIZE"),
(props.range_for_async_infer_requests, "RANGE_FOR_ASYNC_INFER_REQUESTS"),
(props.execution_devices, "EXECUTION_DEVICES"),
(props.loaded_from_cache, "LOADED_FROM_CACHE"),
(device.full_name, "FULL_DEVICE_NAME"),
(device.architecture, "DEVICE_ARCHITECTURE"),
(device.type, "DEVICE_TYPE"),
(device.gops, "DEVICE_GOPS"),
(device.thermal, "DEVICE_THERMAL"),
(device.uuid, "DEVICE_UUID"),
(device.luid, "DEVICE_LUID"),
(device.capabilities, "OPTIMIZATION_CAPABILITIES"),
(intel_gpu.device_total_mem_size, "GPU_DEVICE_TOTAL_MEM_SIZE"),
(intel_gpu.uarch_version, "GPU_UARCH_VERSION"),
(intel_gpu.execution_units_count, "GPU_EXECUTION_UNITS_COUNT"),
(intel_gpu.memory_statistics, "GPU_MEMORY_STATISTICS"),
],
)
def test_properties_ro(ov_property_ro, expected_value):
@@ -181,7 +190,7 @@ def test_properties_ro(ov_property_ro, expected_value):
("ov_property_rw", "expected_value", "test_values"),
[
(
properties.enable_profiling,
props.enable_profiling,
"PERF_COUNT",
(
(True, True),
@@ -191,12 +200,12 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.cache_dir,
props.cache_dir,
"CACHE_DIR",
(("./test_cache", "./test_cache"),),
),
(
properties.auto_batch_timeout,
props.auto_batch_timeout,
"AUTO_BATCH_TIMEOUT",
(
(21, 21),
@@ -206,7 +215,7 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.inference_num_threads,
props.inference_num_threads,
"INFERENCE_NUM_THREADS",
(
(-8, -8),
@@ -214,30 +223,30 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.compilation_num_threads,
props.compilation_num_threads,
"COMPILATION_NUM_THREADS",
((44, 44),),
),
(
properties.affinity,
props.affinity,
"AFFINITY",
((properties.Affinity.NONE, properties.Affinity.NONE),),
((props.Affinity.NONE, props.Affinity.NONE),),
),
(properties.force_tbb_terminate, "FORCE_TBB_TERMINATE", ((True, True), (False, False))),
(properties.enable_mmap, "ENABLE_MMAP", ((True, True), (False, False))),
(properties.hint.inference_precision, "INFERENCE_PRECISION_HINT", ((Type.f32, Type.f32),)),
(props.force_tbb_terminate, "FORCE_TBB_TERMINATE", ((True, True), (False, False))),
(props.enable_mmap, "ENABLE_MMAP", ((True, True), (False, False))),
(hints.inference_precision, "INFERENCE_PRECISION_HINT", ((Type.f32, Type.f32),)),
(
properties.hint.model_priority,
hints.model_priority,
"MODEL_PRIORITY",
((properties.hint.Priority.LOW, properties.hint.Priority.LOW),),
((hints.Priority.LOW, hints.Priority.LOW),),
),
(
properties.hint.performance_mode,
hints.performance_mode,
"PERFORMANCE_HINT",
((properties.hint.PerformanceMode.UNDEFINED, properties.hint.PerformanceMode.UNDEFINED),),
((hints.PerformanceMode.UNDEFINED, hints.PerformanceMode.UNDEFINED),),
),
(
properties.hint.enable_cpu_pinning,
hints.enable_cpu_pinning,
"ENABLE_CPU_PINNING",
(
(True, True),
@@ -247,12 +256,12 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.hint.scheduling_core_type,
hints.scheduling_core_type,
"SCHEDULING_CORE_TYPE",
((properties.hint.SchedulingCoreType.PCORE_ONLY, properties.hint.SchedulingCoreType.PCORE_ONLY),),
((hints.SchedulingCoreType.PCORE_ONLY, hints.SchedulingCoreType.PCORE_ONLY),),
),
(
properties.hint.enable_hyper_threading,
hints.enable_hyper_threading,
"ENABLE_HYPER_THREADING",
(
(True, True),
@@ -262,27 +271,27 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.hint.execution_mode,
hints.execution_mode,
"EXECUTION_MODE_HINT",
((properties.hint.ExecutionMode.PERFORMANCE, properties.hint.ExecutionMode.PERFORMANCE),),
((hints.ExecutionMode.PERFORMANCE, hints.ExecutionMode.PERFORMANCE),),
),
(
properties.hint.num_requests,
hints.num_requests,
"PERFORMANCE_HINT_NUM_REQUESTS",
((8, 8),),
),
(
properties.hint.allow_auto_batching,
hints.allow_auto_batching,
"ALLOW_AUTO_BATCHING",
((True, True),),
),
(
properties.intel_cpu.denormals_optimization,
intel_cpu.denormals_optimization,
"CPU_DENORMALS_OPTIMIZATION",
((True, True),),
),
(
properties.intel_cpu.sparse_weights_decompression_rate,
intel_cpu.sparse_weights_decompression_rate,
"CPU_SPARSE_WEIGHTS_DECOMPRESSION_RATE",
(
(0.1, np.float32(0.1)),
@@ -290,7 +299,7 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.intel_auto.device_bind_buffer,
intel_auto.device_bind_buffer,
"DEVICE_BIND_BUFFER",
(
(True, True),
@@ -300,7 +309,7 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.intel_auto.enable_startup_fallback,
intel_auto.enable_startup_fallback,
"ENABLE_STARTUP_FALLBACK",
(
(True, True),
@@ -310,7 +319,7 @@ def test_properties_ro(ov_property_ro, expected_value):
),
),
(
properties.intel_auto.enable_runtime_fallback,
intel_auto.enable_runtime_fallback,
"ENABLE_RUNTIME_FALLBACK",
(
(True, True),
@@ -319,39 +328,39 @@ def test_properties_ro(ov_property_ro, expected_value):
(0, False),
),
),
(properties.device.id, "DEVICE_ID", (("0", "0"),)),
(device.id, "DEVICE_ID", (("0", "0"),)),
(
properties.log.level,
log.level,
"LOG_LEVEL",
((properties.log.Level.NO, properties.log.Level.NO),),
((log.Level.NO, log.Level.NO),),
),
(
properties.intel_gpu.enable_loop_unrolling,
intel_gpu.enable_loop_unrolling,
"GPU_ENABLE_LOOP_UNROLLING",
((True, True),),
),
(
properties.intel_gpu.disable_winograd_convolution,
intel_gpu.disable_winograd_convolution,
"GPU_DISABLE_WINOGRAD_CONVOLUTION",
((True, True),),
),
(
properties.intel_gpu.hint.queue_throttle,
intel_gpu_hint.queue_throttle,
"GPU_QUEUE_THROTTLE",
((properties.intel_gpu.hint.ThrottleLevel.LOW, properties.hint.Priority.LOW),),
((intel_gpu_hint.ThrottleLevel.LOW, hints.Priority.LOW),),
),
(
properties.intel_gpu.hint.queue_priority,
intel_gpu_hint.queue_priority,
"GPU_QUEUE_PRIORITY",
((properties.hint.Priority.LOW, properties.hint.Priority.LOW),),
((hints.Priority.LOW, hints.Priority.LOW),),
),
(
properties.intel_gpu.hint.host_task_priority,
intel_gpu_hint.host_task_priority,
"GPU_HOST_TASK_PRIORITY",
((properties.hint.Priority.LOW, properties.hint.Priority.LOW),),
((hints.Priority.LOW, hints.Priority.LOW),),
),
(
properties.intel_gpu.hint.available_device_mem,
intel_gpu_hint.available_device_mem,
"AVAILABLE_DEVICE_MEM_SIZE",
((128, 128),),
),
@@ -372,74 +381,74 @@ def test_properties_rw(ov_property_rw, expected_value, test_values):
# Special cases
###
def test_properties_device_priorities():
assert properties.device.priorities() == "MULTI_DEVICE_PRIORITIES"
assert properties.device.priorities("CPU,GPU") == ("MULTI_DEVICE_PRIORITIES", OVAny("CPU,GPU,"))
assert properties.device.priorities("CPU", "GPU") == ("MULTI_DEVICE_PRIORITIES", OVAny("CPU,GPU,"))
assert device.priorities() == "MULTI_DEVICE_PRIORITIES"
assert device.priorities("CPU,GPU") == ("MULTI_DEVICE_PRIORITIES", OVAny("CPU,GPU,"))
assert device.priorities("CPU", "GPU") == ("MULTI_DEVICE_PRIORITIES", OVAny("CPU,GPU,"))
with pytest.raises(TypeError) as e:
value = 6
properties.device.priorities("CPU", value)
device.priorities("CPU", value)
assert f"Incorrect passed value: {value} , expected string values." in str(e.value)
def test_properties_device_properties():
assert properties.device.properties() == "DEVICE_PROPERTIES"
assert device.properties() == "DEVICE_PROPERTIES"
def make_dict(*arg):
return dict( # noqa: C406
[*arg])
def check(value1, value2):
assert properties.device.properties(value1) == ("DEVICE_PROPERTIES", OVAny(value2))
assert device.properties(value1) == ("DEVICE_PROPERTIES", OVAny(value2))
check({"CPU": {properties.streams.num(): 2}},
check({"CPU": {streams.num(): 2}},
{"CPU": {"NUM_STREAMS": 2}})
check({"CPU": make_dict(properties.streams.num(2))},
{"CPU": {"NUM_STREAMS": properties.streams.Num(2)}})
check({"GPU": make_dict(properties.hint.inference_precision(Type.f32))},
check({"CPU": make_dict(streams.num(2))},
{"CPU": {"NUM_STREAMS": streams.Num(2)}})
check({"GPU": make_dict(hints.inference_precision(Type.f32))},
{"GPU": {"INFERENCE_PRECISION_HINT": Type.f32}})
check({"CPU": make_dict(properties.streams.num(2), properties.hint.inference_precision(Type.f32))},
{"CPU": {"INFERENCE_PRECISION_HINT": Type.f32, "NUM_STREAMS": properties.streams.Num(2)}})
check({"CPU": make_dict(properties.streams.num(2), properties.hint.inference_precision(Type.f32)),
"GPU": make_dict(properties.streams.num(1), properties.hint.inference_precision(Type.f16))},
{"CPU": {"INFERENCE_PRECISION_HINT": Type.f32, "NUM_STREAMS": properties.streams.Num(2)},
"GPU": {"INFERENCE_PRECISION_HINT": Type.f16, "NUM_STREAMS": properties.streams.Num(1)}})
check({"CPU": make_dict(streams.num(2), hints.inference_precision(Type.f32))},
{"CPU": {"INFERENCE_PRECISION_HINT": Type.f32, "NUM_STREAMS": streams.Num(2)}})
check({"CPU": make_dict(streams.num(2), hints.inference_precision(Type.f32)),
"GPU": make_dict(streams.num(1), hints.inference_precision(Type.f16))},
{"CPU": {"INFERENCE_PRECISION_HINT": Type.f32, "NUM_STREAMS": streams.Num(2)},
"GPU": {"INFERENCE_PRECISION_HINT": Type.f16, "NUM_STREAMS": streams.Num(1)}})
def test_properties_streams():
# Test extra Num class
assert properties.streams.Num().to_integer() == -1
assert properties.streams.Num(2).to_integer() == 2
assert properties.streams.Num.AUTO.to_integer() == -1
assert properties.streams.Num.NUMA.to_integer() == -2
assert streams.Num().to_integer() == -1
assert streams.Num(2).to_integer() == 2
assert streams.Num.AUTO.to_integer() == -1
assert streams.Num.NUMA.to_integer() == -2
# Test RW property
property_tuple = properties.streams.num(properties.streams.Num.AUTO)
property_tuple = streams.num(streams.Num.AUTO)
assert property_tuple[0] == "NUM_STREAMS"
assert property_tuple[1].value == -1
property_tuple = properties.streams.num(42)
property_tuple = streams.num(42)
assert property_tuple[0] == "NUM_STREAMS"
assert property_tuple[1].value == 42
def test_properties_capability():
assert properties.device.Capability.FP32 == "FP32"
assert properties.device.Capability.BF16 == "BF16"
assert properties.device.Capability.FP16 == "FP16"
assert properties.device.Capability.INT8 == "INT8"
assert properties.device.Capability.INT16 == "INT16"
assert properties.device.Capability.BIN == "BIN"
assert properties.device.Capability.WINOGRAD == "WINOGRAD"
assert properties.device.Capability.EXPORT_IMPORT == "EXPORT_IMPORT"
assert device.Capability.FP32 == "FP32"
assert device.Capability.BF16 == "BF16"
assert device.Capability.FP16 == "FP16"
assert device.Capability.INT8 == "INT8"
assert device.Capability.INT16 == "INT16"
assert device.Capability.BIN == "BIN"
assert device.Capability.WINOGRAD == "WINOGRAD"
assert device.Capability.EXPORT_IMPORT == "EXPORT_IMPORT"
def test_properties_memory_type_gpu():
assert properties.intel_gpu.MemoryType.surface == "GPU_SURFACE"
assert properties.intel_gpu.MemoryType.buffer == "GPU_BUFFER"
assert intel_gpu.MemoryType.surface == "GPU_SURFACE"
assert intel_gpu.MemoryType.buffer == "GPU_BUFFER"
def test_properties_capability_gpu():
assert properties.intel_gpu.CapabilityGPU.HW_MATMUL == "GPU_HW_MATMUL"
assert intel_gpu.CapabilityGPU.HW_MATMUL == "GPU_HW_MATMUL"
def test_properties_hint_model():
@@ -448,19 +457,19 @@ def test_properties_hint_model():
model = generate_add_model()
assert properties.hint.model() == "MODEL_PTR"
assert hints.model() == "MODEL_PTR"
property_tuple = properties.hint.model(model)
property_tuple = hints.model(model)
assert property_tuple[0] == "MODEL_PTR"
def test_single_property_setting(device):
core = Core()
core.set_property(device, properties.streams.num(properties.streams.Num.AUTO))
core.set_property(device, streams.num(streams.Num.AUTO))
assert properties.streams.Num.AUTO.to_integer() == -1
assert type(core.get_property(device, properties.streams.num())) == int
assert streams.Num.AUTO.to_integer() == -1
assert type(core.get_property(device, streams.num())) == int
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
@@ -470,44 +479,44 @@ def test_single_property_setting(device):
# Dict from list of tuples
dict( # noqa: C406
[ # noqa: C406
properties.enable_profiling(True),
properties.cache_dir("./"),
properties.inference_num_threads(9),
properties.affinity(properties.Affinity.NONE),
properties.hint.inference_precision(Type.f32),
properties.hint.performance_mode(properties.hint.PerformanceMode.LATENCY),
properties.hint.enable_cpu_pinning(True),
properties.hint.scheduling_core_type(properties.hint.SchedulingCoreType.PCORE_ONLY),
properties.hint.enable_hyper_threading(True),
properties.hint.num_requests(12),
properties.streams.num(5),
props.enable_profiling(True),
props.cache_dir("./"),
props.inference_num_threads(9),
props.affinity(props.Affinity.NONE),
hints.inference_precision(Type.f32),
hints.performance_mode(hints.PerformanceMode.LATENCY),
hints.enable_cpu_pinning(True),
hints.scheduling_core_type(hints.SchedulingCoreType.PCORE_ONLY),
hints.enable_hyper_threading(True),
hints.num_requests(12),
streams.num(5),
],
),
# Pure dict
{
properties.enable_profiling(): True,
properties.cache_dir(): "./",
properties.inference_num_threads(): 9,
properties.affinity(): properties.Affinity.NONE,
properties.hint.inference_precision(): Type.f32,
properties.hint.performance_mode(): properties.hint.PerformanceMode.LATENCY,
properties.hint.enable_cpu_pinning(): True,
properties.hint.scheduling_core_type(): properties.hint.SchedulingCoreType.PCORE_ONLY,
properties.hint.enable_hyper_threading(): True,
properties.hint.num_requests(): 12,
properties.streams.num(): 5,
props.enable_profiling(): True,
props.cache_dir(): "./",
props.inference_num_threads(): 9,
props.affinity(): props.Affinity.NONE,
hints.inference_precision(): Type.f32,
hints.performance_mode(): hints.PerformanceMode.LATENCY,
hints.enable_cpu_pinning(): True,
hints.scheduling_core_type(): hints.SchedulingCoreType.PCORE_ONLY,
hints.enable_hyper_threading(): True,
hints.num_requests(): 12,
streams.num(): 5,
},
# Mixed dict
{
properties.enable_profiling(): True,
props.enable_profiling(): True,
"CACHE_DIR": "./",
properties.inference_num_threads(): 9,
properties.affinity(): "NONE",
props.inference_num_threads(): 9,
props.affinity(): "NONE",
"INFERENCE_PRECISION_HINT": Type.f32,
properties.hint.performance_mode(): properties.hint.PerformanceMode.LATENCY,
properties.hint.scheduling_core_type(): properties.hint.SchedulingCoreType.PCORE_ONLY,
properties.hint.num_requests(): 12,
"NUM_STREAMS": properties.streams.Num(5),
hints.performance_mode(): hints.PerformanceMode.LATENCY,
hints.scheduling_core_type(): hints.SchedulingCoreType.PCORE_ONLY,
hints.num_requests(): 12,
"NUM_STREAMS": streams.Num(5),
"ENABLE_MMAP": "NO",
},
],
@@ -521,17 +530,17 @@ def test_core_cpu_properties(properties_to_set):
core.set_property(properties_to_set)
# RW properties
assert core.get_property("CPU", properties.enable_profiling()) is True
assert core.get_property("CPU", properties.cache_dir()) == "./"
assert core.get_property("CPU", properties.inference_num_threads()) == 9
assert core.get_property("CPU", properties.affinity()) == properties.Affinity.NONE
assert core.get_property("CPU", properties.streams.num()) == 5
assert core.get_property("CPU", props.enable_profiling()) is True
assert core.get_property("CPU", props.cache_dir()) == "./"
assert core.get_property("CPU", props.inference_num_threads()) == 9
assert core.get_property("CPU", props.affinity()) == props.Affinity.NONE
assert core.get_property("CPU", streams.num()) == 5
# RO properties
assert type(core.get_property("CPU", properties.supported_properties())) == dict
assert type(core.get_property("CPU", properties.available_devices())) == list
assert type(core.get_property("CPU", properties.optimal_number_of_infer_requests())) == int
assert type(core.get_property("CPU", properties.range_for_streams())) == tuple
assert type(core.get_property("CPU", properties.range_for_async_infer_requests())) == tuple
assert type(core.get_property("CPU", properties.device.full_name())) == str
assert type(core.get_property("CPU", properties.device.capabilities())) == list
assert type(core.get_property("CPU", props.supported_properties())) == dict
assert type(core.get_property("CPU", props.available_devices())) == list
assert type(core.get_property("CPU", props.optimal_number_of_infer_requests())) == int
assert type(core.get_property("CPU", props.range_for_streams())) == tuple
assert type(core.get_property("CPU", props.range_for_async_infer_requests())) == tuple
assert type(core.get_property("CPU", device.full_name())) == str
assert type(core.get_property("CPU", device.capabilities())) == list