diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml index bfed994a137..e553f6d775b 100644 --- a/.ci/azure/linux.yml +++ b/.ci/azure/linux.yml @@ -184,7 +184,10 @@ jobs: continueOnError: false # Skip test_onnx/test_zoo_models and test_onnx/test_backend due to long execution time - - script: . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py + - script: | + export DATA_PATH=$(MODELS_PATH) + export MODELS_PATH=$(MODELS_PATH) + . $(SETUPVARS) -pyver 3.8 && python3 -m pytest -s $(INSTALL_TEST_DIR)/pyngraph --junitxml=TEST-Pyngraph.xml --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_zoo_models.py --ignore=$(INSTALL_TEST_DIR)/pyngraph/tests/test_onnx/test_backend.py displayName: 'nGraph Python Bindings Tests' continueOnError: false diff --git a/inference-engine/ie_bridges/python/cmake/utils/merge_init_files.py b/inference-engine/ie_bridges/python/cmake/utils/merge_init_files.py new file mode 100755 index 00000000000..2e1b3914b44 --- /dev/null +++ b/inference-engine/ie_bridges/python/cmake/utils/merge_init_files.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import argparse +import os + + +parser = argparse.ArgumentParser() + +parser.add_argument('-i', '--input_file', type=str, required=True) +parser.add_argument('-o', '--output_file', type=str, required=True) + +args = parser.parse_args() + +mode = 'a' if os.path.exists(args.output_file) else 'w' +with open(args.input_file) as input_file, open(args.output_file, mode) as output_file: + lines = input_file.readlines() + count = 0 + copied_lines = ["\n"] + for idx, line in enumerate(lines): + if line[0] == '#' and idx <= 2: + count += 1 + if idx > 2 and count == 3: + copied_lines.append(line) + output_file.writelines(copied_lines) diff 
--git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt index c55ae9cd1e8..bd90ae05f42 100644 --- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt +++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt @@ -60,13 +60,18 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") target_compile_options(${TARGET_NAME} PRIVATE "-Wno-error=register") endif() +find_package(Python COMPONENTS Interpreter Development) + # perform copy +# merge __init__.py files of new and old python api add_custom_command(TARGET ${TARGET_NAME} POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/requirements.txt ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../../requirements.txt COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/requirements.txt ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/../../requirements.txt - COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../__init__.py + COMMAND ${Python_EXECUTABLE} ${PYTHON_BRIDGE_SRC_ROOT}/cmake/utils/merge_init_files.py + -i ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/__init__.py + -o ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../__init__.py ) # install diff --git a/runtime/bindings/python/CMakeLists.txt b/runtime/bindings/python/CMakeLists.txt index 4e1b972c17a..c3e3786579b 100644 --- a/runtime/bindings/python/CMakeLists.txt +++ b/runtime/bindings/python/CMakeLists.txt @@ -6,10 +6,11 @@ cmake_minimum_required (VERSION 3.13) project(OpenVINOPython DESCRIPTION "OpenVINO Runtime Python bindings") +add_subdirectory(thirdparty/pybind11 EXCLUDE_FROM_ALL) + set(LIBRARY_OUTPUT_DIRECTORY_BIN ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) -add_subdirectory(thirdparty/pybind11 EXCLUDE_FROM_ALL) -add_subdirectory(src/compatibility) 
+add_subdirectory(src) if(NGRAPH_UNIT_TEST_ENABLE) add_subdirectory(tests/mock/mock_py_ngraph_frontend) diff --git a/runtime/bindings/python/setup.py b/runtime/bindings/python/setup.py index 0cd8167f263..17af4b09475 100644 --- a/runtime/bindings/python/setup.py +++ b/runtime/bindings/python/setup.py @@ -22,7 +22,7 @@ OPENVINO_ROOT_DIR = os.path.normpath(os.path.join(PYNGRAPH_ROOT_DIR, "../../..") # Change current working directory to runtime/bindings/python os.chdir(PYNGRAPH_ROOT_DIR) -NGRAPH_LIBS = ["ngraph", "onnx_ngraph_frontend"] +NGRAPH_LIBS = ["ngraph", "onnx_ngraph_frontend", "openvino"] packages = [ "ngraph", @@ -40,8 +40,10 @@ packages = [ "ngraph.impl.op.util", "ngraph.impl.passes", "ngraph.frontend", + "openvino" ] + data_files = [] with open(os.path.join(PYNGRAPH_ROOT_DIR, "requirements.txt")) as req: @@ -49,13 +51,12 @@ with open(os.path.join(PYNGRAPH_ROOT_DIR, "requirements.txt")) as req: cmdclass = {} for super_class in [_build, _install, _develop]: - class command(super_class): """Add user options for build, install and develop commands.""" cmake_build_types = ["Release", "Debug", "RelWithDebInfo", "MinSizeRel"] user_options = super_class.user_options + [ - ("config=", None, "Build configuration [{}].".format("|".join(cmake_build_types))), + ("config=", None, f"Build configuration [{'|'.join(cmake_build_types)}]."), ("jobs=", None, "Specifies the number of jobs to use with make."), ("cmake-args=", None, "Additional options to be passed to CMake.") ] @@ -84,7 +85,7 @@ class BuildCMakeExt(build_ext): cmake_build_types = ["Release", "Debug", "RelWithDebInfo", "MinSizeRel"] user_options = [ - ("config=", None, "Build configuration [{}].".format("|".join(cmake_build_types))), + ("config=", None, f"Build configuration [{'|'.join(cmake_build_types)}]."), ("jobs=", None, "Specifies the number of jobs to use with make."), ("cmake-args=", None, "Additional options to be passed to CMake.") ] @@ -120,7 +121,7 @@ class BuildCMakeExt(build_ext): self.debug = True 
if "Debug" == self.config else False except ValueError: self.announce("Unsupported CMAKE_BUILD_TYPE value: " + self.config, level=4) - self.announce("Supported values: {}".format(", ".join(self.cmake_build_types)), level=4) + self.announce(f"Supported values: {', '.join(self.cmake_build_types)}", level=4) sys.exit(1) if self.jobs is None and os.getenv("MAX_JOBS") is not None: self.jobs = os.getenv("MAX_JOBS") @@ -129,6 +130,8 @@ class BuildCMakeExt(build_ext): def run(self): """Run CMake build for modules.""" for extension in self.extensions: + if extension.name == "pyopenvino": + self.build_cmake(extension) if extension.name == "_pyngraph": self.build_cmake(extension) @@ -155,7 +158,8 @@ class BuildCMakeExt(build_ext): self.announce("Configuring cmake project", level=3) ext_args = self.cmake_args.split() if self.cmake_args else [] self.spawn(["cmake", "-S" + root_dir, "-B" + self.build_temp, - "-DCMAKE_BUILD_TYPE={}".format(self.config), + f"-DCMAKE_BUILD_TYPE={self.config}", + f"-DInferenceEngine_DIR={os.path.join(OPENVINO_ROOT_DIR, 'build')}", "-DENABLE_PYTHON=ON", "-DNGRAPH_ONNX_FRONTEND_ENABLE=ON"] + ext_args) @@ -165,9 +169,8 @@ class BuildCMakeExt(build_ext): "--config", self.config, "-j", str(self.jobs)]) self.announce("Moving built python module to " + str(extension_path), level=3) - pyds = list(glob.iglob("{0}/**/{1}*{2}".format(bin_dir, - extension.name, - sysconfig.get_config_var("EXT_SUFFIX")), recursive=True)) + pyds = list(glob.iglob(f"{bin_dir}/**/{extension.name}*{sysconfig.get_config_var('EXT_SUFFIX')}", + recursive=True)) for name in pyds: self.announce("copy " + os.path.join(name), level=3) shutil.copy(name, extension_path) @@ -193,9 +196,10 @@ class InstallCMakeLibs(install_lib): lib_ext = ".dll" libs = [] + print(root_dir) for ngraph_lib in NGRAPH_LIBS: - libs.extend(list(glob.iglob("{0}/**/*{1}*{2}".format(root_dir, - ngraph_lib, lib_ext), recursive=True))) + libs.extend(list(glob.iglob(f"{root_dir}/**/*{ngraph_lib}*{lib_ext}", 
recursive=True))) + print(libs) if not libs: raise Exception("NGraph libs not found.") @@ -216,8 +220,8 @@ setup( author="Intel Corporation", url="https://github.com/openvinotoolkit/openvino", license="License :: OSI Approved :: Apache Software License", - ext_modules=[CMakeExtension(name="_pyngraph")], - package_dir={"": "src/compatibility"}, + ext_modules=[CMakeExtension(name="_pyngraph"), CMakeExtension(name="pyopenvino")], + package_dir={"": "src/compatibility", "openvino": "src/openvino"}, packages=packages, install_requires=requirements, data_files=data_files, diff --git a/runtime/bindings/python/src/CMakeLists.txt b/runtime/bindings/python/src/CMakeLists.txt new file mode 100644 index 00000000000..c7e71113d99 --- /dev/null +++ b/runtime/bindings/python/src/CMakeLists.txt @@ -0,0 +1,6 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +add_subdirectory(compatibility) +add_subdirectory(pyopenvino) diff --git a/runtime/bindings/python/src/openvino/__init__.py b/runtime/bindings/python/src/openvino/__init__.py new file mode 100644 index 00000000000..f5222eba684 --- /dev/null +++ b/runtime/bindings/python/src/openvino/__init__.py @@ -0,0 +1,40 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.pyopenvino import Core +from openvino.pyopenvino import IENetwork +from openvino.pyopenvino import ExecutableNetwork +from openvino.pyopenvino import Version +from openvino.pyopenvino import Parameter +from openvino.pyopenvino import InputInfoPtr +from openvino.pyopenvino import InputInfoCPtr +from openvino.pyopenvino import DataPtr +from openvino.pyopenvino import TensorDesc +from openvino.pyopenvino import get_version +from openvino.pyopenvino import StatusCode +from openvino.pyopenvino import InferQueue +from openvino.pyopenvino import InferRequest # TODO: move to ie_api? 
+from openvino.pyopenvino import Blob +from openvino.pyopenvino import PreProcessInfo +from openvino.pyopenvino import MeanVariant +from openvino.pyopenvino import ResizeAlgorithm +from openvino.pyopenvino import ColorFormat +from openvino.pyopenvino import PreProcessChannel + +from openvino.ie_api import BlobWrapper +from openvino.ie_api import infer +from openvino.ie_api import async_infer +from openvino.ie_api import get_result +from openvino.ie_api import blob_from_file + +# Patching for Blob class +# flake8: noqa: F811 +Blob = BlobWrapper +# Patching ExecutableNetwork +ExecutableNetwork.infer = infer +# Patching InferRequest +InferRequest.infer = infer +InferRequest.async_infer = async_infer +InferRequest.get_result = get_result +# Patching InferQueue +InferQueue.async_infer = async_infer diff --git a/runtime/bindings/python/src/openvino/ie_api.py b/runtime/bindings/python/src/openvino/ie_api.py new file mode 100644 index 00000000000..061318d29f9 --- /dev/null +++ b/runtime/bindings/python/src/openvino/ie_api.py @@ -0,0 +1,114 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +from openvino.pyopenvino import TBlobFloat32 +from openvino.pyopenvino import TBlobFloat64 +from openvino.pyopenvino import TBlobInt64 +from openvino.pyopenvino import TBlobUint64 +from openvino.pyopenvino import TBlobInt32 +from openvino.pyopenvino import TBlobUint32 +from openvino.pyopenvino import TBlobInt16 +from openvino.pyopenvino import TBlobUint16 +from openvino.pyopenvino import TBlobInt8 +from openvino.pyopenvino import TBlobUint8 +from openvino.pyopenvino import TensorDesc +from openvino.pyopenvino import InferRequest + +import numpy as np + + +precision_map = {"FP32": np.float32, + "FP64": np.float64, + "FP16": np.int16, + "BF16": np.int16, + "I16": np.int16, + "I8": np.int8, + "BIN": np.int8, + "I32": np.int32, + "I64": np.int64, + "U8": np.uint8, + "BOOL": np.uint8, + "U16": np.uint16, + "U32": np.uint32, + "U64": np.uint64} + + +def 
normalize_inputs(py_dict: dict) -> dict: + """Normalize a dictionary of inputs to contiguous numpy arrays.""" + return {k: (np.ascontiguousarray(v) if isinstance(v, np.ndarray) else v) + for k, v in py_dict.items()} + +# flake8: noqa: D102 +def infer(request: InferRequest, inputs: dict = None) -> dict: + results = request._infer(inputs=normalize_inputs(inputs if inputs is not None else {})) + return {name: (blob.buffer.copy()) for name, blob in results.items()} + +# flake8: noqa: D102 +def get_result(request: InferRequest, name: str) -> np.ndarray: + return request.get_blob(name).buffer.copy() + +# flake8: noqa: D102 +def async_infer(request: InferRequest, inputs: dict = None, userdata=None) -> None: # type: ignore + request._async_infer(inputs=normalize_inputs(inputs if inputs is not None else {}), + userdata=userdata) + +# flake8: noqa: C901 +# Dispatch Blob types on Python side. +class BlobWrapper: + def __new__(cls, tensor_desc: TensorDesc, arr: np.ndarray = None): # type: ignore + arr_size = 0 + precision = "" + if tensor_desc is not None: + tensor_desc_size = int(np.prod(tensor_desc.dims)) + precision = tensor_desc.precision + if arr is not None: + arr = np.array(arr) # Keeping array as numpy array + arr_size = int(np.prod(arr.shape)) + if np.isfortran(arr): + arr = arr.ravel(order="F") + else: + arr = arr.ravel(order="C") + if arr_size != tensor_desc_size: + raise AttributeError(f"Number of elements in provided numpy array " + f"{arr_size} and required by TensorDesc " + f"{tensor_desc_size} are not equal") + if arr.dtype != precision_map[precision]: + raise ValueError(f"Data type {arr.dtype} of provided numpy array " + f"doesn't match to TensorDesc precision {precision}") + if not arr.flags["C_CONTIGUOUS"]: + arr = np.ascontiguousarray(arr) + elif arr is None: + arr = np.empty(0, dtype=precision_map[precision]) + else: + raise AttributeError("TensorDesc can't be None") + + if precision in ["FP32"]: + return TBlobFloat32(tensor_desc, arr, arr_size) + elif 
precision in ["FP64"]: + return TBlobFloat64(tensor_desc, arr, arr_size) + elif precision in ["FP16", "BF16"]: + return TBlobInt16(tensor_desc, arr.view(dtype=np.int16), arr_size) + elif precision in ["I64"]: + return TBlobInt64(tensor_desc, arr, arr_size) + elif precision in ["U64"]: + return TBlobUint64(tensor_desc, arr, arr_size) + elif precision in ["I32"]: + return TBlobInt32(tensor_desc, arr, arr_size) + elif precision in ["U32"]: + return TBlobUint32(tensor_desc, arr, arr_size) + elif precision in ["I16"]: + return TBlobInt16(tensor_desc, arr, arr_size) + elif precision in ["U16"]: + return TBlobUint16(tensor_desc, arr, arr_size) + elif precision in ["I8", "BIN"]: + return TBlobInt8(tensor_desc, arr, arr_size) + elif precision in ["U8", "BOOL"]: + return TBlobUint8(tensor_desc, arr, arr_size) + else: + raise AttributeError(f"Unsupported precision {precision} for Blob") + +# flake8: noqa: D102 +def blob_from_file(path_to_bin_file: str) -> BlobWrapper: + array = np.fromfile(path_to_bin_file, dtype=np.uint8) + tensor_desc = TensorDesc("U8", array.shape, "C") + return BlobWrapper(tensor_desc, array) diff --git a/runtime/bindings/python/src/pyopenvino/CMakeLists.txt b/runtime/bindings/python/src/pyopenvino/CMakeLists.txt new file mode 100644 index 00000000000..305a5687c0f --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/CMakeLists.txt @@ -0,0 +1,63 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +project (pyopenvino) + +if(NOT DEFINED OpenVINO_SOURCE_DIR) + find_package(OpenVINO REQUIRED) +endif() + +# PYTHON_VERSION_MAJOR and PYTHON_VERSION_MINOR are defined inside pybind11 +set(PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}) +message(STATUS "Python version=${PYTHON_VERSION}") + +if(OpenVINO_SOURCE_DIR) + if(WIN32) + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api/${PYTHON_VERSION}/openvino) + else() + set(PYTHON_BRIDGE_OUTPUT_DIRECTORY 
${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino) + endif() + + set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) + set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) +endif() + +# create target + +file(GLOB_RECURSE SOURCES *.cpp) + +pybind11_add_module(${PROJECT_NAME} MODULE ${SOURCES}) + +target_include_directories(${PROJECT_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/..") +target_link_libraries(${PROJECT_NAME} PRIVATE openvino::runtime) + +# perform copy +if(OpenVINO_SOURCE_DIR) + add_custom_command(TARGET ${PROJECT_NAME} + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_SOURCE_DIR}/../openvino ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} + ) +endif() + +if(OpenVINO_SOURCE_DIR OR InferenceEngineDeveloperPackage_FOUND) + if(COMMAND ie_python_minimal_api) + ie_python_minimal_api(${PROJECT_NAME}) + endif() + + add_clang_format_target(${PROJECT_NAME}_clang FOR_TARGETS ${PROJECT_NAME}) + + ie_cpack_add_component(pyopenvino_${PYTHON_VERSION}) + + install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../openvino + DESTINATION python/${PYTHON_VERSION} + COMPONENT pyopenvino_${PYTHON_VERSION} + USE_SOURCE_PERMISSIONS) + + install(TARGETS ${PROJECT_NAME} + DESTINATION python/${PYTHON_VERSION}/openvino + COMPONENT pyopenvino_${PYTHON_VERSION}) + + ie_cpack(pyopenvino_${PYTHON_VERSION}) +endif() diff --git a/runtime/bindings/python/src/pyopenvino/core/common.cpp b/runtime/bindings/python/src/pyopenvino/core/common.cpp new file mode 100644 index 00000000000..60ab04903f2 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/common.cpp @@ -0,0 +1,267 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common.hpp" + +#include + +namespace Common { +namespace { +const std::unordered_map layout_int_to_str_map = {{0, 
"ANY"}, + {1, "NCHW"}, + {2, "NHWC"}, + {3, "NCDHW"}, + {4, "NDHWC"}, + {64, "OIHW"}, + {95, "SCALAR"}, + {96, "C"}, + {128, "CHW"}, + {192, "HW"}, + {193, "NC"}, + {194, "CN"}, + {200, "BLOCKED"}}; + +const std::unordered_map layout_str_to_enum = { + {"ANY", InferenceEngine::Layout::ANY}, + {"NHWC", InferenceEngine::Layout::NHWC}, + {"NCHW", InferenceEngine::Layout::NCHW}, + {"NCDHW", InferenceEngine::Layout::NCDHW}, + {"NDHWC", InferenceEngine::Layout::NDHWC}, + {"OIHW", InferenceEngine::Layout::OIHW}, + {"GOIHW", InferenceEngine::Layout::GOIHW}, + {"OIDHW", InferenceEngine::Layout::OIDHW}, + {"GOIDHW", InferenceEngine::Layout::GOIDHW}, + {"SCALAR", InferenceEngine::Layout::SCALAR}, + {"C", InferenceEngine::Layout::C}, + {"CHW", InferenceEngine::Layout::CHW}, + {"HW", InferenceEngine::Layout::HW}, + {"NC", InferenceEngine::Layout::NC}, + {"CN", InferenceEngine::Layout::CN}, + {"BLOCKED", InferenceEngine::Layout::BLOCKED}}; +} // namespace + +InferenceEngine::Layout get_layout_from_string(const std::string& layout) { + return layout_str_to_enum.at(layout); +} + +const std::string& get_layout_from_enum(const InferenceEngine::Layout& layout) { + return layout_int_to_str_map.at(layout); +} + +PyObject* parse_parameter(const InferenceEngine::Parameter& param) { + // Check for std::string + if (param.is()) { + return PyUnicode_FromString(param.as().c_str()); + } + // Check for int + else if (param.is()) { + auto val = param.as(); + return PyLong_FromLong((long)val); + } + // Check for unsigned int + else if (param.is()) { + auto val = param.as(); + return PyLong_FromLong((unsigned long)val); + } + // Check for float + else if (param.is()) { + auto val = param.as(); + return PyFloat_FromDouble((double)val); + } + // Check for bool + else if (param.is()) { + auto val = param.as(); + return val ?
Py_True : Py_False; + } + // Check for std::vector + else if (param.is>()) { + auto val = param.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyObject* str_val = PyUnicode_FromString(it.c_str()); + PyList_Append(list, str_val); + } + return list; + } + // Check for std::vector + else if (param.is>()) { + auto val = param.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyLong_FromLong(it)); + } + return list; + } + // Check for std::vector + else if (param.is>()) { + auto val = param.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyLong_FromLong(it)); + } + return list; + } + // Check for std::vector + else if (param.is>()) { + auto val = param.as>(); + PyObject* list = PyList_New(0); + for (const auto& it : val) { + PyList_Append(list, PyFloat_FromDouble((double)it)); + } + return list; + } + // Check for std::tuple + else if (param.is>()) { + auto val = param.as>(); + PyObject* tuple = PyTuple_New(2); + PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); + PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); + return tuple; + } + // Check for std::tuple + else if (param.is>()) { + auto val = param.as>(); + PyObject* tuple = PyTuple_New(3); + PyTuple_SetItem(tuple, 0, PyLong_FromUnsignedLong((unsigned long)std::get<0>(val))); + PyTuple_SetItem(tuple, 1, PyLong_FromUnsignedLong((unsigned long)std::get<1>(val))); + PyTuple_SetItem(tuple, 2, PyLong_FromUnsignedLong((unsigned long)std::get<2>(val))); + return tuple; + } + // Check for std::map + else if (param.is>()) { + auto val = param.as>(); + PyObject* dict = PyDict_New(); + for (const auto& it : val) { + PyDict_SetItemString(dict, it.first.c_str(), PyUnicode_FromString(it.second.c_str())); + } + return dict; + } + // Check for std::map + else if (param.is>()) { + auto val = param.as>(); + PyObject* dict = PyDict_New(); + for (const 
auto& it : val) { + PyDict_SetItemString(dict, it.first.c_str(), PyLong_FromLong((long)it.second)); + } + return dict; + } else { + PyErr_SetString(PyExc_TypeError, "Failed to convert parameter to Python representation!"); + return (PyObject*)NULL; + } +} + +bool is_TBlob(const py::handle& blob) { + if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else if (py::isinstance>(blob)) { + return true; + } else { + return false; + } +} + +const std::shared_ptr cast_to_blob(const py::handle& blob) { + if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else if (py::isinstance>(blob)) { + return blob.cast>&>(); + } else { + IE_THROW() << "Unsupported data type for when casting to blob!"; + // return nullptr; + } +} + +void blob_from_numpy(const py::handle& arr, InferenceEngine::Blob::Ptr blob) { + if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); 
+ } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else if (py::isinstance>(arr)) { + Common::fill_blob(arr, blob); + } else { + IE_THROW() << "Unsupported data type for when filling blob!"; + } +} + +void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary) { + for (auto&& pair : dictonary) { + const std::string& name = pair.first.cast(); + if (py::isinstance(pair.second)) { + Common::blob_from_numpy(pair.second, request.GetBlob(name)); + } else if (is_TBlob(pair.second)) { + request.SetBlob(name, Common::cast_to_blob(pair.second)); + } else { + IE_THROW() << "Unable to set blob " << name << "!"; + } + } +} + +uint32_t get_optimal_number_of_requests(const InferenceEngine::ExecutableNetwork& actual) { + try { + auto parameter_value = actual.GetMetric(METRIC_KEY(SUPPORTED_METRICS)); + auto supported_metrics = parameter_value.as>(); + const std::string key = METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS); + if (std::find(supported_metrics.begin(), supported_metrics.end(), key) != supported_metrics.end()) { + parameter_value = actual.GetMetric(key); + if (parameter_value.is()) + return parameter_value.as(); + else + IE_THROW() << "Unsupported format for " << key << "!" + << " Please specify number of infer requests directly!"; + } else { + IE_THROW() << "Can't load network: " << key << " is not supported!" 
+ << " Please specify number of infer requests directly!"; + } + } catch (const std::exception& ex) { + IE_THROW() << "Can't load network: " << ex.what() << " Please specify number of infer requests directly!"; + } +} + +}; // namespace Common diff --git a/runtime/bindings/python/src/pyopenvino/core/common.hpp b/runtime/bindings/python/src/pyopenvino/core/common.hpp new file mode 100644 index 00000000000..a16edfab23a --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/common.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include "Python.h" +#include "ie_common.h" + +namespace py = pybind11; + +namespace Common +{ + template + void fill_blob(const py::handle& py_array, InferenceEngine::Blob::Ptr blob) + { + py::array_t arr = py::cast(py_array); + if (arr.size() != 0) { + // blob->allocate(); + InferenceEngine::MemoryBlob::Ptr mem_blob = InferenceEngine::as(blob); + std::copy( + arr.data(0), arr.data(0) + arr.size(), mem_blob->rwmap().as()); + } else { + py::print("Empty array!"); + } + } + + InferenceEngine::Layout get_layout_from_string(const std::string& layout); + + const std::string& get_layout_from_enum(const InferenceEngine::Layout& layout); + + PyObject* parse_parameter(const InferenceEngine::Parameter& param); + + PyObject* parse_parameter(const InferenceEngine::Parameter& param); + + bool is_TBlob(const py::handle& blob); + + const std::shared_ptr cast_to_blob(const py::handle& blob); + + void blob_from_numpy(const py::handle& _arr, InferenceEngine::Blob::Ptr &blob); + + void set_request_blobs(InferenceEngine::InferRequest& request, const py::dict& dictonary); + + uint32_t get_optimal_number_of_requests(const InferenceEngine::ExecutableNetwork& actual); +}; // namespace Common diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.cpp 
b/runtime/bindings/python/src/pyopenvino/core/containers.cpp new file mode 100644 index 00000000000..0c5fa642556 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/containers.cpp @@ -0,0 +1,42 @@ + +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/containers.hpp" + +#include +#include + +PYBIND11_MAKE_OPAQUE(Containers::PyConstInputsDataMap); +PYBIND11_MAKE_OPAQUE(Containers::PyOutputsDataMap); +PYBIND11_MAKE_OPAQUE(Containers::PyResults); + +namespace py = pybind11; + +namespace Containers { + +void regclass_PyConstInputsDataMap(py::module m) { + auto py_const_inputs_data_map = py::bind_map(m, "PyConstInputsDataMap"); + + py_const_inputs_data_map.def("keys", [](PyConstInputsDataMap& self) { + return py::make_key_iterator(self.begin(), self.end()); + }); +} + +void regclass_PyOutputsDataMap(py::module m) { + auto py_outputs_data_map = py::bind_map(m, "PyOutputsDataMap"); + + py_outputs_data_map.def("keys", [](PyOutputsDataMap& self) { + return py::make_key_iterator(self.begin(), self.end()); + }); +} + +void regclass_PyResults(py::module m) { + auto py_results = py::bind_map(m, "PyResults"); + + py_results.def("keys", [](PyResults& self) { + return py::make_key_iterator(self.begin(), self.end()); + }); +} +} // namespace Containers diff --git a/runtime/bindings/python/src/pyopenvino/core/containers.hpp b/runtime/bindings/python/src/pyopenvino/core/containers.hpp new file mode 100644 index 00000000000..4be92c0d565 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/containers.hpp @@ -0,0 +1,28 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include "ie_data.h" +#include "ie_blob.h" + +namespace py = pybind11; + +namespace Containers { + using PyConstInputsDataMap = + std::map>; + + using PyOutputsDataMap = + std::map>; + + using PyResults = + std::map>; + + void 
regclass_PyConstInputsDataMap(py::module m); + void regclass_PyOutputsDataMap(py::module m); + void regclass_PyResults(py::module m); +} \ No newline at end of file diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_blob.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_blob.cpp new file mode 100644 index 00000000000..11e6c7634a3 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_blob.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "ie_blob.h" + +#include +#include +#include + +#include + +#include "pyopenvino/core/ie_blob.hpp" +#include "pyopenvino/core/tensor_description.hpp" + +namespace py = pybind11; + +void regclass_Blob(py::module m) { + py::class_> cls(m, "Blob"); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_blob.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_blob.hpp new file mode 100644 index 00000000000..b5efc440386 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_blob.hpp @@ -0,0 +1,56 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include + +#include +#include +#include + +#include "ie_blob.h" +#include "ie_common.h" +#include "ie_layouts.h" +#include "ie_precision.hpp" + +#include "pyopenvino/core/tensor_description.hpp" + +namespace py = pybind11; + +void regclass_Blob(py::module m); + +template +void regclass_TBlob(py::module m, std::string typestring) +{ + auto pyclass_name = py::detail::c_str((std::string("TBlob") + typestring)); + + py::class_, std::shared_ptr>> cls( + m, pyclass_name); + + cls.def(py::init( + [](const InferenceEngine::TensorDesc& tensorDesc, py::array_t& arr, size_t size = 0) { + auto blob = InferenceEngine::make_shared_blob(tensorDesc); + blob->allocate(); + if (size != 0) { + std::copy(arr.data(0), arr.data(0) + size, blob->rwmap().template as()); + } + return blob; + })); + + cls.def_property_readonly("buffer", [](InferenceEngine::TBlob& self) { + 
auto blob_ptr = self.buffer().template as(); + auto shape = self.getTensorDesc().getDims(); + return py::array_t(shape, &blob_ptr[0], py::cast(self)); + }); + + cls.def_property_readonly("tensor_desc", + [](InferenceEngine::TBlob& self) { return self.getTensorDesc(); }); + + cls.def("__str__", [](InferenceEngine::TBlob& self) -> std::string { + std::stringstream ss; + auto blob_ptr = self.buffer().template as(); + auto shape = self.getTensorDesc().getDims(); + auto py_arr = py::array_t(shape, &blob_ptr[0], py::cast(self)); + ss << py_arr; + return ss.str(); + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp new file mode 100644 index 00000000000..9d37bfb00f7 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_core.cpp @@ -0,0 +1,160 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_core.hpp" + +#include + +#include + +#include "common.hpp" + +namespace py = pybind11; + +std::string to_string(py::handle handle) { + auto encodedString = PyUnicode_AsUTF8String(handle.ptr()); + return PyBytes_AsString(encodedString); +} + +void regclass_Core(py::module m) { + py::class_> cls(m, "Core"); + cls.def(py::init()); + cls.def(py::init()); + + cls.def( + "set_config", + [](InferenceEngine::Core& self, const py::dict& config, const std::string& device_name) { + std::map config_map; + for (auto item : config) { + config_map[to_string(item.first)] = to_string(item.second); + } + self.SetConfig(config_map, device_name); + }, + py::arg("config"), + py::arg("device_name")); + + cls.def( + "load_network", + [](InferenceEngine::Core& self, + const InferenceEngine::CNNNetwork& network, + const std::string& device_name, + const std::map& config) { + return self.LoadNetwork(network, device_name, config); + }, + py::arg("network"), + py::arg("device_name"), + py::arg("config") = py::dict()); + + cls.def( + 
"add_extension", + [](InferenceEngine::Core& self, const std::string& extension_path, const std::string& device_name) { + auto extension_ptr = InferenceEngine::make_so_pointer(extension_path); + auto extension = std::dynamic_pointer_cast(extension_ptr); + self.AddExtension(extension, device_name); + }, + py::arg("extension_path"), + py::arg("device_name")); + + cls.def( + "get_versions", + [](InferenceEngine::Core& self, const std::string& device_name) { + return self.GetVersions(device_name); + }, + py::arg("device_name")); + + cls.def( + "read_network", + [](InferenceEngine::Core& self, py::bytes model, py::bytes weights) { + InferenceEngine::MemoryBlob::Ptr weights_blob; + if (weights) { + std::string weights_bytes = weights; + uint8_t* bin = (uint8_t*)weights_bytes.c_str(); + size_t bin_size = weights_bytes.length(); + InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, + {bin_size}, + InferenceEngine::Layout::C); + weights_blob = InferenceEngine::make_shared_blob(tensorDesc); + weights_blob->allocate(); + memcpy(weights_blob->rwmap().as(), bin, bin_size); + } + return self.ReadNetwork(model, weights_blob); + }, + py::arg("model"), + py::arg("weights")); + + cls.def( + "read_network", + [](InferenceEngine::Core& self, const std::string& model, const std::string& weights) { + return self.ReadNetwork(model, weights); + }, + py::arg("model"), + py::arg("weights") = ""); + + cls.def( + "read_network", + [](InferenceEngine::Core& self, const std::string& model, py::handle blob) { + return self.ReadNetwork(model, Common::cast_to_blob(blob)); + }, + py::arg("model"), + py::arg("blob")); + + cls.def( + "read_network", + [](InferenceEngine::Core& self, py::object model, py::object weights) { + return self.ReadNetwork(py::str(model), py::str(weights)); + }, + py::arg("model"), + py::arg("weights") = ""); + + cls.def( + "import_network", + [](InferenceEngine::Core& self, + const std::string& model_file, + const std::string& device_name, + const 
std::map& config) { + return self.ImportNetwork(model_file, device_name, config); + }, + py::arg("model_file"), + py::arg("device_name"), + py::arg("config") = py::none()); + + cls.def( + "get_config", + [](InferenceEngine::Core& self, const std::string& device_name, const std::string& config_name) -> py::handle { + return Common::parse_parameter(self.GetConfig(device_name, config_name)); + }, + py::arg("device_name"), + py::arg("config_name")); + + cls.def( + "get_metric", + [](InferenceEngine::Core& self, std::string device_name, std::string metric_name) -> py::handle { + return Common::parse_parameter(self.GetMetric(device_name, metric_name)); + }, + py::arg("device_name"), + py::arg("metric_name")); + + cls.def("register_plugin", + &InferenceEngine::Core::RegisterPlugin, + py::arg("plugin_name"), + py::arg("device_name") = py::str()); + + cls.def("register_plugins", &InferenceEngine::Core::RegisterPlugins); + + cls.def("unregister_plugin", &InferenceEngine::Core::UnregisterPlugin, py::arg("device_name")); + + cls.def( + "query_network", + [](InferenceEngine::Core& self, + const InferenceEngine::CNNNetwork& network, + const std::string& device_name, + const std::map& config) { + return self.QueryNetwork(network, device_name, config).supportedLayersMap; + }, + py::arg("network"), + py::arg("device_name"), + py::arg("config") = py::dict()); + + cls.def_property_readonly("available_devices", &InferenceEngine::Core::GetAvailableDevices); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_core.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_core.hpp new file mode 100644 index 00000000000..c1f477a6390 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_core.hpp @@ -0,0 +1,10 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include + +namespace py = pybind11; + +void regclass_Core(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_data.cpp 
b/runtime/bindings/python/src/pyopenvino/core/ie_data.cpp new file mode 100644 index 00000000000..d1fd3bf760d --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_data.cpp @@ -0,0 +1,39 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_data.hpp" + +#include +#include + +#include "common.hpp" + +namespace py = pybind11; + +void regclass_Data(py::module m) { + py::class_> cls(m, "DataPtr"); + + cls.def_property( + "layout", + [](InferenceEngine::Data& self) { + return Common::get_layout_from_enum(self.getLayout()); + }, + [](InferenceEngine::Data& self, const std::string& layout) { + self.setLayout(Common::get_layout_from_string(layout)); + }); + + cls.def_property( + "precision", + [](InferenceEngine::Data& self) { + return self.getPrecision().name(); + }, + [](InferenceEngine::Data& self, const std::string& precision) { + self.setPrecision(InferenceEngine::Precision::FromStr(precision)); + }); + + cls.def_property_readonly("shape", &InferenceEngine::Data::getDims); + + cls.def_property_readonly("name", &InferenceEngine::Data::getName); + // cls.def_property_readonly("initialized", ); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_data.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_data.hpp new file mode 100644 index 00000000000..6b1459714ec --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_data.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_Data(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp new file mode 100644 index 00000000000..37199110f09 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.cpp @@ -0,0 +1,91 @@ +// Copyright (C) 2021 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 + +#include "pyopenvino/core/ie_executable_network.hpp" + +#include +#include + +#include "common.hpp" +#include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/ie_infer_request.hpp" +#include "pyopenvino/core/ie_input_info.hpp" + +namespace py = pybind11; + +void regclass_ExecutableNetwork(py::module m) { + py::class_> cls( + m, + "ExecutableNetwork"); + + cls.def("create_infer_request", [](InferenceEngine::ExecutableNetwork& self) { + auto request = InferRequestWrapper(self.CreateInferRequest()); + // Get Inputs and Outputs info from executable network + request._inputsInfo = self.GetInputsInfo(); + request._outputsInfo = self.GetOutputsInfo(); + // request.user_callback_defined = false; + return request; + }); + + cls.def( + "_infer", + [](InferenceEngine::ExecutableNetwork& self, const py::dict& inputs) { + // Create temporary InferRequest + auto request = self.CreateInferRequest(); + // Update inputs if there are any + if (!inputs.empty()) { + Common::set_request_blobs(request, inputs); //, self.GetInputsInfo()); + } + // Call Infer function + request.Infer(); + // Get output Blobs and return + Containers::PyResults results; + InferenceEngine::ConstOutputsDataMap outputsInfo = self.GetOutputsInfo(); + for (auto& out : outputsInfo) { + results[out.first] = request.GetBlob(out.first); + } + return results; + }, + py::arg("inputs")); + + cls.def("get_exec_graph_info", &InferenceEngine::ExecutableNetwork::GetExecGraphInfo); + + cls.def( + "export", + [](InferenceEngine::ExecutableNetwork& self, const std::string& modelFileName) { + self.Export(modelFileName); + }, + py::arg("model_file")); + + cls.def( + "get_config", + [](InferenceEngine::ExecutableNetwork& self, const std::string& config_name) -> py::handle { + return Common::parse_parameter(self.GetConfig(config_name)); + }, + py::arg("config_name")); + + cls.def( + "get_metric", + [](InferenceEngine::ExecutableNetwork& self, const std::string& metric_name) -> 
py::handle { + return Common::parse_parameter(self.GetMetric(metric_name)); + }, + py::arg("metric_name")); + + cls.def_property_readonly("input_info", [](InferenceEngine::ExecutableNetwork& self) { + Containers::PyConstInputsDataMap inputs; + const InferenceEngine::ConstInputsDataMap& inputsInfo = self.GetInputsInfo(); + for (const auto& in : inputsInfo) { + inputs[in.first] = in.second; + } + return inputs; + }); + + cls.def_property_readonly("output_info", [](InferenceEngine::ExecutableNetwork& self) { + Containers::PyOutputsDataMap outputs; + InferenceEngine::ConstOutputsDataMap outputsInfo = self.GetOutputsInfo(); + for (auto& out : outputsInfo) { + outputs[out.first] = out.second; + } + return outputs; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp new file mode 100644 index 00000000000..bd6f909f46e --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_executable_network.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_ExecutableNetwork(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp new file mode 100644 index 00000000000..d54e7cce69c --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.cpp @@ -0,0 +1,233 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "pyopenvino/core/ie_infer_queue.hpp" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pyopenvino/core/common.hpp" +#include "pyopenvino/core/ie_infer_request.hpp" + +#define INVALID_ID -1 + +namespace py = pybind11; + +class InferQueue { +public: + InferQueue(std::vector requests, + 
std::queue idle_handles, + std::vector user_ids) + : _requests(requests), + _idle_handles(idle_handles), + _user_ids(user_ids) { + this->setDefaultCallbacks(); + _last_id = -1; + } + + ~InferQueue() { + _requests.clear(); + } + + bool _is_ready() { + py::gil_scoped_release release; + std::unique_lock lock(_mutex); + _cv.wait(lock, [this] { + return !(_idle_handles.empty()); + }); + + return !(_idle_handles.empty()); + } + + py::dict _getIdleRequestInfo() { + py::gil_scoped_release release; + std::unique_lock lock(_mutex); + _cv.wait(lock, [this] { + return !(_idle_handles.empty()); + }); + + size_t request_id = _idle_handles.front(); + + InferenceEngine::StatusCode status = + _requests[request_id]._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + + if (status == InferenceEngine::StatusCode::RESULT_NOT_READY) { + status = _requests[request_id]._request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + } + + py::dict request_info = py::dict(); + request_info["id"] = request_id; + request_info["status"] = status; + + return request_info; + } + + size_t getIdleRequestId() { + // Wait for any of _idle_handles + py::gil_scoped_release release; + std::unique_lock lock(_mutex); + _cv.wait(lock, [this] { + return !(_idle_handles.empty()); + }); + + size_t idle_request_id = _idle_handles.front(); + _idle_handles.pop(); + + return idle_request_id; + } + + std::vector waitAll() { + // Wait for all requests to return with callback thus updating + // _idle_handles so it matches the size of requests + py::gil_scoped_release release; + std::unique_lock lock(_mutex); + _cv.wait(lock, [this] { + return _idle_handles.size() == _requests.size(); + }); + + std::vector statuses; + + for (size_t handle = 0; handle < _requests.size(); handle++) { + statuses.push_back(_requests[handle]._request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); + } + + return statuses; + } + + void setDefaultCallbacks() { + for (size_t handle = 0; handle < 
_requests.size(); handle++) { + _requests[handle]._request.SetCompletionCallback([this, handle /* ... */]() { + _requests[handle]._endTime = Time::now(); + // Add idle handle to queue + _idle_handles.push(handle); + // Notify locks in getIdleRequestId() or waitAll() functions + _cv.notify_one(); + }); + } + } + + void setCustomCallbacks(py::function f_callback) { + for (size_t handle = 0; handle < _requests.size(); handle++) { + _requests[handle]._request.SetCompletionCallback([this, f_callback, handle /* ... */]() { + _requests[handle]._endTime = Time::now(); + InferenceEngine::StatusCode statusCode = + _requests[handle]._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + if (statusCode == InferenceEngine::StatusCode::RESULT_NOT_READY) { + statusCode = InferenceEngine::StatusCode::OK; + } + // Acquire GIL, execute Python function + py::gil_scoped_acquire acquire; + f_callback(_requests[handle], statusCode, _user_ids[handle]); + // Add idle handle to queue + _idle_handles.push(handle); + // Notify locks in getIdleRequestId() or waitAll() functions + _cv.notify_one(); + }); + } + } + + std::vector _requests; + std::queue _idle_handles; + std::vector _user_ids; // user ID can be any Python object + size_t _last_id; + std::mutex _mutex; + std::condition_variable _cv; +}; + +void regclass_InferQueue(py::module m) { + py::class_> cls(m, "InferQueue"); + + cls.def(py::init([](InferenceEngine::ExecutableNetwork& net, size_t jobs) { + if (jobs == 0) { + const InferenceEngine::ExecutableNetwork& _net = net; + jobs = (size_t)Common::get_optimal_number_of_requests(_net); + } + + std::vector requests; + std::queue idle_handles; + std::vector user_ids(jobs); + + for (size_t handle = 0; handle < jobs; handle++) { + auto request = InferRequestWrapper(net.CreateInferRequest()); + // Get Inputs and Outputs info from executable network + request._inputsInfo = net.GetInputsInfo(); + request._outputsInfo = net.GetOutputsInfo(); + + requests.push_back(request); + 
idle_handles.push(handle); + } + + return new InferQueue(requests, idle_handles, user_ids); + }), + py::arg("network"), + py::arg("jobs") = 0); + + cls.def( + "_async_infer", + [](InferQueue& self, const py::dict inputs, py::object userdata) { + // getIdleRequestId function has an intention to block InferQueue + // until there is at least one idle (free to use) InferRequest + auto handle = self.getIdleRequestId(); + // Set new inputs label/id from user + self._user_ids[handle] = userdata; + // Update inputs of picked InferRequest + if (!inputs.empty()) { + Common::set_request_blobs(self._requests[handle]._request, inputs); + } + // Now GIL can be released - we are NOT working with Python objects in this block + { + py::gil_scoped_release release; + self._requests[handle]._startTime = Time::now(); + // Start InferRequest in asynchronus mode + self._requests[handle]._request.StartAsync(); + } + }, + py::arg("inputs"), + py::arg("userdata")); + + cls.def("is_ready", [](InferQueue& self) { + return self._is_ready(); + }); + + cls.def("wait_all", [](InferQueue& self) { + return self.waitAll(); + }); + + cls.def("get_idle_request_info", [](InferQueue& self) { + return self._getIdleRequestInfo(); + }); + + cls.def("set_infer_callback", [](InferQueue& self, py::function f_callback) { + self.setCustomCallbacks(f_callback); + }); + + cls.def("__len__", [](InferQueue& self) { + return self._requests.size(); + }); + + cls.def( + "__iter__", + [](InferQueue& self) { + return py::make_iterator(self._requests.begin(), self._requests.end()); + }, + py::keep_alive<0, 1>()); /* Keep set alive while iterator is used */ + + cls.def("__getitem__", [](InferQueue& self, size_t i) { + return self._requests[i]; + }); + + cls.def_property_readonly("userdata", [](InferQueue& self) { + return self._user_ids; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.hpp new file mode 100644 index 
00000000000..23aa72fd072 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_queue.hpp @@ -0,0 +1,10 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_InferQueue(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp new file mode 100644 index 00000000000..f45ddd6a5cd --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.cpp @@ -0,0 +1,210 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "pyopenvino/core/ie_infer_request.hpp" + +#include +#include + +#include + +#include "pyopenvino/core/common.hpp" +#include "pyopenvino/core/containers.hpp" +#include "pyopenvino/core/ie_executable_network.hpp" +#include "pyopenvino/core/ie_preprocess_info.hpp" + +namespace py = pybind11; + +void regclass_InferRequest(py::module m) { + py::class_> cls(m, "InferRequest"); + + cls.def( + "set_batch", + [](InferRequestWrapper& self, const int size) { + self._request.SetBatch(size); + }, + py::arg("size")); + + cls.def( + "get_blob", + [](InferRequestWrapper& self, const std::string& name) { + return self._request.GetBlob(name); + }, + py::arg("name")); + + cls.def( + "set_blob", + [](InferRequestWrapper& self, const std::string& name, py::handle& blob) { + self._request.SetBlob(name, Common::cast_to_blob(blob)); + }, + py::arg("name"), + py::arg("blob")); + + cls.def( + "set_blob", + [](InferRequestWrapper& self, + const std::string& name, + py::handle& blob, + const InferenceEngine::PreProcessInfo& info) { + self._request.SetBlob(name, Common::cast_to_blob(blob)); + }, + py::arg("name"), + py::arg("blob"), + py::arg("info")); + + cls.def( + "set_input", + [](InferRequestWrapper& self, const py::dict& inputs) { + Common::set_request_blobs(self._request, inputs); + }, + 
py::arg("inputs")); + + cls.def( + "set_output", + [](InferRequestWrapper& self, const py::dict& results) { + Common::set_request_blobs(self._request, results); + }, + py::arg("results")); + + cls.def( + "_infer", + [](InferRequestWrapper& self, const py::dict& inputs) { + // Update inputs if there are any + if (!inputs.empty()) { + Common::set_request_blobs(self._request, inputs); + } + // Call Infer function + self._startTime = Time::now(); + self._request.Infer(); + self._endTime = Time::now(); + // Get output Blobs and return + Containers::PyResults results; + for (auto& out : self._outputsInfo) { + results[out.first] = self._request.GetBlob(out.first); + } + return results; + }, + py::arg("inputs")); + + cls.def( + "_async_infer", + [](InferRequestWrapper& self, const py::dict inputs, py::object userdata) { + py::gil_scoped_release release; + if (!inputs.empty()) { + Common::set_request_blobs(self._request, inputs); + } + // TODO: check for None so next async infer userdata can be updated + // if (!userdata.empty()) + // { + // if (user_callback_defined) + // { + // self._request.SetCompletionCallback([self, userdata]() { + // // py::gil_scoped_acquire acquire; + // auto statusCode = const_cast(self).Wait( + // InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + // self._request.user_callback(self, statusCode, userdata); + // // py::gil_scoped_release release; + // }); + // } + // else + // { + // py::print("There is no callback function!"); + // } + // } + self._startTime = Time::now(); + self._request.StartAsync(); + }, + py::arg("inputs"), + py::arg("userdata")); + + cls.def("cancel", [](InferRequestWrapper& self) { + self._request.Cancel(); + }); + + cls.def( + "wait", + [](InferRequestWrapper& self, int64_t millis_timeout) { + py::gil_scoped_release release; + return self._request.Wait(millis_timeout); + }, + py::arg("millis_timeout") = InferenceEngine::IInferRequest::WaitMode::RESULT_READY); + + cls.def( + "set_completion_callback", + 
[](InferRequestWrapper& self, py::function f_callback, py::object userdata) { + self._request.SetCompletionCallback([&self, f_callback, userdata]() { + self._endTime = Time::now(); + InferenceEngine::StatusCode statusCode = + self._request.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); + if (statusCode == InferenceEngine::StatusCode::RESULT_NOT_READY) { + statusCode = InferenceEngine::StatusCode::OK; + } + // Acquire GIL, execute Python function + py::gil_scoped_acquire acquire; + f_callback(self, statusCode, userdata); + }); + }, + py::arg("f_callback"), + py::arg("userdata")); + + cls.def("get_perf_counts", [](InferRequestWrapper& self) { + std::map perfMap; + perfMap = self._request.GetPerformanceCounts(); + py::dict perf_map; + + for (auto it : perfMap) { + py::dict profile_info; + switch (it.second.status) { + case InferenceEngine::InferenceEngineProfileInfo::EXECUTED: + profile_info["status"] = "EXECUTED"; + break; + case InferenceEngine::InferenceEngineProfileInfo::NOT_RUN: + profile_info["status"] = "NOT_RUN"; + break; + case InferenceEngine::InferenceEngineProfileInfo::OPTIMIZED_OUT: + profile_info["status"] = "OPTIMIZED_OUT"; + break; + default: + profile_info["status"] = "UNKNOWN"; + } + profile_info["exec_type"] = it.second.exec_type; + profile_info["layer_type"] = it.second.layer_type; + profile_info["cpu_time"] = it.second.cpu_uSec; + profile_info["real_time"] = it.second.realTime_uSec; + profile_info["execution_index"] = it.second.execution_index; + perf_map[it.first.c_str()] = profile_info; + } + return perf_map; + }); + + cls.def( + "preprocess_info", + [](InferRequestWrapper& self, const std::string& name) { + return self._request.GetPreProcess(name); + }, + py::arg("name")); + + // cls.def_property_readonly("preprocess_info", [](InferRequestWrapper& self) { + // + // }); + + cls.def_property_readonly("input_blobs", [](InferRequestWrapper& self) { + Containers::PyResults input_blobs; + for (auto& in : self._inputsInfo) { + 
input_blobs[in.first] = self._request.GetBlob(in.first); + } + return input_blobs; + }); + + cls.def_property_readonly("output_blobs", [](InferRequestWrapper& self) { + Containers::PyResults output_blobs; + for (auto& out : self._outputsInfo) { + output_blobs[out.first] = self._request.GetBlob(out.first); + } + return output_blobs; + }); + + cls.def_property_readonly("latency", [](InferRequestWrapper& self) { + return self.getLatency(); + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp new file mode 100644 index 00000000000..13afbac4403 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_infer_request.hpp @@ -0,0 +1,43 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +#include + +#include +#include +#include + +namespace py = pybind11; + +typedef std::chrono::high_resolution_clock Time; +typedef std::chrono::nanoseconds ns; + +class InferRequestWrapper { +public: + InferRequestWrapper(InferenceEngine::InferRequest request) + : _request(request) + { + } + // ~InferRequestWrapper() = default; + + // bool user_callback_defined; + // py::function user_callback; + + double getLatency() { + auto execTime = std::chrono::duration_cast(_endTime - _startTime); + return static_cast(execTime.count()) * 0.000001; + } + + InferenceEngine::InferRequest _request; + InferenceEngine::ConstInputsDataMap _inputsInfo; + InferenceEngine::ConstOutputsDataMap _outputsInfo; + Time::time_point _startTime; + Time::time_point _endTime; +}; + +void regclass_InferRequest(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_input_info.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_input_info.cpp new file mode 100644 index 00000000000..d47020b537a --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_input_info.cpp @@ -0,0 +1,78 @@ +// Copyright (C) 2021 Intel Corporation 
+// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_input_info.hpp" + +#include + +#include + +#include "common.hpp" + +namespace py = pybind11; + +class ConstInputInfoWrapper { +public: + ConstInputInfoWrapper() = default; + ~ConstInputInfoWrapper() = default; + const InferenceEngine::InputInfo& cref() const { + return value; + } + +protected: + const InferenceEngine::InputInfo& ref() { + return this->value; + } + const InferenceEngine::InputInfo value = InferenceEngine::InputInfo(); +}; + +void regclass_InputInfo(py::module m) { + // Workaround for constant class + py::class_> cls_const(m, "InputInfoCPtr"); + + cls_const.def(py::init<>()); + + cls_const.def_property_readonly("input_data", [](const ConstInputInfoWrapper& self) { + return self.cref().getInputData(); + }); + cls_const.def_property_readonly("precision", [](const ConstInputInfoWrapper& self) { + return self.cref().getPrecision().name(); + }); + cls_const.def_property_readonly("tensor_desc", [](const ConstInputInfoWrapper& self) { + return self.cref().getTensorDesc(); + }); + cls_const.def_property_readonly("name", [](const ConstInputInfoWrapper& self) { + return self.cref().name(); + }); + // Mutable version + py::class_> cls(m, "InputInfoPtr"); + + cls.def(py::init<>()); + + cls.def_property("input_data", + &InferenceEngine::InputInfo::getInputData, + &InferenceEngine::InputInfo::setInputData); + cls.def_property( + "layout", + [](InferenceEngine::InputInfo& self) { + return Common::get_layout_from_enum(self.getLayout()); + }, + [](InferenceEngine::InputInfo& self, const std::string& layout) { + self.setLayout(Common::get_layout_from_string(layout)); + }); + cls.def_property( + "precision", + [](InferenceEngine::InputInfo& self) { + return self.getPrecision().name(); + }, + [](InferenceEngine::InputInfo& self, const std::string& precision) { + self.setPrecision(InferenceEngine::Precision::FromStr(precision)); + }); + cls.def_property_readonly("tensor_desc", 
&InferenceEngine::InputInfo::getTensorDesc); + cls.def_property_readonly("name", &InferenceEngine::InputInfo::name); + cls.def_property_readonly("preprocess_info", [](InferenceEngine::InputInfo& self) { + InferenceEngine::PreProcessInfo& preprocess = self.getPreProcess(); + return preprocess; + }); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_input_info.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_input_info.hpp new file mode 100644 index 00000000000..69d17221bc2 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_input_info.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_InputInfo(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp new file mode 100644 index 00000000000..1fdaf5bf292 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_network.cpp @@ -0,0 +1,105 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_network.hpp" + +#include +#include +#include + +#include + +#include "ngraph/function.hpp" +#include "pyopenvino/core/ie_input_info.hpp" + +// using PyInputsDataMap = std::map>; +// +// PYBIND11_MAKE_OPAQUE(PyInputsDataMap); + +namespace py = pybind11; + +void regclass_IENetwork(py::module m) { + py::class_> cls(m, "IENetwork"); + cls.def(py::init()); + + cls.def(py::init([](std::shared_ptr& function) { + InferenceEngine::CNNNetwork cnnNetwork(function); + return std::make_shared(cnnNetwork); + })); + + cls.def("reshape", + [](InferenceEngine::CNNNetwork& self, const std::map>& input_shapes) { + self.reshape(input_shapes); + }); + + cls.def( + "add_outputs", + [](InferenceEngine::CNNNetwork& self, py::handle& outputs) { + int i = 0; + py::list _outputs; + if (!py::isinstance(outputs)) { + if 
(py::isinstance(outputs)) { + _outputs.append(outputs.cast()); + } else if (py::isinstance(outputs)) { + _outputs.append(outputs.cast()); + } + } else { + _outputs = outputs.cast(); + } + for (py::handle output : _outputs) { + if (py::isinstance(_outputs[i])) { + self.addOutput(output.cast(), 0); + } else if (py::isinstance(output)) { + py::tuple output_tuple = output.cast(); + self.addOutput(output_tuple[0].cast(), output_tuple[1].cast()); + } else { + IE_THROW() << "Incorrect type " << output.get_type() << "for layer to add at index " << i + << ". Expected string with layer name or tuple with two elements: layer name as " + "first element and port id as second"; + } + i++; + } + }, + py::arg("outputs")); + cls.def("add_output", &InferenceEngine::CNNNetwork::addOutput, py::arg("layer_name"), py::arg("output_index") = 0); + + cls.def( + "serialize", + [](InferenceEngine::CNNNetwork& self, const std::string& path_to_xml, const std::string& path_to_bin) { + self.serialize(path_to_xml, path_to_bin); + }, + py::arg("path_to_xml"), + py::arg("path_to_bin") = ""); + + cls.def("get_function", [](InferenceEngine::CNNNetwork& self) { + return self.getFunction(); + }); + + cls.def("get_ov_name_for_tensor", &InferenceEngine::CNNNetwork::getOVNameForTensor, py::arg("orig_name")); + + cls.def_property("batch_size", + &InferenceEngine::CNNNetwork::getBatchSize, + &InferenceEngine::CNNNetwork::setBatchSize); + + // auto py_inputs_data_map = py::bind_map(m, "PyInputsDataMap"); + + // py_inputs_data_map.def("keys", [](PyInputsDataMap& self) { + // return py::make_key_iterator(self.begin(), self.end()); + // }); + + cls.def_property_readonly("input_info", [](InferenceEngine::CNNNetwork& self) { + std::map> inputs; + const InferenceEngine::InputsDataMap& inputsInfo = self.getInputsInfo(); + for (auto& in : inputsInfo) { + inputs[in.first] = in.second; + } + return inputs; + }); + + cls.def_property_readonly("outputs", [](InferenceEngine::CNNNetwork& self) { + return 
self.getOutputsInfo(); + }); + + cls.def_property_readonly("name", &InferenceEngine::CNNNetwork::getName); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_network.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_network.hpp new file mode 100644 index 00000000000..9cbd5e43456 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_network.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_IENetwork(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_parameter.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_parameter.cpp new file mode 100644 index 00000000000..6110d74e6d3 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_parameter.cpp @@ -0,0 +1,16 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_parameter.hpp" + +#include + +namespace py = pybind11; + +void regclass_Parameter(py::module m) { + py::class_> cls(m, "Parameter"); + + cls.def(py::init()); + cls.def(py::init()); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_parameter.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_parameter.hpp new file mode 100644 index 00000000000..435f8205a6a --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_parameter.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_Parameter(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_preprocess_info.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_preprocess_info.cpp new file mode 100644 index 00000000000..7accf2f2b66 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_preprocess_info.cpp @@ -0,0 +1,70 @@ +// Copyright (C) 2021 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_preprocess_info.hpp" + +#include +#include + +#include + +#include "pyopenvino/core/common.hpp" + +namespace py = pybind11; + +void regclass_PreProcessInfo(py::module m) { + py::class_>( + m, + "PreProcessChannel") + .def_readwrite("std_scale", &InferenceEngine::PreProcessChannel::stdScale) + .def_readwrite("mean_value", &InferenceEngine::PreProcessChannel::meanValue) + .def_readwrite("mean_data", &InferenceEngine::PreProcessChannel::meanData); + + py::class_ cls(m, "PreProcessInfo"); + + cls.def(py::init()); + cls.def("__getitem__", [](InferenceEngine::PreProcessInfo& self, size_t& index) { + return self[index]; + }); + cls.def("get_number_of_channels", &InferenceEngine::PreProcessInfo::getNumberOfChannels); + cls.def("init", &InferenceEngine::PreProcessInfo::init); + cls.def("set_mean_image", [](InferenceEngine::PreProcessInfo& self, py::handle meanImage) { + self.setMeanImage(Common::cast_to_blob(meanImage)); + }); + cls.def("set_mean_image_for_channel", + [](InferenceEngine::PreProcessInfo& self, py::handle meanImage, const size_t channel) { + self.setMeanImageForChannel(Common::cast_to_blob(meanImage), channel); + }); + cls.def_property("mean_variant", + &InferenceEngine::PreProcessInfo::getMeanVariant, + &InferenceEngine::PreProcessInfo::setVariant); + cls.def_property("resize_algorithm", + &InferenceEngine::PreProcessInfo::getResizeAlgorithm, + &InferenceEngine::PreProcessInfo::setResizeAlgorithm); + cls.def_property("color_format", + &InferenceEngine::PreProcessInfo::getColorFormat, + &InferenceEngine::PreProcessInfo::setColorFormat); + + py::enum_(m, "MeanVariant") + .value("MEAN_IMAGE", InferenceEngine::MeanVariant::MEAN_IMAGE) + .value("MEAN_VALUE", InferenceEngine::MeanVariant::MEAN_VALUE) + .value("NONE", InferenceEngine::MeanVariant::NONE) + .export_values(); + + py::enum_(m, "ResizeAlgorithm") + .value("NO_RESIZE", InferenceEngine::ResizeAlgorithm::NO_RESIZE) + 
.value("RESIZE_BILINEAR", InferenceEngine::ResizeAlgorithm::RESIZE_BILINEAR) + .value("RESIZE_AREA", InferenceEngine::ResizeAlgorithm::RESIZE_AREA) + .export_values(); + + py::enum_(m, "ColorFormat") + .value("RAW", InferenceEngine::ColorFormat::RAW) + .value("RGB", InferenceEngine::ColorFormat::RGB) + .value("BGR", InferenceEngine::ColorFormat::BGR) + .value("RGBX", InferenceEngine::ColorFormat::RGBX) + .value("BGRX", InferenceEngine::ColorFormat::BGRX) + .value("NV12", InferenceEngine::ColorFormat::NV12) + .value("I420", InferenceEngine::ColorFormat::I420) + .export_values(); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_preprocess_info.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_preprocess_info.hpp new file mode 100644 index 00000000000..cc762ada0cb --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_preprocess_info.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_PreProcessInfo(py::module m); \ No newline at end of file diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp b/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp new file mode 100644 index 00000000000..158cda68cea --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_version.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/ie_version.hpp" + +#include + +namespace py = pybind11; + +void regclass_Version(py::module m) { + py::class_ cls(m, "Version"); + + cls.def_readonly("build_number", &InferenceEngine::Version::buildNumber); + cls.def_readonly("description", &InferenceEngine::Version::description); + cls.def_readwrite("api_version", &InferenceEngine::Version::apiVersion); + + cls.def_property_readonly("major", [](InferenceEngine::Version& self) { + return IE_VERSION_MAJOR; + }); + + 
cls.def_property_readonly("minor", [](InferenceEngine::Version& self) { + return IE_VERSION_MINOR; + }); +} \ No newline at end of file diff --git a/runtime/bindings/python/src/pyopenvino/core/ie_version.hpp b/runtime/bindings/python/src/pyopenvino/core/ie_version.hpp new file mode 100644 index 00000000000..5e98a3a8cf1 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/ie_version.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_Version(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/core/tensor_description.cpp b/runtime/bindings/python/src/pyopenvino/core/tensor_description.cpp new file mode 100644 index 00000000000..b9382f34e5c --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/tensor_description.cpp @@ -0,0 +1,58 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "pyopenvino/core/tensor_description.hpp" + +#include +#include +#include + +#include + +#include "common.hpp" + +namespace py = pybind11; +using namespace InferenceEngine; + +void regclass_TensorDecription(py::module m) { + py::class_> cls(m, "TensorDesc"); + cls.def(py::init()); + cls.def(py::init([](const std::string& precision, const SizeVector& dims, const std::string& layout) { + return TensorDesc(Precision::FromStr(precision), dims, Common::get_layout_from_string(layout)); + })); + + cls.def_property( + "layout", + [](TensorDesc& self) { + return Common::get_layout_from_enum(self.getLayout()); + }, + [](TensorDesc& self, const std::string& layout) { + self.setLayout(Common::get_layout_from_string(layout)); + }); + + cls.def_property( + "precision", + [](TensorDesc& self) { + return self.getPrecision().name(); + }, + [](TensorDesc& self, const std::string& precision) { + self.setPrecision(InferenceEngine::Precision::FromStr(precision)); + }); + + cls.def_property( + "dims", + 
[](TensorDesc& self) { + return self.getDims(); + }, + [](TensorDesc& self, const SizeVector& dims) { + self.setDims(dims); + }); + + cls.def( + "__eq__", + [](const TensorDesc& a, const TensorDesc b) { + return a == b; + }, + py::is_operator()); +} diff --git a/runtime/bindings/python/src/pyopenvino/core/tensor_description.hpp b/runtime/bindings/python/src/pyopenvino/core/tensor_description.hpp new file mode 100644 index 00000000000..806c7b9d3b8 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/core/tensor_description.hpp @@ -0,0 +1,11 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include + +namespace py = pybind11; + +void regclass_TensorDecription(py::module m); diff --git a/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp new file mode 100644 index 00000000000..33865d6e688 --- /dev/null +++ b/runtime/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -0,0 +1,91 @@ +// Copyright (C) 2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include +#include + +#include +#include +#include + +#include "core/containers.hpp" +#include "core/ie_blob.hpp" +#include "core/ie_core.hpp" +#include "core/ie_data.hpp" +#include "core/ie_executable_network.hpp" +#include "core/ie_infer_queue.hpp" +#include "core/ie_infer_request.hpp" +#include "core/ie_input_info.hpp" +#include "core/ie_network.hpp" +#include "core/ie_parameter.hpp" +#include "core/ie_preprocess_info.hpp" +#include "core/ie_version.hpp" +#include "core/tensor_description.hpp" + +namespace py = pybind11; + +std::string get_version() { + auto version = InferenceEngine::GetInferenceEngineVersion(); + std::string version_str = std::to_string(version->apiVersion.major) + "."; + version_str += std::to_string(version->apiVersion.minor) + "."; + version_str += version->buildNumber; + return version_str; +} + +PYBIND11_MODULE(pyopenvino, m) { + m.doc() = "Package 
openvino.pyopenvino which wraps openvino C++ APIs"; + m.def("get_version", &get_version); + py::enum_(m, "StatusCode") + .value("OK", InferenceEngine::StatusCode::OK) + .value("GENERAL_ERROR", InferenceEngine::StatusCode::GENERAL_ERROR) + .value("NOT_IMPLEMENTED", InferenceEngine::StatusCode::NOT_IMPLEMENTED) + .value("NETWORK_NOT_LOADED", InferenceEngine::StatusCode::NETWORK_NOT_LOADED) + .value("PARAMETER_MISMATCH", InferenceEngine::StatusCode::PARAMETER_MISMATCH) + .value("NOT_FOUND", InferenceEngine::StatusCode::NOT_FOUND) + .value("OUT_OF_BOUNDS", InferenceEngine::StatusCode::OUT_OF_BOUNDS) + .value("UNEXPECTED", InferenceEngine::StatusCode::UNEXPECTED) + .value("REQUEST_BUSY", InferenceEngine::StatusCode::REQUEST_BUSY) + .value("RESULT_NOT_READY", InferenceEngine::StatusCode::RESULT_NOT_READY) + .value("NOT_ALLOCATED", InferenceEngine::StatusCode::NOT_ALLOCATED) + .value("INFER_NOT_STARTED", InferenceEngine::StatusCode::INFER_NOT_STARTED) + .value("NETWORK_NOT_READ", InferenceEngine::StatusCode::NETWORK_NOT_READ) + .export_values(); + + py::enum_(m, "WaitMode") + .value("RESULT_READY", InferenceEngine::IInferRequest::WaitMode::RESULT_READY) + .value("STATUS_ONLY", InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY) + .export_values(); + + regclass_Core(m); + regclass_IENetwork(m); + + regclass_Data(m); + regclass_TensorDecription(m); + + // Registering template of Blob + regclass_Blob(m); + // Registering specific types of Blobs + regclass_TBlob(m, "Float32"); + regclass_TBlob(m, "Float64"); + regclass_TBlob(m, "Int64"); + regclass_TBlob(m, "Uint64"); + regclass_TBlob(m, "Int32"); + regclass_TBlob(m, "Uint32"); + regclass_TBlob(m, "Int16"); + regclass_TBlob(m, "Uint16"); + regclass_TBlob(m, "Int8"); + regclass_TBlob(m, "Uint8"); + + // Registering specific types of containers + Containers::regclass_PyConstInputsDataMap(m); + Containers::regclass_PyOutputsDataMap(m); + Containers::regclass_PyResults(m); + + regclass_ExecutableNetwork(m); + 
regclass_InferRequest(m); + regclass_Version(m); + regclass_Parameter(m); + regclass_InputInfo(m); + regclass_InferQueue(m); + regclass_PreProcessInfo(m); +} diff --git a/runtime/bindings/python/tests/conftest.py b/runtime/bindings/python/tests/conftest.py index 4b4fa151343..865ae43552e 100644 --- a/runtime/bindings/python/tests/conftest.py +++ b/runtime/bindings/python/tests/conftest.py @@ -9,6 +9,37 @@ import tests from pathlib import Path +def image_path(): + path_to_repo = os.environ["DATA_PATH"] + path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") + return path_to_img + + +def model_path(is_myriad=False): + path_to_repo = os.environ["MODELS_PATH"] + if not is_myriad: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") + else: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") + return (test_xml, test_bin) + + +def model_onnx_path(): + path_to_repo = os.environ["MODELS_PATH"] + test_onnx = os.path.join(path_to_repo, "models", "test_model", "test_model.onnx") + return test_onnx + + +def plugins_path(): + path_to_repo = os.environ["DATA_PATH"] + plugins_xml = os.path.join(path_to_repo, "ie_class", "plugins.xml") + plugins_win_xml = os.path.join(path_to_repo, "ie_class", "plugins_win.xml") + plugins_osx_xml = os.path.join(path_to_repo, "ie_class", "plugins_apple.xml") + return (plugins_xml, plugins_win_xml, plugins_osx_xml) + + def _get_default_model_zoo_dir(): return Path(os.getenv("ONNX_HOME", Path.home() / ".onnx/model_zoo")) @@ -76,3 +107,8 @@ def pytest_collection_modifyitems(config, items): skip_this_backend = keywords[backend_name] if skip_this_backend in item.keywords: item.add_marker(skip_markers[backend_name]) + + +@pytest.fixture(scope="session") +def device(): + return 
os.environ.get("TEST_DEVICE") if os.environ.get("TEST_DEVICE") else "CPU" diff --git a/runtime/bindings/python/tests/test_inference_engine/__init__.py b/runtime/bindings/python/tests/test_inference_engine/__init__.py new file mode 100644 index 00000000000..46a1a3756d2 --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/__init__.py @@ -0,0 +1,2 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 diff --git a/runtime/bindings/python/tests/test_inference_engine/test_core.py b/runtime/bindings/python/tests/test_inference_engine/test_core.py new file mode 100644 index 00000000000..191baaab0bb --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/test_core.py @@ -0,0 +1,282 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import pytest +import numpy as np +import os +from sys import platform +from pathlib import Path + +import ngraph as ng +import openvino as ov +from ngraph.impl import Function, Shape, Type +from ngraph.impl.op import Parameter +from openvino import TensorDesc, Blob + +from ..conftest import model_path, model_onnx_path, plugins_path + +test_net_xml, test_net_bin = model_path() +test_net_onnx = model_onnx_path() +plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path() + + +def test_blobs(): + input_shape = [1, 3, 4, 4] + input_data_float32 = (np.random.rand(*input_shape) - 0.5).astype(np.float32) + + td = TensorDesc("FP32", input_shape, "NCHW") + + input_blob_float32 = Blob(td, input_data_float32) + + assert np.all(np.equal(input_blob_float32.buffer, input_data_float32)) + + input_data_int16 = (np.random.rand(*input_shape) + 0.5).astype(np.int16) + + td = TensorDesc("I16", input_shape, "NCHW") + + input_blob_i16 = Blob(td, input_data_int16) + + assert np.all(np.equal(input_blob_i16.buffer, input_data_int16)) + + +@pytest.mark.skip(reason="Fix") +def test_ie_core_class(): + input_shape = [1, 3, 4, 4] + param = ng.parameter(input_shape, 
np.float32, name="parameter") + relu = ng.relu(param, name="relu") + func = Function([relu], [param], "test") + func.get_ordered_ops()[2].friendly_name = "friendly" + + cnn_network = ov.IENetwork(func) + + ie_core = ov.Core() + ie_core.set_config({}, device_name="CPU") + executable_network = ie_core.load_network(cnn_network, "CPU", {}) + + td = TensorDesc("FP32", input_shape, "NCHW") + + # from IPython import embed; embed() + + request = executable_network.create_infer_request() + input_data = np.random.rand(*input_shape) - 0.5 + + expected_output = np.maximum(0.0, input_data) + + input_blob = Blob(td, input_data) + + request.set_input({"parameter": input_blob}) + request.infer() + + result = request.get_blob("relu").buffer + + assert np.allclose(result, expected_output) + + +def test_load_network(device): + ie = ov.Core() + net = ie.read_network(model=test_net_xml, weights=test_net_bin) + exec_net = ie.load_network(net, device) + assert isinstance(exec_net, ov.ExecutableNetwork) + + +def test_read_network(): + ie_core = ov.Core() + net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) + assert isinstance(net, ov.IENetwork) + + net = ie_core.read_network(model=test_net_xml) + assert isinstance(net, ov.IENetwork) + + +def test_read_network_from_blob(): + ie_core = ov.Core() + model = open(test_net_xml).read() + blob = ov.blob_from_file(test_net_bin) + net = ie_core.read_network(model=model, blob=blob) + assert isinstance(net, ov.IENetwork) + + +def test_read_network_from_blob_valid(): + ie_core = ov.Core() + model = open(test_net_xml).read() + blob = ov.blob_from_file(test_net_bin) + net = ie_core.read_network(model=model, blob=blob) + ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) + assert net.name == ref_net.name + assert net.batch_size == ref_net.batch_size + ii_net = net.input_info + ii_net2 = ref_net.input_info + o_net = net.outputs + o_net2 = ref_net.outputs + assert ii_net.keys() == ii_net2.keys() + assert o_net.keys() 
== o_net2.keys() + + +def test_read_network_as_path(): + ie_core = ov.Core() + net = ie_core.read_network(model=Path(test_net_xml), weights=Path(test_net_bin)) + assert isinstance(net, ov.IENetwork) + + net = ie_core.read_network(model=test_net_xml, weights=Path(test_net_bin)) + assert isinstance(net, ov.IENetwork) + + net = ie_core.read_network(model=Path(test_net_xml)) + assert isinstance(net, ov.IENetwork) + + +def test_read_network_from_onnx(): + ie_core = ov.Core() + net = ie_core.read_network(model=test_net_onnx) + assert isinstance(net, ov.IENetwork) + + +def test_read_network_from_onnx_as_path(): + ie_core = ov.Core() + net = ie_core.read_network(model=Path(test_net_onnx)) + assert isinstance(net, ov.IENetwork) + + +def test_read_net_from_buffer(): + ie_core = ov.Core() + with open(test_net_bin, "rb") as f: + bin = f.read() + with open(model_path()[0], "rb") as f: + xml = f.read() + net = ie_core.read_network(model=xml, weights=bin) + assert isinstance(net, ov.IENetwork) + + +def test_net_from_buffer_valid(): + ie_core = ov.Core() + with open(test_net_bin, "rb") as f: + bin = f.read() + with open(model_path()[0], "rb") as f: + xml = f.read() + net = ie_core.read_network(model=xml, weights=bin) + ref_net = ie_core.read_network(model=test_net_xml, weights=test_net_bin) + assert net.name == ref_net.name + assert net.batch_size == ref_net.batch_size + ii_net = net.input_info + ii_net2 = ref_net.input_info + o_net = net.outputs + o_net2 = ref_net.outputs + assert ii_net.keys() == ii_net2.keys() + assert o_net.keys() == o_net2.keys() + + +def test_get_version(device): + ie = ov.Core() + version = ie.get_versions(device) + assert isinstance(version, dict), "Returned version must be a dictionary" + assert device in version, f"{device} plugin version wasn't found in versions" + assert hasattr(version[device], "major"), "Returned version has no field 'major'" + assert hasattr(version[device], "minor"), "Returned version has no field 'minor'" + assert 
hasattr(version[device], "description"), "Returned version has no field 'description'" + assert hasattr(version[device], "build_number"), "Returned version has no field 'build_number'" + + +def test_available_devices(device): + ie = ov.Core() + devices = ie.available_devices + assert device in devices, f"Current device '{device}' is not listed in " \ + f"available devices '{', '.join(devices)}'" + + +def test_get_config(): + ie = ov.Core() + conf = ie.get_config("CPU", "CPU_BIND_THREAD") + assert conf == "YES" + + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", + reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") +def test_get_metric_list_of_str(): + ie = ov.Core() + param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES") + assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \ + f"metric must be a list but {type(param)} is returned" + assert all(isinstance(v, str) for v in param), \ + "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' metric are strings!" + + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", + reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") +def test_get_metric_tuple_of_two_ints(): + ie = ov.Core() + param = ie.get_metric("CPU", "RANGE_FOR_STREAMS") + assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \ + f"metric must be tuple but {type(param)} is returned" + assert all(isinstance(v, int) for v in param), \ + "Not all of the parameter values for 'RANGE_FOR_STREAMS' metric are integers!" 
+ + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", + reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") +def test_get_metric_tuple_of_three_ints(): + ie = ov.Core() + param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS") + assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \ + f"metric must be tuple but {type(param)} is returned" + assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \ + "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!" + + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", + reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test") +def test_get_metric_str(): + ie = ov.Core() + param = ie.get_metric("CPU", "FULL_DEVICE_NAME") + assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \ + f"metric must be string but {type(param)} is returned" + + +def test_query_network(device): + ie = ov.Core() + net = ie.read_network(model=test_net_xml, weights=test_net_bin) + query_res = ie.query_network(network=net, device_name=device) + func_net = net.get_function() + ops_net = func_net.get_ordered_ops() + ops_net_names = [op.friendly_name for op in ops_net] + assert [key for key in query_res.keys() if key not in ops_net_names] == [], \ + "Not all network layers present in query_network results" + assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers" + + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test") +def test_register_plugin(): + ie = ov.Core() + ie.register_plugin("MKLDNNPlugin", "BLA") + net = ie.read_network(model=test_net_xml, weights=test_net_bin) + exec_net = ie.load_network(net, "BLA") + assert isinstance(exec_net, ov.ExecutableNetwork), \ + "Cannot load the network to the registered plugin with name 'BLA'" + + 
+@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test") +def test_register_plugins(): + ie = ov.Core() + if platform == "linux" or platform == "linux2": + ie.register_plugins(plugins_xml) + elif platform == "darwin": + ie.register_plugins(plugins_osx_xml) + elif platform == "win32": + ie.register_plugins(plugins_win_xml) + + net = ie.read_network(model=test_net_xml, weights=test_net_bin) + exec_net = ie.load_network(net, "CUSTOM") + assert isinstance(exec_net, + ov.ExecutableNetwork), "Cannot load the network to " \ + "the registered plugin with name 'CUSTOM' " \ + "registered in the XML file" + + +def test_create_IENetwork_from_nGraph(): + element_type = Type.f32 + param = Parameter(element_type, Shape([1, 3, 22, 22])) + relu = ng.relu(param) + func = Function([relu], [param], "test") + cnnNetwork = ov.IENetwork(func) + assert cnnNetwork is not None + func2 = cnnNetwork.get_function() + assert func2 is not None + assert len(func2.get_ops()) == 3 diff --git a/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py new file mode 100644 index 00000000000..038eb5fdff2 --- /dev/null +++ b/runtime/bindings/python/tests/test_inference_engine/test_infer_request.py @@ -0,0 +1,171 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import os +import pytest + +from openvino import Core, Blob, TensorDesc, StatusCode + + +def image_path(): + path_to_repo = os.environ["DATA_PATH"] + path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp") + return path_to_img + + +def model_path(is_myriad=False): + path_to_repo = os.environ["MODELS_PATH"] + if not is_myriad: + test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin") + else: + test_xml = 
os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml") + test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin") + return (test_xml, test_bin) + + +def read_image(): + import cv2 + n, c, h, w = (1, 3, 32, 32) + image = cv2.imread(path_to_img) + if image is None: + raise FileNotFoundError("Input image not found") + + image = cv2.resize(image, (h, w)) / 255 + image = image.transpose((2, 0, 1)).astype(np.float32) + image = image.reshape((n, c, h, w)) + return image + + +is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD" +test_net_xml, test_net_bin = model_path(is_myriad) +path_to_img = image_path() + + +def test_get_perf_counts(device): + ie_core = Core() + net = ie_core.read_network(test_net_xml, test_net_bin) + ie_core.set_config({"PERF_COUNT": "YES"}, device) + exec_net = ie_core.load_network(net, device) + img = read_image() + request = exec_net.create_infer_request() + td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") + input_blob = Blob(td, img) + request.set_input({"data": input_blob}) + request.infer() + pc = request.get_perf_counts() + assert pc["29"]["status"] == "EXECUTED" + assert pc["29"]["layer_type"] == "FullyConnected" + del exec_net + del ie_core + del net + + +@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", + reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, " + "Dynamic batch fully supported only on CPU") +@pytest.mark.skip(reason="Fix") +def test_set_batch_size(device): + ie_core = Core() + ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device) + net = ie_core.read_network(test_net_xml, test_net_bin) + net.batch_size = 10 + data = np.ones(shape=net.input_info["data"].input_data.shape) + exec_net = ie_core.load_network(net, device) + data[0] = read_image()[0] + request = exec_net.create_infer_request() + request.set_batch(1) + td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") + input_blob = Blob(td, data) + request.set_input({"data": input_blob}) + 
request.infer() + assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \ + "Incorrect data for 1st batch" + del exec_net + del ie_core + del net + + +@pytest.mark.skip(reason="Fix") +def test_set_zero_batch_size(device): + ie_core = Core() + net = ie_core.read_network(test_net_xml, test_net_bin) + exec_net = ie_core.load_network(net, device) + request = exec_net.create_infer_request() + with pytest.raises(ValueError) as e: + request.set_batch(0) + assert "Batch size should be positive integer number but 0 specified" in str(e.value) + del exec_net + del ie_core + del net + + +@pytest.mark.skip(reason="Fix") +def test_set_negative_batch_size(device): + ie_core = Core() + net = ie_core.read_network(test_net_xml, test_net_bin) + exec_net = ie_core.load_network(net, device) + request = exec_net.create_infer_request() + with pytest.raises(ValueError) as e: + request.set_batch(-1) + assert "Batch size should be positive integer number but -1 specified" in str(e.value) + del exec_net + del ie_core + del net + + +def test_blob_setter(device): + ie_core = Core() + net = ie_core.read_network(test_net_xml, test_net_bin) + exec_net_1 = ie_core.load_network(network=net, device_name=device) + + net.input_info["data"].layout = "NHWC" + exec_net_2 = ie_core.load_network(network=net, device_name=device) + + img = read_image() + + request1 = exec_net_1.create_infer_request() + tensor_desc = TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW") + img_blob1 = Blob(tensor_desc, img) + request1.set_input({"data": img_blob1}) + request1.infer() + res_1 = np.sort(request1.get_blob("fc_out").buffer) + + img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32) + tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NHWC") + img_blob = Blob(tensor_desc, img) + request = exec_net_2.create_infer_request() + request.set_blob("data", img_blob) + request.infer() + res_2 = np.sort(request.get_blob("fc_out").buffer) + assert np.allclose(res_1, res_2, atol=1e-2, 
rtol=1e-2) + + +def test_cancel(device): + ie_core = Core() + net = ie_core.read_network(test_net_xml, test_net_bin) + exec_net = ie_core.load_network(net, device) + img = read_image() + td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW") + input_blob = Blob(td, img) + request = exec_net.create_infer_request() + + def callback(req, code, array): + array.append(42) + + data = [] + request.set_completion_callback(callback, data) + request.set_input({"data": input_blob}) + request.async_infer() + request.cancel() + with pytest.raises(RuntimeError) as e: + request.wait() + assert "[ INFER_CANCELLED ]" in str(e.value) + # check if callback has executed + assert data == [42] + + request.async_infer() + status = request.wait() + assert status == StatusCode.OK + assert data == [42, 42] diff --git a/runtime/bindings/python/tox.ini b/runtime/bindings/python/tox.ini index 7f0acd7e96d..c34d286ca33 100644 --- a/runtime/bindings/python/tox.ini +++ b/runtime/bindings/python/tox.ini @@ -20,7 +20,7 @@ commands= flake8 {posargs:src/ setup.py} flake8 --ignore=D100,D101,D102,D103,D104,D105,D107,W503 tests/ # ignore lack of docs in tests mypy --config-file=tox.ini {posargs:src/} - pytest --backend={env:NGRAPH_BACKEND} tests -v -k 'not _cuda' --ignore=tests/test_onnx/test_zoo_models.py + pytest --backend={env:NGRAPH_BACKEND} tests -v -k 'not _cuda' --ignore=tests/test_onnx/test_zoo_models.py --ignore=tests/test_inference_engine [testenv:zoo_models] commands=