[Python API] Remove offline transformations from old python api (#9121)

* [Python API] Remove offline transformations from old python api

* try to fix import error

* try to fix pylint

* try to fix pylint2

* Use new api in graph_utils

* Fix pylint

* Try to fix pylint

* Use serialize from pass manager

* try to skip tests

* try to use new ir

Co-authored-by: AlexeyLebedev1 <alexey.lebedev@intel.com>
This commit is contained in:
Anastasia Kuporosova 2022-01-10 17:32:55 +03:00 committed by GitHub
parent 04386bb667
commit 8fe5484645
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 12 additions and 375 deletions

View File

@ -64,7 +64,6 @@ endfunction()
set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory (src/openvino/inference_engine)
add_subdirectory (src/openvino/offline_transformations)
if(ENABLE_WHEEL)
add_subdirectory(wheel)

View File

@ -1,64 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

set(TARGET_NAME "offline_transformations_api")

# Route all build artifacts of this module into a dedicated output directory.
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/offline_transformations)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/offline_transformations)
set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/offline_transformations)
set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/offline_transformations)

set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api_impl_defs.pxd
            ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api.pyx
            ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api_impl.hpp
            ${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api_impl.cpp)

# The Cython module must be translated to C++, not C.
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_api.pyx
                            PROPERTIES CYTHON_IS_CXX ON)

# create target
cython_add_module(${TARGET_NAME} ${SOURCES})
add_dependencies(${TARGET_NAME} ie_api)

ov_python_disable_intel_warnings(${TARGET_NAME})

if(COMMAND ie_add_vs_version_file)
    # Fixed typo in the description: "Transformatoins" -> "Transformations".
    ie_add_vs_version_file(NAME ${TARGET_NAME}
                           FILEDESCRIPTION "Offline Transformations Python library")
endif()

# Use the namespaced imported target when building against the developer package.
if(InferenceEngineDeveloperPackage_FOUND)
    list(APPEND link_libraries IE::offline_transformations)
else()
    list(APPEND link_libraries offline_transformations)
endif()

target_include_directories(${TARGET_NAME} SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inference_engine")
target_include_directories(${TARGET_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
target_link_libraries(${TARGET_NAME} PRIVATE openvino::runtime ${link_libraries})

# Compatibility with python 2.7 which has deprecated "register" specifier
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
    target_compile_options(${TARGET_NAME} PRIVATE "-Wno-error=register")
endif()

add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}
                        EXCLUDE_PATTERNS ".*\\.cxx;.*\\.pxd;.*\\.pyx")

# perform copy of the pure-python package marker next to the built extension
add_custom_command(TARGET ${TARGET_NAME}
                   POST_BUILD
                   COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/offline_transformations/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py
                   VERBATIM)

# install
install(TARGETS ${TARGET_NAME}
        RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}
        LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT})

install(PROGRAMS __init__.py
        DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/offline_transformations
        COMPONENT ${PYTHON_COMPONENT})

View File

@ -1,32 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import os
import sys

if sys.platform == 'win32':
    # Installer, yum and pip place the openvino DLLs in different
    # directories, and each candidate location must be visible to the
    # DLL loader before the extension module below is imported.
    #
    # If you're using a custom installation of openvino, add the location
    # of the openvino DLLs to your system PATH instead.
    #
    # Default candidate: the libs directory of a pip installation.
    candidate_dirs = [os.path.join(os.path.dirname(__file__), '..', '..', 'openvino', 'libs')]
    # setupvars.bat exports every library path via OPENVINO_LIB_PATHS.
    env_lib_paths = os.getenv('OPENVINO_LIB_PATHS')
    if env_lib_paths:
        candidate_dirs.extend(env_lib_paths.split(';'))
    for candidate in candidate_dirs:
        resolved = os.path.join(os.path.dirname(__file__), candidate)
        if os.path.isdir(resolved):
            # On Windows, with Python >= 3.8, DLLs are no longer resolved
            # through PATH; register the directory explicitly.
            if sys.version_info >= (3, 8):
                os.add_dll_directory(os.path.abspath(resolved))
            else:
                os.environ['PATH'] = os.path.abspath(resolved) + ';' + os.environ['PATH']

from .offline_transformations_api import *

__all__ = ['ApplyMOCTransformations']

View File

@ -1,51 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from . cimport offline_transformations_api_impl_defs as C
from ..inference_engine.ie_api cimport IENetwork

from libcpp cimport bool
from libcpp.string cimport string
from libcpp.map cimport map
from libc.stdint cimport int64_t


def ApplyMOCTransformations(IENetwork network, bool cf):
    """Run the MOC transformation pipeline over the network in place."""
    C.ApplyMOCTransformations(network.impl, cf)


def ApplyPOTTransformations(IENetwork network, string device):
    """Run POT-specific transformations for the given target device."""
    C.ApplyPOTTransformations(network.impl, device)


def ApplyMakeStatefulTransformation(IENetwork network, param_res_names : dict):
    """Turn the given Parameter->Result pairs into network state.

    Keys and values of ``param_res_names`` must both be ``str``.
    """
    cdef map[string, string] native_pairs
    for key, value in param_res_names.items():
        if type(key) != str or type(value) != str:
            raise TypeError("Only string keys and values are allowed!")
        native_pairs[key.encode()] = value.encode()
    C.ApplyMakeStatefulTransformation(network.impl, native_pairs)


def ApplyLowLatencyTransformation(IENetwork network, bool use_const_initializer = True):
    """Run the LowLatency2 transformation over the network in place."""
    C.ApplyLowLatencyTransformation(network.impl, use_const_initializer)


def CompressModelTransformation(IENetwork network):
    """Compress the model's float constants (see the C++ implementation)."""
    C.CompressModelTransformation(network.impl)


def ApplyPruningTransformation(IENetwork network):
    """Run the pruning transformation over the network in place."""
    C.ApplyPruningTransformation(network.impl)


def GenerateMappingFile(IENetwork network, string path, bool extract_names):
    """Write a tensor-name mapping file for the network to ``path``."""
    C.GenerateMappingFile(network.impl, path, extract_names)


def Serialize(IENetwork network, string path_to_xml, string path_to_bin):
    """Serialize the network to the given IR xml/bin file pair."""
    C.Serialize(network.impl, path_to_xml, path_to_bin)


def CheckAPI():
    """Sanity check that the C++ transformation API is callable."""
    C.CheckAPI()

View File

@ -1,94 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "offline_transformations_api_impl.hpp"

#include <generate_mapping_file.hpp>
#include <ngraph/opsets/opset6.hpp>
#include <ngraph/pass/constant_folding.hpp>
#include <ngraph/pass/low_latency.hpp>
#include <ngraph/pass/manager.hpp>
#include <openvino/pass/make_stateful.hpp>
#include <pot_transformations.hpp>
#include <pruning.hpp>
#include <transformations/common_optimizations/compress_float_constants.hpp>
#include <transformations/common_optimizations/division_by_zero_fp16_resolver.hpp>
#include <transformations/common_optimizations/mark_precision_sensitive_subgraphs.hpp>
#include <transformations/common_optimizations/moc_transformations.hpp>
#include <transformations/control_flow/unroll_tensor_iterator.hpp>
#include <transformations/serialize.hpp>

// Each binding below constructs a one-off pass manager, registers a single
// transformation (or a small fixed sequence) and runs it over the nGraph
// function held by the wrapped network.

void InferenceEnginePython::ApplyMOCTransformations(InferenceEnginePython::IENetwork network, bool cf) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::MOCTransformations>(cf);
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::ApplyPOTTransformations(InferenceEnginePython::IENetwork network, std::string device) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::POTTransformations>(std::move(device));
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network,
                                                          bool use_const_initializer) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::LowLatency2>(use_const_initializer);
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::ApplyMakeStatefulTransformation(InferenceEnginePython::IENetwork network,
                                                            std::map<std::string, std::string>& param_res_names) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ov::pass::MakeStateful>(param_res_names);
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::ApplyPruningTransformation(InferenceEnginePython::IENetwork network) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::Pruning>();
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::GenerateMappingFile(InferenceEnginePython::IENetwork network,
                                                std::string path,
                                                bool extract_names) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::GenerateMappingFile>(path, extract_names);
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::CompressModelTransformation(InferenceEnginePython::IENetwork network) {
    // Fixed pipeline: resolve fp16 division-by-zero, mark precision-sensitive
    // subgraphs, then compress float constants.
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ov::pass::DivisionByZeroFP16Resolver>();
    pass_manager.register_pass<ov::pass::MarkPrecisionSensitiveSubgraphs>();
    pass_manager.register_pass<ov::pass::CompressFloatConstants>();
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::Serialize(InferenceEnginePython::IENetwork network,
                                      std::string path_to_xml,
                                      std::string path_to_bin) {
    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::Serialize>(path_to_xml, path_to_bin);
    pass_manager.run_passes(network.actual->getFunction());
}

void InferenceEnginePython::CheckAPI() {
    // Build a tiny Parameter -> ShapeOf -> Reshape graph, constant-fold it,
    // and verify the folded topology is the expected Parameter + Constant
    // feeding a single Reshape result.
    std::shared_ptr<ngraph::Function> model;
    {
        auto param = std::make_shared<ngraph::opset6::Parameter>(ngraph::element::f32, ngraph::Shape{1, 1000, 4});
        auto reshape_op =
            std::make_shared<ngraph::opset6::Reshape>(param, std::make_shared<ngraph::opset6::ShapeOf>(param), true);
        model = std::make_shared<ngraph::Function>(ngraph::NodeVector{reshape_op}, ngraph::ParameterVector{param});
    }
    ngraph::pass::Manager folding_manager;
    folding_manager.register_pass<ngraph::pass::ConstantFolding>();
    folding_manager.run_passes(model);

    assert(model->get_results().size() == 1);
    auto folded = model->get_result()->input_value(0).get_node_shared_ptr();
    assert(std::dynamic_pointer_cast<ngraph::opset6::Parameter>(folded->input_value(0).get_node_shared_ptr()));
    assert(std::dynamic_pointer_cast<ngraph::opset6::Constant>(folded->input_value(1).get_node_shared_ptr()));
}

View File

@ -1,34 +0,0 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// Declarations of the C++ entry points wrapped by the Cython module
// offline_transformations_api.pyx. Each function mutates the nGraph
// function held by the passed network in place.
#pragma once
#include <map>
#include <string>
#include "Python.h"
#include "ie_api_impl.hpp"
namespace InferenceEnginePython {
// Runs the MOC transformation pipeline; cf is forwarded to MOCTransformations
// (presumably a constant-folding toggle -- confirm against that pass).
void ApplyMOCTransformations(InferenceEnginePython::IENetwork network, bool cf);
// Runs POT-specific transformations for the given target device string.
void ApplyPOTTransformations(InferenceEnginePython::IENetwork network, std::string device);
// Runs the LowLatency2 transformation.
void ApplyLowLatencyTransformation(InferenceEnginePython::IENetwork network, bool use_const_initializer = true);
// Converts the Parameter->Result pairs named in param_res_names into state.
void ApplyMakeStatefulTransformation(InferenceEnginePython::IENetwork network,
std::map<std::string, std::string>& param_res_names);
// Runs the pruning transformation.
void ApplyPruningTransformation(InferenceEnginePython::IENetwork network);
// Writes a tensor-name mapping file for the network to path.
void GenerateMappingFile(InferenceEnginePython::IENetwork network, std::string path, bool extract_names);
// Compresses float constants (fp16 division-by-zero resolution first).
void CompressModelTransformation(InferenceEnginePython::IENetwork network);
// Serializes the network to the given IR xml/bin pair.
void Serialize(InferenceEnginePython::IENetwork network, std::string path_to_xml, std::string path_to_bin);
// Self-test: folds a small graph and asserts the expected result topology.
void CheckAPI();
};  // namespace InferenceEnginePython

View File

@ -1,27 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from libcpp cimport bool
from libcpp.string cimport string
from libcpp.map cimport map

from ..inference_engine.ie_api_impl_defs cimport IENetwork


cdef extern from "offline_transformations_api_impl.hpp" namespace "InferenceEnginePython":
    # C++ entry points wrapped by offline_transformations_api.pyx.
    cdef void ApplyMOCTransformations(IENetwork network, bool cf)
    cdef void ApplyPOTTransformations(IENetwork network, string device)
    cdef void ApplyLowLatencyTransformation(IENetwork network, bool use_const_initializer)
    cdef void ApplyMakeStatefulTransformation(IENetwork network, map[string, string]& in_out_names)
    cdef void ApplyPruningTransformation(IENetwork network)
    cdef void CompressModelTransformation(IENetwork network)
    cdef void GenerateMappingFile(IENetwork network, string path, bool extract_names)
    cdef void Serialize(IENetwork network, string path_to_xml, string path_to_bin)
    cdef void CheckAPI()

View File

@ -1,63 +0,0 @@
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from openvino.inference_engine import IECore, IENetwork
from openvino.offline_transformations import ApplyMOCTransformations, ApplyLowLatencyTransformation, \
    ApplyPruningTransformation, ApplyMakeStatefulTransformation

import ngraph as ng
from ngraph.impl.op import Parameter
from ngraph.impl import Function, Shape, Type

from conftest import model_path

test_net_xml, test_net_bin = model_path()


def get_test_cnnnetwork():
    """Build a minimal Parameter -> Relu -> Result graph wrapped in IENetwork."""
    param = ng.parameter(Shape([1, 3, 22, 22]), name="parameter")
    relu = ng.relu(param)
    res = ng.result(relu, name='result')
    func = Function([res], [param], 'test')
    caps = Function.to_capsule(func)
    cnn_network = IENetwork(caps)
    # Fixed: identity comparison instead of `!= None` (PEP 8 / E711).
    assert cnn_network is not None
    return cnn_network


def test_moc_transformations():
    """MOC pass must keep the 3-op topology of the trivial network."""
    net = get_test_cnnnetwork()
    ApplyMOCTransformations(net, False)

    f = ng.function_from_cnn(net)
    assert f is not None
    assert len(f.get_ops()) == 3


def test_low_latency_transformations():
    """LowLatency2 must be a no-op on a network without loops/state."""
    net = get_test_cnnnetwork()
    ApplyLowLatencyTransformation(net)

    f = ng.function_from_cnn(net)
    assert f is not None
    assert len(f.get_ops()) == 3


def test_make_stateful_transformations():
    """MakeStateful must absorb the named Parameter/Result pair into state."""
    net = get_test_cnnnetwork()
    ApplyMakeStatefulTransformation(net, {"parameter": "result"})

    f = ng.function_from_cnn(net)
    assert f is not None
    assert len(f.get_parameters()) == 0
    assert len(f.get_results()) == 0


def test_pruning_transformations():
    """Pruning must keep the 3-op topology of the trivial network."""
    net = get_test_cnnnetwork()
    ApplyPruningTransformation(net)

    f = ng.function_from_cnn(net)
    assert f is not None
    assert len(f.get_ops()) == 3

View File

@ -20,7 +20,7 @@ endif()
# create target for openvino.wheel
set(openvino_wheel_deps ie_api offline_transformations_api)
set(openvino_wheel_deps ie_api)
foreach(_target ov_runtime_libraries ie_plugins _pyngraph pyopenvino)
if(TARGET ${_target})
list(APPEND openvino_wheel_deps ${_target})

View File

@ -7,14 +7,16 @@ from copy import deepcopy
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.utils.ir_reader.restore_graph import restore_graph_from_ir, save_restored_graph
from openvino.tools.mo.utils.logger import init_logger
from openvino.inference_engine import IECore # pylint: disable=E0611
from openvino.offline_transformations import ApplyPOTTransformations # pylint: disable=import-error,no-name-in-module
from openvino.runtime import Core # pylint: disable=E0401,E0611
from openvino.runtime.passes import Manager # pylint: disable=E0401,E0611
from openvino.offline_transformations_pybind import apply_pot_transformations # pylint: disable=import-error,no-name-in-module
from ..graph.passes import ModelPreprocessor, remove_converts, add_removed_converts
from ..utils.logger import stdout_redirect
init_logger('ERROR', False)
ie = IECore()
core = Core()
pass_manager = Manager()
def load_graph(model_config, target_device='ANY'):
@ -28,11 +30,12 @@ def load_graph(model_config, target_device='ANY'):
xml_path = model_config.model
if target_device in special_transform_devices:
network = ie.read_network(model=xml_path, weights=bin_path)
ApplyPOTTransformations(network, target_device.encode('utf-8'))
model = core.read_model(model=xml_path, weights=bin_path)
apply_pot_transformations(model, target_device.encode('utf-8'))
bin_path = serialized_bin_path
xml_path = serialized_xml_path
network.serialize(xml_path, bin_path)
pass_manager.register_pass(pass_name="Serialize", xml_path=xml_path, bin_path=bin_path)
pass_manager.run_passes(model)
if not os.path.exists(xml_path):
raise RuntimeError('Input model xml should link to an existing file. Please, provide a correct path.')

View File

@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76959e68b61e35bca1e4d0a815a2fdd2a50fbfa1ca6d4cb6218d6d57f76603b2
size 23366
oid sha256:5ed7c8ba0078d053ca7408709468b5f8450c8c4236938bc4107911f969a6f1ed
size 23441