Turn on IE and NG python APIs by default inside Model Optimizer (#5721)
* Turn on IE and NG python APIs by default inside Model Optimizer
* Remove fallback
* Fix mo_ut
* Remove MO wheel tests
* Add model_optimizer custom target to gather all MO deps inside a single target
* Fix PR comments
This commit is contained in:
parent ad852f78b4 · commit ea3ed8af21
.github/workflows/mo.yml (vendored): 38 lines changed
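In short, the Inference Engine and nGraph Python bindings stop being optional: where Model Optimizer previously warned and fell back to the IR produced by the default emitter, it now fails fast. A minimal sketch of that behavioural shift (illustrative only, not the committed code; both function names here are hypothetical):

```python
# Illustrative before/after of the dependency check; not the committed code.

def check_deps_before(ie_found: bool) -> bool:
    # pre-commit: a missing IE Python API only triggered a warning,
    # and the pipeline carried on without offline transformations
    if not ie_found:
        print("[ WARNING ] Could not find the Inference Engine Python API.")
    return ie_found

def check_deps_after(ie_found: bool) -> None:
    # post-commit: the same condition is fatal
    if not ie_found:
        raise RuntimeError("Could not find the Inference Engine or nGraph Python API.")
```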
```diff
@@ -63,41 +63,3 @@ jobs:
         python3 -m xmlrunner discover -p *_test.py --output=../mo-ut-logs
       working-directory: model-optimizer
 
-  build_wheel:
-    name: Build Python wheel
-    runs-on: ubuntu-18.04
-    steps:
-      - uses: actions/checkout@v2
-
-      - name: Install dependencies
-        run: |
-          python3 -m pip install --upgrade pip
-          python3 -m pip install wheel setuptools
-          python3 -m pip install tensorflow==2.3.0
-
-      - name: Build
-        run: |
-          python3 setup.py sdist bdist_wheel
-        working-directory: model-optimizer
-
-      - name: Test package content
-        run: |
-          echo "src = open('openvino_mo.egg-info/SOURCES.txt', 'rt').read().split()" | tee -a test_wheel.py
-          echo "ref = open('automation/package_BOM.txt', 'rt').read().split()" | tee -a test_wheel.py
-          echo "for name in ref:" | tee -a test_wheel.py
-          echo "    if name.endswith('.py'):" | tee -a test_wheel.py
-          echo "        assert name in src or './' + name in src, name + ' file missed'" | tee -a test_wheel.py
-          python3 test_wheel.py
-        working-directory: model-optimizer
-
-      - name: Test conversion
-        run: |
-          wget -q http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz
-          tar -xf mobilenet_v1_1.0_224.tgz
-          python3 -m pip install model-optimizer/dist/*.whl
-          python3 -m mo --input_model mobilenet_v1_1.0_224_frozen.pb --input_shape "[1,224,224,3]"
-
-      - uses: actions/upload-artifact@v2
-        with:
-          name: mo_wheel
-          path: "model-optimizer/dist/*.whl"
```
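The deleted build_wheel job is removed entirely, including its package-content check, which was assembled by echoing Python lines into test_wheel.py. For readability, here is that check written out as a plain script (a reconstruction of the deleted logic, not code added by the commit):

```python
# Reconstruction of the deleted test_wheel.py: every .py file listed in the
# package BOM must also appear in the sdist manifest produced by setup.py.
src = open('openvino_mo.egg-info/SOURCES.txt', 'rt').read().split()
ref = open('automation/package_BOM.txt', 'rt').read().split()
for name in ref:
    if name.endswith('.py'):
        assert name in src or './' + name in src, name + ' file missed'
```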
```diff
@@ -68,6 +68,10 @@ if(ENABLE_WHEEL)
     add_subdirectory(wheel)
 endif()
 
+if (NGRAPH_PYTHON_BUILD_ENABLE)
+    add_dependencies(ie_api _pyngraph)
+endif()
+
 # install
 
 ie_cpack_add_component(${PYTHON_VERSION})
```
```diff
@@ -1,6 +1,14 @@
 # Copyright (C) 2018-2021 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+if (NOT NGRAPH_PYTHON_BUILD_ENABLE)
+    message(WARNING "Please enable nGraph Python API (_pyngraph) target to enable Model Optimizer target")
+elseif(NOT ENABLE_PYTHON)
+    message(WARNING "Please enable IE Python API (ie_api and offline_transformations_api) targets to enable Model Optimizer target")
+else()
+    add_custom_target(model_optimizer DEPENDS ie_api offline_transformations_api inference_engine_ir_reader)
+endif()
+
 # install
 
 ie_cpack_add_component(model_optimizer)
```
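The new model_optimizer custom target only bundles the build-time dependencies (ie_api, offline_transformations_api, inference_engine_ir_reader); at run time the equivalent requirement is that the corresponding Python modules import cleanly. A minimal sketch of that runtime check, assuming the module names used by the imports added later in this diff:

```python
# Minimal runtime counterpart of the new build-time dependencies; the
# module names mirror the imports added in import_core_modules below.
def mo_python_apis_available() -> bool:
    try:
        import openvino                           # IE Python API (ie_api)
        import openvino.offline_transformations   # offline_transformations_api
        import ngraph                             # nGraph Python API (_pyngraph)
    except ImportError:
        return False
    return True
```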
```diff
@@ -144,20 +144,18 @@ def prepare_ir(argv: argparse.Namespace):
 
     # This try-except is additional reinsurance that the IE
     # dependency search does not break the MO pipeline
-    try:
-        argv.ie_is_available = find_ie_version(silent=argv.silent)
-
-        if not argv.ie_is_available and not argv.silent:
-            print("[ WARNING ] Could not find the Inference Engine Python API. At this moment, the Inference Engine dependency is not required, but will be required in future releases.")
-            print("[ WARNING ] Consider building the Inference Engine Python API from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\"".format(
+    def raise_ie_not_found():
+        raise Error("Could not find the Inference Engine or nGraph Python API.\n"
+                    "Consider building the Inference Engine and nGraph Python APIs from sources or try to install OpenVINO (TM) Toolkit using \"install_prerequisites.{}\"".format(
                 "bat" if sys.platform == "windows" else "sh"))
-            # If the IE was not found, it will not print the MO version, so we have to print it manually
-            print("{}: \t{}".format("Model Optimizer version", get_version()))
+    try:
+        if not find_ie_version(silent=argv.silent):
+            raise_ie_not_found()
     except Exception as e:
-        argv.ie_is_available = False
+        raise_ie_not_found()
 
     # This is just to check that transform key is valid and transformations are available
-    check_available_transforms(parse_transform(argv.transform), argv.ie_is_available)
+    check_available_transforms(parse_transform(argv.transform))
 
     if argv.legacy_ir_generation and len(argv.transform) != 0:
         raise Error("--legacy_ir_generation and --transform keys can not be used at the same time.")
```
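The rewritten check funnels two failure modes, find_ie_version() returning False and find_ie_version() raising, into the same Error via the local raise_ie_not_found() closure. The same pattern in isolation (generic names, not from the commit):

```python
# Generic sketch of the fail-fast pattern above: a falsy probe result and
# a probe exception both end in the same user-facing error.
def require(probe, message: str) -> None:
    def fail():
        raise RuntimeError(message)
    try:
        if not probe():
            fail()
    except Exception:
        # also catches fail() itself; the re-raised message is identical
        fail()

# usage: require(lambda: find_ie_version(silent=True), "IE/nGraph Python API not found")
```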
```diff
@@ -261,10 +259,6 @@ def emit_ir(graph: Graph, argv: argparse.Namespace):
     mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
     input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else []
 
-    # Remove temporary ie_is_available key from argv no to have it in IR
-    ie_is_available = argv.ie_is_available
-    del argv.ie_is_available
-
     prepare_emit_ir(graph=graph,
                     data_type=graph.graph['cmd_params'].data_type,
                     output_dir=argv.output_dir,
```
```diff
@@ -285,7 +279,7 @@ def emit_ir(graph: Graph, argv: argparse.Namespace):
         # This try-except is additional reinsurance that the IE
         # dependency search does not break the MO pipeline
         try:
-            if not argv.legacy_ir_generation and ie_is_available:
+            if not argv.legacy_ir_generation:
                 path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'back',
                                                                'offline_transformations.py')
                 status = subprocess.run([sys.executable, path_to_offline_transformations,
```
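With the ie_is_available guard gone, the offline-transformations step always runs unless --legacy_ir_generation is set. An abridged sketch of the invocation; the real call above also passes the model name and transform arguments, which are elided here:

```python
import os
import subprocess
import sys

# Abridged sketch: run offline_transformations.py in a separate interpreter
# and collect its exit code, as emit_ir() does above.
path_to_offline_transformations = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'back',
                                               'offline_transformations.py')
status = subprocess.run([sys.executable, path_to_offline_transformations], check=False)
return_code = status.returncode
```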
```diff
@@ -295,7 +289,7 @@ def emit_ir(graph: Graph, argv: argparse.Namespace):
             return_code = status.returncode
         except Exception as e:
             return_code = "failed"
-            log.error(e, extra={'is_warning': True})
+            log.error(e)
 
         message = str(dict({
             "platform": platform.system(),
```
```diff
@@ -307,39 +301,20 @@ def emit_ir(graph: Graph, argv: argparse.Namespace):
         t = tm.Telemetry()
         t.send_event('mo', 'offline_transformations_status', message)
 
-        # if IR wasn't produced by offline_transformations step we need to fallback to IR
-        # produced by prepare_ir. This IR needs to be renamed from XXX_tmp.xml to XXX.xml
-        suffixes = [".xml", ".bin", ".mapping"]
         if return_code != 0:
-            if len(argv.transform) != 0:
-                # Remove temporary IR before throwing exception
-                for suf in suffixes:
-                    path_to_file = orig_model_name + "_tmp" + suf
-                    if os.path.exists(path_to_file):
-                        os.remove(path_to_file)
-                raise Error("Failed to apply transformations: {}".format(argv.transform))
+            raise Error("offline transformations step has failed.")
 
-            log.error("Using fallback to produce IR.", extra={'is_warning': True})
-            for suf in suffixes:
-                # remove existing files
-                path_to_file = orig_model_name + suf
-                if os.path.exists(path_to_file):
-                    os.remove(path_to_file)
+        for suf in [".xml", ".bin", ".mapping"]:
+            # remove existing files
+            path_to_file = orig_model_name + "_tmp" + suf
+            if os.path.exists(path_to_file):
+                os.remove(path_to_file)
 
-                # rename tmp IR to original name
-                os.rename(orig_model_name + "_tmp" + suf, orig_model_name + suf)
-        else:
-            for suf in suffixes:
-                # remove existing files
-                path_to_file = orig_model_name + "_tmp" + suf
-                if os.path.exists(path_to_file):
-                    os.remove(path_to_file)
-
-            # add meta information to IR
-            append_ir_info(file=orig_model_name,
-                           meta_info=get_meta_info(argv),
-                           mean_data=mean_data,
-                           input_names=input_names)
+        # add meta information to IR
+        append_ir_info(file=orig_model_name,
+                       meta_info=get_meta_info(argv),
+                       mean_data=mean_data,
+                       input_names=input_names)
 
     print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv)))
     print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
```
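Since the fallback path is gone, a non-zero return code now raises immediately, and the only file handling left is deleting the *_tmp.* intermediates, on the assumption that the offline-transformations step has already written the final IR. That cleanup, isolated into a helper for illustration (not how the commit structures it):

```python
import os

# Illustrative helper: delete the <model>_tmp.{xml,bin,mapping} intermediates
# once offline transformations have produced the final IR.
def cleanup_tmp_ir(orig_model_name: str) -> None:
    for suf in (".xml", ".bin", ".mapping"):
        path_to_file = orig_model_name + "_tmp" + suf
        if os.path.exists(path_to_file):
            os.remove(path_to_file)
```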
```diff
@@ -51,7 +51,13 @@ def import_core_modules(silent: bool, path_to_module: str):
         from openvino.offline_transformations import ApplyMOCTransformations, ApplyLowLatencyTransformation, \
             GenerateMappingFile  # pylint: disable=import-error,no-name-in-module
 
+        # TODO: it is temporary import to check that nGraph python API is available. But in future
+        # we need to replace it with Frontend imports
+        from ngraph.impl.op import Parameter  # pylint: disable=import-error,no-name-in-module
+        from _pyngraph import PartialShape, Dimension  # pylint: disable=import-error,no-name-in-module
+
         import openvino  # pylint: disable=import-error,no-name-in-module
+        import ngraph  # pylint: disable=import-error,no-name-in-module
 
         if silent:
             return True
```
```diff
@@ -60,6 +66,8 @@ def import_core_modules(silent: bool, path_to_module: str):
             mo_version = str(v.get_version())  # pylint: disable=no-member,no-name-in-module
 
             print("\t- {}: \t{}".format("Inference Engine found in", os.path.dirname(openvino.__file__)))
+            # TODO: when nGraph version will be available we need to start compare it to IE and MO versions. Ticket: 58091
+            print("\t- {}: \t{}".format("nGraph found in", os.path.dirname(ngraph.__file__)))
             print("{}: \t{}".format("Inference Engine version", ie_version))
             print("{}: \t{}".format("Model Optimizer version", mo_version))
```
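The TODO (ticket 58091) notes that nGraph does not yet report a version that could be cross-checked against the IE and MO versions printed here. A hypothetical sketch of what that comparison could look like once it does (names and version format are assumptions, not from the commit):

```python
# Hypothetical version cross-check for IE / MO / nGraph; assumes all three
# share a "year.release" prefix such as "2021.4".
def versions_consistent(ie_version: str, mo_version: str, ng_version: str) -> bool:
    def simplified(version: str) -> str:
        return ".".join(version.split(".")[:2])
    return simplified(ie_version) == simplified(mo_version) == simplified(ng_version)
```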
```diff
@@ -1217,17 +1217,12 @@ def parse_transform(transform: str) -> list:
     return transforms
 
 
-def check_available_transforms(transforms: list, ie_is_available: bool):
+def check_available_transforms(transforms: list):
     """
     This function check that transformations specified by user are available.
     :param transforms: list of user specified transformations
-    :param ie_is_available: True if IE Python API is available and False if it is not
-    :return: raises an Error if IE or transformation is not available
+    :return: raises an Error if transformation is not available
     """
-    if not ie_is_available and len(transforms) != 0:
-        raise Error('Can not apply {} transformations due to missing Inference Engine Python API'.format(
-            ','.join([name for name, _ in transforms])))
-
     from mo.back.offline_transformations import get_available_transformations
     available_transforms = get_available_transformations()
 
```
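After this change, callers no longer thread an ie_is_available flag through; validation is purely a lookup against the registered transformations. A usage sketch, assuming the helper lives in mo.utils.cli_parser as the hunk context suggests, with the (name, args) tuple shape taken from the updated tests below:

```python
# Usage sketch (assumes the model-optimizer package is on PYTHONPATH).
from mo.utils.cli_parser import check_available_transforms

check_available_transforms([("LowLatency2", "")])  # returns None if registered
check_available_transforms([("DummyPass", "")])    # raises Error: unknown transform
```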
```diff
@@ -959,11 +959,11 @@ class TransformChecker(unittest.TestCase):
     def test_check_low_latency_is_available(self, available_transformations):
         available_transformations.return_value = {"LowLatency2": None}
         try:
-            check_available_transforms([("LowLatency2", "")], True)
+            check_available_transforms([("LowLatency2", "")])
         except Error as e:
             self.assertTrue(False, "Exception \"{}\" is unexpected".format(e))
 
     @patch("mo.back.offline_transformations.get_available_transformations")
     def test_check_dummy_pass_is_available(self, available_transformations):
         available_transformations.return_value = {"LowLatency2": None}
-        self.assertRaises(Error, check_available_transforms, [("DummyPass", "")], True)
+        self.assertRaises(Error, check_available_transforms, [("DummyPass", "")])
```