Revert "[MO] remove deprecated: data_type, disable_nhwc_to_nchw, tensorflow_use_custom_operations_config (#16394)" (#16555)

This reverts commit 43ef89e625.
Author: Ilya Lavrenov
Date:   2023-03-27 09:04:41 +04:00 (committed by GitHub)
parent ce67ac09d3
commit 3f3bda592b
12 changed files with 50 additions and 21 deletions
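
The revert restores three deprecated Model Optimizer options: --data_type, --disable_nhwc_to_nchw, and --tensorflow_use_custom_operations_config. For illustration, a hypothetical invocation exercising all three after this revert (the model and config paths are placeholders, not taken from this commit):

    # Hypothetical `mo` call using the restored deprecated options; paths are placeholders.
    import subprocess

    subprocess.run([
        "mo",
        "--input_model", "model.pb",                  # placeholder model path
        "--data_type", "FP16",                        # restored: weight precision / FP16 compression
        "--disable_nhwc_to_nchw",                     # restored: suppress the NHWC->NCHW translation
        "--tensorflow_use_custom_operations_config",  # restored alias of --transformations_config
        "openvino/tools/mo/front/tf/yolo_v3.json",    # placeholder config path
    ], check=True)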

View File

@@ -40,11 +40,10 @@ class CommonLayerTest:
         os.environ['MO_ENABLED_TRANSFORMS'] = enabled_transforms
         os.environ['MO_DISABLED_TRANSFORMS'] = disabled_transforms
-        compress_to_fp16 = False if precision == 'FP32' else True
         mo_params = {self.input_model_key: model_path,
                      "output_dir": temp_dir,
-                     "compress_to_fp16": compress_to_fp16,
-                     "model_name": 'model'}
+                     "data_type": precision, "model_name": 'model'
+                     }
         if 'input_shapes' in kwargs and len(kwargs['input_shapes']):
             input_shapes_str = []

View File

@@ -26,10 +26,6 @@ def generate_ir(coverage=False, **kwargs):
             params.extend(("-b", str(value)))
         elif key == "k":
             params.extend(("-k", str(value)))
-        # for FP32 set explicitly compress_to_fp16=False,
-        # if we omit this argument for FP32, it will be set implicitly to True as the default
-        elif key == 'compress_to_fp16':
-            params.append("--{}={}".format(key, value))
         elif isinstance(value, bool) and value:
             params.append("--{}".format(key))
         elif isinstance(value, bool) and not value:
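
For context, the loop above flattens keyword arguments into Model Optimizer CLI parameters; with the compress_to_fp16 special case removed, booleans go back to being emitted as bare flags. A simplified standalone sketch of that mapping (the helper name is illustrative, not the actual generate_ir implementation):

    # Simplified sketch of the params-building loop: True booleans become bare
    # flags, False booleans are omitted, everything else becomes "--key value".
    def build_params(**kwargs):
        params = []
        for key, value in kwargs.items():
            if isinstance(value, bool) and value:
                params.append("--{}".format(key))
            elif isinstance(value, bool) and not value:
                continue  # omitted; this is why compress_to_fp16=False needed a special case
            else:
                params.extend(("--{}".format(key), str(value)))
        return params

    print(build_params(reverse_input_channels=True, data_type="FP16"))
    # ['--reverse_input_channels', '--data_type', 'FP16']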

View File

@@ -70,7 +70,7 @@ class TensorFlowObjectDetectionAPIAnalysis(AnalyzeAction):
         if all([graph_contains_scope(graph, scope) for scope in scopes]):
             result = dict()
             result['flavor'] = flavor
-            result['mandatory_parameters'] = {'transformations_config':
+            result['mandatory_parameters'] = {'tensorflow_use_custom_operations_config':
                                                   files_by_pattern(get_mo_root_dir() + '/openvino/tools/mo/front/tf',
                                                                    __class__.file_patterns[flavor],
                                                                    add_prefix=True),

View File

@@ -52,14 +52,14 @@ class TensorFlowRetinaNet(AnalyzeAction):
         if pattern_instance_counter.counter > 0:
             result = dict()
-            result['mandatory_parameters'] = {'transformations_config':
+            result['mandatory_parameters'] = {'tensorflow_use_custom_operations_config':
                                                   'openvino/tools/mo/front/tf/retinanet.json'}
             message = "Your model looks like TensorFlow RetinaNet Model.\n" \
                       "To generate the IR, provide model to the Model Optimizer with the following parameters:\n" \
                       "\t--input_model <path_to_model>/<model>.pb\n" \
                       "\t--input_shape [1,600,600,3]\n" \
-                      "\t--transformations_config <OPENVINO_INSTALL_DIR>/tools/model_optimizer/openvino/tools/mo/front/tf/retinanet.json\n" \
+                      "\t--tensorflow_use_custom_operations_config <OPENVINO_INSTALL_DIR>/tools/model_optimizer/openvino/tools/mo/front/tf/retinanet.json\n" \
                       "\t--reverse_input_channels"
             return {'model_type': {'TF_RetinaNet': result}}, message

View File

@@ -47,7 +47,7 @@ YOLO_CONFIGS = {'YOLOV2Full': ['openvino/tools/mo/front/tf/yolo_v2.json', 'openv
 def get_YOLO_params_by_flavor(flavor: str):
     result = dict()
     result['flavor'] = flavor
-    result['mandatory_parameters'] = {'transformations_config': YOLO_CONFIGS[flavor]}
+    result['mandatory_parameters'] = {'tensorflow_use_custom_operations_config': YOLO_CONFIGS[flavor]}
     return result
@@ -72,7 +72,7 @@ class TensorFlowYOLOV1V2Analysis(AnalyzeAction):
               "To generate the IR, provide TensorFlow YOLOv1 or YOLOv2 Model to the Model Optimizer with the following parameters:\n" \
               "\t--input_model <path_to_model>/<model_name>.pb\n" \
               "\t--batch 1\n" \
-              "\t--transformations_config <PYTHON_SITE_PACKAGES>/openvino/tools/mo/front/tf/<yolo_config>.json\n" \
+              "\t--tensorflow_use_custom_operations_config <PYTHON_SITE_PACKAGES>/openvino/tools/mo/front/tf/<yolo_config>.json\n" \
              "All detailed information about conversion of this model can be found at\n" \
              "https://docs.openvino.ai/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html"
     return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message
@@ -99,7 +99,7 @@ class TensorFlowYOLOV3Analysis(AnalyzeAction):
              "To generate the IR, provide TensorFlow YOLOv3 Model to the Model Optimizer with the following parameters:\n" \
              "\t--input_model <path_to_model>/yolo_v3.pb\n" \
              "\t--batch 1\n" \
-             "\t--transformations_config <PYTHON_SITE_PACKAGES>/openvino/tools/mo/front/tf/yolo_v3.json\n" \
+             "\t--tensorflow_use_custom_operations_config <PYTHON_SITE_PACKAGES>/openvino/tools/mo/front/tf/yolo_v3.json\n" \
              "Detailed information about conversion of this model can be found at\n" \
              "https://docs.openvino.ai/latest/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html"
     return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message

View File

@@ -65,7 +65,7 @@ def apply_offline_transformations(func: Model, argv: argparse.Namespace):
     apply_moc_legacy_transformations(func, params_with_custom_types)
     apply_user_transformations(func, parse_transform(argv.transform))
-    if "compress_to_fp16" in argv and argv.compress_to_fp16:
+    if "compress_fp16" in argv and argv.compress_fp16:
         compress_model(func)
     apply_fused_names_cleanup(func)
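
Both sides of this change rely on argparse.Namespace implementing __contains__, so "compress_fp16" in argv is an attribute-presence test rather than a substring match. A standalone illustration:

    import argparse

    # Namespace supports the `in` operator as an attribute-presence check,
    # which keeps guards like the one above safe when an option was never set.
    argv = argparse.Namespace(compress_fp16=True)
    print("compress_fp16" in argv)     # True
    print("compress_to_fp16" in argv)  # False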

View File

@@ -214,6 +214,13 @@ def arguments_post_parsing(argv: argparse.Namespace):
         log.error(e)
         raise_ie_not_found()
+    # Turn off compression only if it's disabled explicitly by --compress_to_fp16=False or --data_type=FP32.
+    # By default, in all other cases compression is enabled
+    if ('data_type' in argv and argv.data_type in ['FP32', 'float']) or \
+            ('compress_to_fp16' in argv and argv.compress_to_fp16 is False):
+        argv.compress_fp16 = False
+    else:
+        argv.compress_fp16 = True
     argv.data_type = 'FP32'  # if compression was enabled will be restored back to 'FP16' after apply_offline_transformations
     # This is just to check that transform key is valid and transformations are available
@@ -227,6 +234,12 @@ def arguments_post_parsing(argv: argparse.Namespace):
     if ret_code:
         raise Error('check_requirements exited with return code {}'.format(ret_code))
+    if hasattr(argv, 'tensorflow_use_custom_operations_config') and \
+            argv.tensorflow_use_custom_operations_config is not None:
+        # update command-line arguments even for new TensorFlow Frontend
+        # because it should fallback to the Legacy Frontend in this case
+        argv.transformations_config = argv.tensorflow_use_custom_operations_config
     if argv.scale and argv.scale_values:
         raise Error(
             'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input ' +
@@ -496,7 +509,7 @@ def emit_ir(graph: Graph, argv: argparse.Namespace, non_default_params: dict):
     try:
         from openvino.tools.mo.back.offline_transformations import apply_offline_transformations
         func = apply_offline_transformations(func, argv)
-        if "compress_to_fp16" in argv and argv.compress_to_fp16:
+        if "compress_fp16" in argv and argv.compress_fp16:
             # restore data_type cmd parameter
             argv.data_type = 'FP16'
         return_code = 0
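
Taken together, these hunks restore the rule that FP16 compression is on by default and is switched off only by an explicit --data_type=FP32 (or 'float') or --compress_to_fp16=False. A condensed standalone restatement of that decision (the helper name is illustrative):

    # Condensed restatement of the restored compress_fp16 decision in
    # arguments_post_parsing; compression defaults to on.
    def resolve_compress_fp16(data_type=None, compress_to_fp16=None):
        if data_type in ('FP32', 'float') or compress_to_fp16 is False:
            return False
        return True

    assert resolve_compress_fp16() is True                        # default: compress
    assert resolve_compress_fp16(data_type='FP32') is False       # explicit FP32 disables
    assert resolve_compress_fp16(compress_to_fp16=False) is False
    assert resolve_compress_fp16(data_type='FP16') is True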

View File

@@ -98,7 +98,7 @@ def _value_or_raise(match: SubgraphMatch, pipeline_config: PipelineConfig, key:
         raise Error('The sub-graph replacer "[REPLACEMENT_ID]" was not able to find the value for key "{}" in the '
                     'pipeline configuration file specified with the --tensorflow_object_detection_api_pipeline_config '
                     'command line parameter. Update the sub-graph replacement configuration file specified with the '
-                    '--transformations_config command line parameter by adding key "{}" with required '
+                    '--tensorflow_use_custom_operations_config command line parameter by adding key "{}" with required '
                     'value to the "custom_attributes" dictionary of the "[REPLACEMENT_ID]" replacer.'.format(key, key))
     return value

View File

@@ -91,7 +91,7 @@ class TFLoader(Loader):
         graph.__setattr__('name', argv.model_name)
         # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
         # and convert_nhwc_to_nchw(graph)
-        graph.graph['layout'] = 'NHWC'
+        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
         graph.graph['fw'] = 'tf'
         graph.graph['variables_values'] = variables_values
@@ -114,7 +114,7 @@ class TFLoader(Loader):
     # try to detect layout from the nodes of the graph. If there are no convolution nodes in N(D)HWC layout then we
     # consider that the graph is in NCHW layout and no layout conversion should be performed
-    if not graph_or_sub_graph_has_nhwc_ops(graph):
+    if not argv.disable_nhwc_to_nchw and not graph_or_sub_graph_has_nhwc_ops(graph):
         if not argv.silent:
             log.debug('disable_nhwc_to_nchw" was automatically enabled.')
         for_graph_and_each_sub_graph_recursively(graph, update_cmd_params_and_layout)
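
In effect, the restored flag touches layout handling twice: it pins the initial graph layout, and it gates the automatic NHWC-op detection. A condensed restatement of the two checks (function names illustrative):

    # Condensed restatement of the two restored checks in TFLoader:
    def initial_layout(disable_nhwc_to_nchw: bool) -> str:
        # first hunk: the deprecated flag pins the graph to NCHW
        return 'NCHW' if disable_nhwc_to_nchw else 'NHWC'

    def auto_switch_to_nchw(disable_nhwc_to_nchw: bool, graph_has_nhwc_ops: bool) -> bool:
        # second hunk: layout auto-detection runs only when the flag is unset
        return not disable_nhwc_to_nchw and not graph_has_nhwc_ops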

View File

@@ -37,7 +37,8 @@ def main(cli_parser: argparse.ArgumentParser, framework=None):
     try:
         ngraph_function, argv = _convert(cli_parser, framework, {})
         is_tf, _, _, _, _ = deduce_legacy_frontend_by_namespace(argv)
-        if 'compress_to_fp16' in argv and argv.compress_to_fp16:
+        if ('compress_to_fp16' in argv and argv.compress_to_fp16) \
+                or ('data_type' in argv and argv.data_type in ['half', 'FP16']):
             print(get_compression_message())
         ov_update_message = get_ov_update_message()

View File

@@ -32,7 +32,7 @@ def moc_emit_ir(ngraph_function: Model, argv: argparse.Namespace):
     apply_user_transformations(ngraph_function, parse_transform(argv.transform))
-    if argv.compress_to_fp16:
+    if argv.compress_fp16:
         from openvino.tools.mo.back.offline_transformations import compress_model
         compress_model(ngraph_function)

View File

@@ -836,6 +836,8 @@ class DeprecatedCanonicalizePathCheckExistenceAction(CanonicalizePathCheckExiste
     def __call__(self, parser, namespace, values, option_string=None):
         dep_msg = "Use of deprecated cli option {} detected. Option use in the following releases will be fatal. ".format(
             option_string)
+        if 'tensorflow_use_custom_operations_config' in option_string:
+            dep_msg += 'Please use --transformations_config cli option instead'
         log.error(dep_msg, extra={'is_warning': True})
         super().__call__(parser, namespace, values, option_string)
@@ -1019,6 +1021,15 @@ def get_common_cli_parser(parser: argparse.ArgumentParser = None):
                               help=mo_convert_params_common['layout'].description.format(
                                   mo_convert_params_common['layout'].possible_types_command_line),
                               default=())
+    # TODO: isn't it a weights precision type
+    common_group.add_argument('--data_type',
+                              help='[DEPRECATED] Data type for model weights and biases. '
+                                   'If original model has FP32 weights or biases and --data_type=FP16 is specified, '
+                                   'FP32 model weights and biases are compressed to FP16. '
+                                   'All intermediate data is kept in original precision.',
+                              choices=["FP16", "FP32", "half", "float"],
+                              default='FP16',
+                              action=DeprecatedOptionCommon)
     common_group.add_argument('--compress_to_fp16',
                               help=mo_convert_params_common['compress_to_fp16'].description,
                               type=check_bool,
@@ -1093,6 +1104,7 @@ def get_common_cli_options(model_name):
     d['mean_values'] = ['- Mean values', lambda x: x if x else 'Not specified']
     d['scale_values'] = ['- Scale values', lambda x: x if x else 'Not specified']
     d['scale'] = ['- Scale factor', lambda x: x if x else 'Not specified']
+    d['data_type'] = ['- Precision of IR', lambda x: 'FP32' if x == 'float' else 'FP16' if x == 'half' else x]
     d['transform'] = ['- User transformations', lambda x: x if x else 'Not specified']
     d['reverse_input_channels'] = '- Reverse input channels'
     d['static_shape'] = '- Enable IR generation for fixed input shape'
@@ -1121,6 +1133,7 @@ def get_tf_cli_options():
     d = {
         'input_model_is_text': '- Input model in text protobuf format',
         'tensorflow_custom_operations_config_update': '- Update the configuration file with input/output node names',
+        'tensorflow_use_custom_operations_config': '- Use the config file',
         'tensorflow_object_detection_api_pipeline_config': '- Use configuration file used to generate the model with '
                                                            'Object Detection API',
         'tensorflow_custom_layer_libraries': '- List of shared libraries with TensorFlow custom layers implementation',
@@ -1164,7 +1177,7 @@ def get_params_with_paths_list():
            'input_checkpoint', 'input_meta_graph', 'input_proto', 'input_symbol',
            'pretrained_model_name', 'saved_model_dir', 'tensorboard_logdir',
            'tensorflow_custom_layer_libraries', 'tensorflow_custom_operations_config_update',
-           'tensorflow_object_detection_api_pipeline_config',
+           'tensorflow_object_detection_api_pipeline_config', 'tensorflow_use_custom_operations_config',
            'transformations_config']
@@ -1243,6 +1256,9 @@ def get_tf_cli_parser(parser: argparse.ArgumentParser = None):
    tf_group.add_argument('--tensorflow_custom_operations_config_update',
                          help=mo_convert_params_tf['tensorflow_custom_operations_config_update'].description,
                          action=CanonicalizePathCheckExistenceAction)
+   tf_group.add_argument('--tensorflow_use_custom_operations_config',
+                         help='Use the configuration file with custom operation description.',
+                         action=DeprecatedCanonicalizePathCheckExistenceAction)
    tf_group.add_argument('--tensorflow_object_detection_api_pipeline_config',
                          help=mo_convert_params_tf['tensorflow_object_detection_api_pipeline_config'].description,
                          action=CanonicalizePathCheckExistenceAction)
@@ -1254,6 +1270,10 @@ def get_tf_cli_parser(parser: argparse.ArgumentParser = None):
                          help=mo_convert_params_tf['tensorflow_custom_layer_libraries'].description,
                          default=None,
                          action=CanonicalizePathCheckExistenceAction)
+   tf_group.add_argument('--disable_nhwc_to_nchw',
+                         help='[DEPRECATED] Disables the default translation from NHWC to NCHW. Since 2022.1 this option '
+                              'is deprecated and used only to maintain backward compatibility with previous releases.',
+                         action=DeprecatedStoreTrue, default=False)
    return parser
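
The re-registered deprecated options lean on argparse Action subclasses that log a warning before delegating to the normal behavior, as DeprecatedCanonicalizePathCheckExistenceAction.__call__ shows above. A minimal standalone sketch of that pattern (the class name here is illustrative, not the OpenVINO DeprecatedStoreTrue implementation):

    import argparse
    import logging

    log = logging.getLogger(__name__)

    class DemoDeprecatedStoreTrue(argparse.Action):
        """Warn about a deprecated flag, then store True, mirroring the pattern above."""
        def __init__(self, option_strings, dest, default=False, **kwargs):
            super().__init__(option_strings, dest, nargs=0, default=default, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            log.warning("Use of deprecated cli option %s detected.", option_string)
            setattr(namespace, self.dest, True)

    parser = argparse.ArgumentParser()
    parser.add_argument('--disable_nhwc_to_nchw', action=DemoDeprecatedStoreTrue)
    print(parser.parse_args(['--disable_nhwc_to_nchw']))
    # warns, then prints Namespace(disable_nhwc_to_nchw=True)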