From b232d4b43d7dc1ca41a882baccd9c1b1c9c2f61a Mon Sep 17 00:00:00 2001
From: Anastasiia Pnevskaia
Date: Fri, 13 Oct 2023 18:12:38 +0200
Subject: [PATCH] Guide for input/output in original FW. (#20141)

* Added guide for input/output in original FW.

* Apply suggestions from code review

Co-authored-by: Roman Kazantsev

* Removed unused import.

* Apply suggestions from code review

Co-authored-by: Roman Kazantsev

* Text format corrections.

* Header format correction.

* Minor correction.

* Minor corrections.

* Minor corrections.

* Removed unused import.

* Update docs/OV_Converter_UG/prepare_model/convert_model/MO_OVC_transition.md

Co-authored-by: Tatiana Savina

* Update docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md

Co-authored-by: Nico Galoppo

* Examples format change. Added PyTorch example.

* Example corrected.

* Added PyTorch example.

* Small correction.

* Apply suggestions from code review

Co-authored-by: Maxim Vafin

* Added note.

* Corrected note.

---------

Co-authored-by: Roman Kazantsev
Co-authored-by: Tatiana Savina
Co-authored-by: Nico Galoppo
Co-authored-by: Maxim Vafin
---
 .../mo_ovc_transition.md | 227 ++++++++++++++++++
 1 file changed, 227 insertions(+)

diff --git a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md
index 9de12249a34..e45eac3b3a3 100644
--- a/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md
+++ b/docs/articles_en/documentation/openvino_legacy_features/mo_ovc_transition.md
@@ -624,6 +624,233 @@ Here is the guide to transition from legacy model preprocessing to new API prepr

- Not available in OVC tool. Please check Python API.


Cutting Off Parts of a Model
############################

Performing surgery on a model by cutting its inputs and outputs is no longer available in the new conversion API. Instead, we recommend performing the cut in the original framework.
Below are examples of cutting models in the TensorFlow protobuf, TensorFlow SavedModel, and ONNX formats with the legacy conversion API, compared to achieving the same cut with the tools provided by the TensorFlow and ONNX frameworks.
For PyTorch, TensorFlow 2 Keras, and PaddlePaddle, we recommend changing the original model code to perform the model cut, as shown in the examples below.

Note: This guide does not cover cutting a model by the input port of an operation, which the MO tool supports through the `input` and `output` options, for example, `--input 1:name_op`.

``PyTorch``
###########

Model cut for PyTorch is not available in the legacy API.

When you need to remove a whole module from the model, you can replace it with `Identity`. Below is an example of removing the `conv1` and `bn1` modules at the input and the `fc` module at the output of the ResNet-50 model.

.. code-block:: py
   :force:

   import openvino as ov
   import torch
   import torchvision
   from torch.nn import Identity

   # Load a pretrained model
   model = torchvision.models.resnet50(weights='DEFAULT')

   # Input cut
   model.conv1 = Identity()
   model.bn1 = Identity()

   # Output cut
   model.fc = Identity()

   # Convert and compile the model
   ov_model = ov.convert_model(model, input=([-1, 64, -1, -1], torch.float32))
   compiled_model = ov.compile_model(ov_model)

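A quick way to confirm the cut is to run the compiled model on random data shaped like the activations the removed `bn1` module used to produce. This is only a minimal sketch building on the example above; the 56x56 spatial size is an arbitrary choice.

.. code-block:: py
   :force:

   import numpy as np

   # Dummy activation tensor matching the new input: 64 channels, arbitrary spatial size
   dummy_input = np.random.rand(1, 64, 56, 56).astype(np.float32)

   # With the classification head replaced by Identity, the network returns pooled features
   result = compiled_model(dummy_input)
   print(result[0].shape)  # expected to be (1, 2048) for ResNet-50
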
When you need to remove one or more outputs from the model, you can create a wrapper around the model that returns only the needed outputs. Below is an example of removing the second output from the model.

.. code-block:: py
   :force:

   import openvino as ov
   import torch

   # Example of a model with multiple outputs
   class Model(torch.nn.Module):
       def __init__(self):
           super(Model, self).__init__()
           self.linear1 = torch.nn.Linear(100, 200)
           self.activation1 = torch.nn.ReLU()
           self.linear2 = torch.nn.Linear(200, 10)
           self.activation2 = torch.nn.Sigmoid()

       def forward(self, x):
           x = self.linear1(x)
           x = self.activation1(x)
           y = self.linear2(x)
           y = self.activation2(y)
           return x, y

   # New model, where the second output is cut
   class CutModel(torch.nn.Module):
       def __init__(self):
           super(CutModel, self).__init__()
           self.model = Model()

       def forward(self, x):
           # Keep only the first output
           x, _ = self.model(x)
           return x

   # Model with the output cut
   cut_model = CutModel()

   # Convert and compile the model
   ov_model = ov.convert_model(cut_model, input=([-1, -1, -1], torch.float32))
   compiled_model = ov.compile_model(ov_model)


``TensorFlow protobuf format / tf.Graph / tf.GraphDef``
########################################################

Cutting the model with the legacy conversion API:

.. code-block:: py
   :force:

   import openvino as ov
   import openvino.tools.mo as mo

   import tensorflow as tf

   def load_graph(model_path):
       graph_def = tf.compat.v1.GraphDef()
       with open(model_path, "rb") as f:
           graph_def.ParseFromString(f.read())
       with tf.compat.v1.Graph().as_default() as graph:
           tf.graph_util.import_graph_def(graph_def, name="")
       return graph

   # Load the TF model
   graph = load_graph("/path_to_model/HugeCTR.pb")

   # Convert the model with the input and output cut
   input_name = "concat"
   output_name = "MatVec_3/Squeeze"
   ov_model = mo.convert_model(graph, input=(input_name, [-1, -1]), output=output_name)

   # Compile the model
   compiled_model = ov.compile_model(ov_model)

Cutting the model in the original framework:

.. code-block:: py
   :force:

   import openvino as ov
   import tensorflow as tf

   from tensorflow.python.tools.strip_unused_lib import strip_unused

   def load_graph(model_path):
       graph_def = tf.compat.v1.GraphDef()
       with open(model_path, "rb") as f:
           graph_def.ParseFromString(f.read())
       with tf.compat.v1.Graph().as_default() as graph:
           tf.graph_util.import_graph_def(graph_def, name="")
       return graph

   # Load the TF model
   graph = load_graph("/path_to_model/HugeCTR.pb")

   # Cut the model
   input_name = "concat"
   output_name = "MatVec_3/Squeeze"
   graph_def = graph.as_graph_def()
   new_graph_def = strip_unused(graph_def, [input_name], [output_name], tf.float32.as_datatype_enum)

   # Convert and compile the model
   ov_model = ov.convert_model(new_graph_def, input=[-1, -1])
   compiled_model = ov.compile_model(ov_model)

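``TensorFlow 2 Keras``
######################

For TensorFlow 2 Keras, as recommended above, the cut is performed in the original model code. With the Keras functional API, an output cut can be expressed by rebuilding the model so that it ends at an intermediate tensor. The following snippet is only a sketch: the model path and layer name are placeholders, and an input-side cut typically requires reassembling the remaining layers into a new model.

.. code-block:: py
   :force:

   import openvino as ov
   import tensorflow as tf

   # Load the original Keras model (placeholder path)
   model = tf.keras.models.load_model("/path_to_model/model.h5")

   # Output cut: rebuild the model so it ends at an intermediate layer
   # (the layer name is a placeholder; check model.summary() for the real names)
   cut_output = model.get_layer("intermediate_layer_name").output
   cut_model = tf.keras.Model(inputs=model.input, outputs=cut_output)

   # Convert and compile the cut model
   ov_model = ov.convert_model(cut_model)
   compiled_model = ov.compile_model(ov_model)
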
``TensorFlow SavedModel format``
################################

Model cut for the SavedModel format is not available in the legacy API.

Cutting the model in the original framework:

.. code-block:: py
   :force:

   import openvino as ov
   import tensorflow_hub as hub

   import tensorflow as tf
   from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
   from tensorflow.python.tools.strip_unused_lib import strip_unused

   # Load the TF model
   model = hub.load("https://tfhub.dev/svampeatlas/vision/embedder/fungi_V2/1?tf-hub-format=compressed")

   # Convert the model to GraphDef
   model_func = model.signatures["default"]
   frozen_func = convert_variables_to_constants_v2(model_func)
   graph_def = frozen_func.graph.as_graph_def()

   # Cut the model
   input_name = 'InceptionV4/InceptionV4/Conv2d_2b_3x3/Relu'
   output_name = 'InceptionV4/InceptionV4/Mixed_7c/concat'
   new_graph_def = strip_unused(graph_def, [input_name], [output_name], tf.float32.as_datatype_enum)

   # Convert and compile the model
   ov_model = ov.convert_model(new_graph_def)
   compiled_model = ov.compile_model(ov_model)


``ONNX``
########

Cutting the model with the legacy conversion API:

.. code-block:: py
   :force:

   import openvino as ov
   import openvino.tools.mo as mo

   input_path = "/path_to_model/yolov8x.onnx"

   # Convert the model and perform the input and output cut
   input_name = "/model.2/Concat_output_0"
   output_name = "/model.22/Concat_3_output_0"
   ov_model = mo.convert_model(input_path, input=input_name, output=output_name)

   # Compile the model
   compiled_model = ov.compile_model(ov_model)

Cutting the model in the original framework:

.. code-block:: py
   :force:

   import onnx
   import openvino as ov

   input_path = "/path_to_model/yolov8x.onnx"

   # Cut the model
   input_name = "/model.2/Concat_output_0"
   output_name = "/model.22/Concat_3_output_0"
   cut_model_path = "/path_to_model/yolov8x_cut.onnx"
   onnx.utils.extract_model(input_path, cut_model_path, [input_name], [output_name])

   # Convert the cut model
   ov_model = ov.convert_model(cut_model_path)

   # Compile the model
   compiled_model = ov.compile_model(ov_model)


Supported Frameworks in MO vs OVC
#################################