From dc52ec7880d12d5adee090b57fc6527fe40594ca Mon Sep 17 00:00:00 2001
From: Mikhail Ryzhov
Date: Tue, 27 Oct 2020 10:23:25 +0300
Subject: [PATCH] [Samples] Python: added support of onnx models (#2782)

* [Samples] Python: added support of onnx models

* [Docs] Python: added support of onnx models

Updated documentation
---
 docs/IE_DG/Samples_Overview.md                 |  2 +-
 .../classification_sample_async/README.md      |  3 ++-
 .../classification_sample_async.py             | 18 ++++++++++++------
 .../README.md                                  |  2 ++
 .../hello_classification.py}                   | 20 +++++++++++++-------
 .../object_detection_sample_ssd/README.md      |  3 ++-
 .../object_detection_sample_ssd.py             | 18 ++++++++++++------
 .../sample/style_transfer_sample/README.md     |  2 ++
 .../style_transfer_sample.py                   | 18 ++++++++++++------
 9 files changed, 58 insertions(+), 28 deletions(-)
 rename inference-engine/ie_bridges/python/sample/{classification_sample => hello_classification}/README.md (97%)
 rename inference-engine/ie_bridges/python/sample/{classification_sample/classification_sample.py => hello_classification/hello_classification.py} (90%)

diff --git a/docs/IE_DG/Samples_Overview.md b/docs/IE_DG/Samples_Overview.md
index 64a9462ef31..79c92d85c0c 100644
--- a/docs/IE_DG/Samples_Overview.md
+++ b/docs/IE_DG/Samples_Overview.md
@@ -25,7 +25,7 @@ Inference Engine sample applications include the following:
 - **Image Classification Sample Async** – Inference of image classification networks like AlexNet and GoogLeNet using Asynchronous Inference Request API (the sample supports only images as inputs).
    - [Image Classification C++ Sample Async](../../inference-engine/samples/classification_sample_async/README.md)
    - [Image Classification Python* Sample Async](../../inference-engine/ie_bridges/python/sample/classification_sample_async/README.md)
-- **[Image Classification Python* Sample](../../inference-engine/ie_bridges/python/sample/classification_sample/README.md)** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API (the sample supports only images as inputs).
+- **[Image Classification Python* Sample](../../inference-engine/ie_bridges/python/sample/hello_classification/README.md)** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API (the sample supports only images as inputs).
 - **Neural Style Transfer Sample** – Style Transfer sample (the sample supports only images as inputs).
    - [Neural Style Transfer C++ Sample](../../inference-engine/samples/style_transfer_sample/README.md)
    - [Neural Style Transfer Python* Sample](../../inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md)
diff --git a/inference-engine/ie_bridges/python/sample/classification_sample_async/README.md b/inference-engine/ie_bridges/python/sample/classification_sample_async/README.md
index 25dbfdf8cf8..d7a20f50373 100644
--- a/inference-engine/ie_bridges/python/sample/classification_sample_async/README.md
+++ b/inference-engine/ie_bridges/python/sample/classification_sample_async/README.md
@@ -62,7 +62,8 @@ Running the application with the empty list of options yields the usage message
 To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
 
 > **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
-
+>
+> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
 You can do inference of an image using a trained AlexNet network on FPGA with fallback to CPU using the following command:
 ```
diff --git a/inference-engine/ie_bridges/python/sample/classification_sample_async/classification_sample_async.py b/inference-engine/ie_bridges/python/sample/classification_sample_async/classification_sample_async.py
index 1afd6d7b06b..315d3e33d8a 100644
--- a/inference-engine/ie_bridges/python/sample/classification_sample_async/classification_sample_async.py
+++ b/inference-engine/ie_bridges/python/sample/classification_sample_async/classification_sample_async.py
@@ -77,7 +77,7 @@ def build_argparser():
     parser = ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
     args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
-    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
+    args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.",
                       required=True, type=str)
     args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
                       required=True, type=str, nargs="+")
@@ -96,17 +96,23 @@ def build_argparser():
 def main():
     log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
     args = build_argparser().parse_args()
-    model_xml = args.model
-    model_bin = os.path.splitext(model_xml)[0] + ".bin"
 
     # Plugin initialization for specified device and load extensions library if specified
     log.info("Creating Inference Engine")
     ie = IECore()
     if args.cpu_extension and 'CPU' in args.device:
         ie.add_extension(args.cpu_extension, "CPU")
-    # Read IR
-    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
-    net = ie.read_network(model=model_xml, weights=model_bin)
+
+    # Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
+    model = args.model
+    model_bin = None
+    model_name, model_ext = os.path.splitext(model)
+    log.info(f"Loading network files:\n\t{model}")
+    if model_ext == ".xml":
+        # Read .bin weights for IR format only
+        model_bin = model_name + ".bin"
+        log.info(f"\n\t{model_bin}")
+    net = ie.read_network(model=model, weights=model_bin)
 
     assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
     assert len(net.outputs) == 1, "Sample supports only single output topologies"
diff --git a/inference-engine/ie_bridges/python/sample/classification_sample/README.md b/inference-engine/ie_bridges/python/sample/hello_classification/README.md
similarity index 97%
rename from inference-engine/ie_bridges/python/sample/classification_sample/README.md
rename to inference-engine/ie_bridges/python/sample/hello_classification/README.md
index 1116e770bbb..488278c87d2 100644
--- a/inference-engine/ie_bridges/python/sample/classification_sample/README.md
+++ b/inference-engine/ie_bridges/python/sample/hello_classification/README.md
@@ -49,6 +49,8 @@ Running the application with the empty list of options yields the usage message
 To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
 
 > **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
+>
+> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
 For example, to perform inference of an AlexNet model (previously converted to the Inference Engine format) on CPU, use the following command:
 
diff --git a/inference-engine/ie_bridges/python/sample/classification_sample/classification_sample.py b/inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py
similarity index 90%
rename from inference-engine/ie_bridges/python/sample/classification_sample/classification_sample.py
rename to inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py
index 5de0b064b95..0de5732de41 100644
--- a/inference-engine/ie_bridges/python/sample/classification_sample/classification_sample.py
+++ b/inference-engine/ie_bridges/python/sample/hello_classification/hello_classification.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 """
 Copyright (C) 2018-2020 Intel Corporation
 
@@ -28,7 +28,7 @@ def build_argparser():
     parser = ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
     args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
-    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
+    args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True,
                       type=str)
     args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files", required=True,
                       type=str, nargs="+")
@@ -51,17 +51,23 @@ def build_argparser():
 def main():
     log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
     args = build_argparser().parse_args()
-    model_xml = args.model
-    model_bin = os.path.splitext(model_xml)[0] + ".bin"
 
     # Plugin initialization for specified device and load extensions library if specified
     log.info("Creating Inference Engine")
     ie = IECore()
     if args.cpu_extension and 'CPU' in args.device:
         ie.add_extension(args.cpu_extension, "CPU")
-    # Read IR
-    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
-    net = ie.read_network(model=model_xml, weights=model_bin)
+
+    # Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
+    model = args.model
+    model_bin = None
+    model_name, model_ext = os.path.splitext(model)
+    log.info(f"Loading network files:\n\t{model}")
+    if model_ext == ".xml":
+        # Read .bin weights for IR format only
+        model_bin = model_name + ".bin"
+        log.info(f"\n\t{model_bin}")
+    net = ie.read_network(model=model, weights=model_bin)
 
     assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
     assert len(net.outputs) == 1, "Sample supports only single output topologies"
diff --git a/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md b/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md
index 5ec82287612..26b8394cdb7 100644
--- a/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md
+++ b/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/README.md
@@ -58,7 +58,8 @@ Running the application with the empty list of options yields the usage message
 To run the sample, you can use RMNet_SSD or other object-detection models. You can download the pre-trained models with the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
 
 > **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
-
+>
+> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
 You can do inference of an image using a trained RMNet_SSD network on FPGA with fallback to CPU using the following command:
 ```
diff --git a/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py b/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py
index 64ef7aaaa80..35ecd58fc02 100644
--- a/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py
+++ b/inference-engine/ie_bridges/python/sample/object_detection_sample_ssd/object_detection_sample_ssd.py
@@ -29,7 +29,7 @@ def build_argparser():
     parser = ArgumentParser(add_help=False)
     args = parser.add_argument_group("Options")
     args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
-    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
+    args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.",
                       required=True, type=str)
     args.add_argument("-i", "--input", help="Required. Path to image file.",
                       required=True, type=str, nargs="+")
@@ -53,11 +53,17 @@ def main():
     args = build_argparser().parse_args()
     log.info("Loading Inference Engine")
     ie = IECore()
-    # --------------------------- 1. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
-    model_xml = args.model
-    model_bin = os.path.splitext(model_xml)[0] + ".bin"
-    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
-    net = ie.read_network(model=model_xml, weights=model_bin)
+
+    # ---1. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format ---
+    model = args.model
+    model_bin = None
+    model_name, model_ext = os.path.splitext(model)
+    log.info(f"Loading network files:\n\t{model}")
+    if model_ext == ".xml":
+        # Read .bin weights for IR format only
+        model_bin = model_name + ".bin"
+        log.info(f"\n\t{model_bin}")
+    net = ie.read_network(model=model, weights=model_bin)
     func = ng.function_from_cnn(net)
     ops = func.get_ordered_ops()
     # -----------------------------------------------------------------------------------------------------
diff --git a/inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md b/inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md
index ad1834829ad..d7c6f11df8f 100644
--- a/inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md
+++ b/inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md
@@ -4,6 +4,8 @@ This topic demonstrates how to run the Neural Style Transfer sample application,
 inference of style transfer models.
 
 > **NOTE**: The OpenVINO™ toolkit does not include a pre-trained model to run the Neural Style Transfer sample. A public model from the [Zhaw's Neural Style Transfer repository](https://github.com/zhaw/neural_style) can be used. Read the [Converting a Style Transfer Model from MXNet*](../../../../../docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md) topic from the [Model Optimizer Developer Guide](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) to learn about how to get the trained model and how to convert it to the Inference Engine format (\*.xml + \*.bin).
+>
+> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
 
 ## How It Works
 
diff --git a/inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py b/inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py
index 36001556fb9..ce1d2210588 100644
--- a/inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py
+++ b/inference-engine/ie_bridges/python/sample/style_transfer_sample/style_transfer_sample.py
@@ -28,7 +28,7 @@ def build_argparser():
     parser = ArgumentParser(add_help=False)
     args = parser.add_argument_group('Options')
     args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
-    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True, type=str)
+    args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True, type=str)
     args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files", required=True,
                       type=str, nargs="+")
     args.add_argument("-l", "--cpu_extension",
@@ -55,17 +55,23 @@ def build_argparser():
 def main():
     log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
     args = build_argparser().parse_args()
-    model_xml = args.model
-    model_bin = os.path.splitext(model_xml)[0] + ".bin"
 
     # Plugin initialization for specified device and load extensions library if specified
     log.info("Creating Inference Engine")
     ie = IECore()
     if args.cpu_extension and 'CPU' in args.device:
         ie.add_extension(args.cpu_extension, "CPU")
-    # Read IR
-    log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
-    net = ie.read_network(model=model_xml, weights=model_bin)
+
+    # Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
+    model = args.model
+    model_bin = None
+    model_name, model_ext = os.path.splitext(model)
+    log.info(f"Loading network files:\n\t{model}")
+    if model_ext == ".xml":
+        # Read .bin weights for IR format only
+        model_bin = model_name + ".bin"
+        log.info(f"\n\t{model_bin}")
+    net = ie.read_network(model=model, weights=model_bin)
 
     assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
     assert len(net.outputs) == 1, "Sample supports only single output topologies"
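
All four samples in this patch repeat the same loading pattern: the `-m` argument is treated either as an OpenVINO IR `.xml` file, whose companion `.bin` weights file is derived from the same base name, or as a self-contained `.onnx` file, in which case no weights path is passed to `IECore.read_network`. Below is a minimal standalone sketch of that pattern, assuming the `openvino.inference_engine` Python API used by these samples; the helper name `read_model` and the command-line handling are illustrative, not part of the patch.

```
import logging as log
import os
import sys

from openvino.inference_engine import IECore


def read_model(ie, model_path):
    """Read an IR (.xml + .bin) or ONNX (.onnx) model the way the patched samples do."""
    model_name, model_ext = os.path.splitext(model_path)
    model_bin = None
    log.info(f"Loading network files:\n\t{model_path}")
    if model_ext == ".xml":
        # IR models keep their weights in a .bin file next to the .xml
        model_bin = model_name + ".bin"
        log.info(f"\n\t{model_bin}")
    # For .onnx input, model_bin stays None because the file is self-contained
    return ie.read_network(model=model_path, weights=model_bin)


if __name__ == "__main__":
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    ie = IECore()
    net = read_model(ie, sys.argv[1])  # e.g. alexnet.xml or alexnet.onnx
```

Leaving `weights` as `None` for ONNX input mirrors the patch itself, which calls `ie.read_network(model=model, weights=model_bin)` with `model_bin` still set to `None` whenever the model extension is not `.xml`.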