[Samples] Python: added support for ONNX models (#2782)

* [Samples] Python: added support for ONNX models

* [Docs] Python: added support for ONNX models

Updated documentation
Mikhail Ryzhov 2020-10-27 10:23:25 +03:00 committed by GitHub
parent 73c40722fd
commit dc52ec7880
9 changed files with 58 additions and 28 deletions


@@ -25,7 +25,7 @@ Inference Engine sample applications include the following:
- **Image Classification Sample Async** – Inference of image classification networks like AlexNet and GoogLeNet using Asynchronous Inference Request API (the sample supports only images as inputs).
- [Image Classification C++ Sample Async](../../inference-engine/samples/classification_sample_async/README.md)
- [Image Classification Python* Sample Async](../../inference-engine/ie_bridges/python/sample/classification_sample_async/README.md)
- **[Image Classification Python* Sample](../../inference-engine/ie_bridges/python/sample/classification_sample/README.md)** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API (the sample supports only images as inputs).
- **[Image Classification Python* Sample](../../inference-engine/ie_bridges/python/sample/hello_classification/README.md)** – Inference of image classification networks like AlexNet and GoogLeNet using Synchronous Inference Request API (the sample supports only images as inputs).
- **Neural Style Transfer Sample** – Style Transfer sample (the sample supports only images as inputs).
- [Neural Style Transfer C++ Sample](../../inference-engine/samples/style_transfer_sample/README.md)
- [Neural Style Transfer Python* Sample](../../inference-engine/ie_bridges/python/sample/style_transfer_sample/README.md)


@@ -62,7 +62,8 @@ Running the application with the empty list of options yields the usage message
To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
>
> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
You can do inference of an image using a trained AlexNet network on FPGA with fallback to CPU using the following command:
```
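# A hypothetical invocation (the script name, the -d/--device flag, and the
# paths are assumed from the sample sources rather than shown in this hunk);
# with an .onnx model no separate .bin file is needed:
python3 classification_sample_async.py -m <path_to_model>/alexnet.onnx -i <path_to_image>/cat.bmp -d HETERO:FPGA,CPU
```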


@@ -77,7 +77,7 @@ def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.",
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
required=True, type=str, nargs="+")
@@ -96,17 +96,23 @@ def build_argparser():
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
# Plugin initialization for specified device and load extensions library if specified
log.info("Creating Inference Engine")
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = ie.read_network(model=model_xml, weights=model_bin)
# Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
model = args.model
model_bin = None
model_name, model_ext = os.path.splitext(model)
log.info(f"Loading network files:\n\t{model}")
if model_ext == ".xml":
# Read .bin weights for IR format only
model_bin = model_name + ".bin"
log.info(f"\n\t{model_bin}")
net = ie.read_network(model=model, weights=model_bin)
assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies"
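
The extension dispatch above recurs verbatim in every sample touched by this commit; factored into a helper, the pattern is (a sketch, not part of the commit):

```python
import os

from openvino.inference_engine import IECore


def read_model(ie: IECore, model_path: str):
    """Read a model in IR (.xml + .bin) or ONNX (.onnx) format."""
    model_name, model_ext = os.path.splitext(model_path)
    # Only the IR format keeps weights in a separate .bin file next to the
    # .xml; an .onnx file embeds its weights, so weights stays None.
    weights = model_name + ".bin" if model_ext == ".xml" else None
    return ie.read_network(model=model_path, weights=weights)
```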


@@ -49,6 +49,8 @@ Running the application with the empty list of options yields the usage message
To run the sample, you can use AlexNet and GoogLeNet or other image classification models. You can download the pre-trained models with the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
>
> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
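
In code, the ONNX path needs no separate weights file at all. A minimal end-to-end sketch, assuming the 2020.x Inference Engine Python API and a placeholder `alexnet.onnx`:

```python
import numpy as np
from openvino.inference_engine import IECore

ie = IECore()
# The .onnx file is read directly: no Model Optimizer step, no .bin weights
net = ie.read_network(model="alexnet.onnx")
exec_net = ie.load_network(network=net, device_name="CPU")

input_name = next(iter(net.input_info))
shape = net.input_info[input_name].input_data.shape  # e.g. [1, 3, 227, 227]
results = exec_net.infer(inputs={input_name: np.zeros(shape, dtype=np.float32)})
print({name: out.shape for name, out in results.items()})
```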
For example, to perform inference of an AlexNet model (previously converted to the Inference Engine format) on CPU, use the following command:


@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
"""
Copyright (C) 2018-2020 Intel Corporation
@@ -28,7 +28,7 @@ def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True,
args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True,
type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files",
required=True,
@@ -51,17 +51,23 @@ def build_argparser():
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
# Plugin initialization for specified device and load extensions library if specified
log.info("Creating Inference Engine")
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = ie.read_network(model=model_xml, weights=model_bin)
# Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
model = args.model
model_bin = None
model_name, model_ext = os.path.splitext(model)
log.info(f"Loading network files:\n\t{model}")
if model_ext == ".xml":
# Read .bin weights for IR format only
model_bin = model_name + ".bin"
log.info(f"\n\t{model_bin}")
net = ie.read_network(model=model, weights=model_bin)
assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies"


@@ -58,7 +58,8 @@ Running the application with the empty list of options yields the usage message
To run the sample, you can use RMNet_SSD or other object-detection models. You can download the pre-trained models with the OpenVINO [Model Downloader](@ref omz_tools_downloader_README) or from [https://download.01.org/opencv/](https://download.01.org/opencv/).
> **NOTE**: Before running the sample with a trained model, make sure the model is converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
>
> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
You can do inference of an image using a trained RMNet_SSD network on FPGA with fallback to CPU using the following command:
```
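# A hypothetical invocation (the script name, the -d/--device flag, and the
# paths are assumed from the sample sources rather than shown in this hunk):
python3 object_detection_sample_ssd.py -m <path_to_model>/rmnet_ssd.onnx -i <path_to_image>/cat.bmp -d HETERO:FPGA,CPU
```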


@@ -29,7 +29,7 @@ def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group("Options")
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.",
required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to image file.",
required=True, type=str, nargs="+")
@@ -53,11 +53,17 @@ def main():
args = build_argparser().parse_args()
log.info("Loading Inference Engine")
ie = IECore()
# --------------------------- 1. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = ie.read_network(model=model_xml, weights=model_bin)
# ---1. Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format ---
model = args.model
model_bin = None
model_name, model_ext = os.path.splitext(model)
log.info(f"Loading network files:\n\t{model}")
if model_ext == ".xml":
# Read .bin weights for IR format only
model_bin = model_name + ".bin"
log.info(f"\n\t{model_bin}")
net = ie.read_network(model=model, weights=model_bin)
func = ng.function_from_cnn(net)
ops = func.get_ordered_ops()
# -----------------------------------------------------------------------------------------------------
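
The nGraph function exposes the model's operation graph uniformly for both IR and ONNX inputs. Building on the two calls above, the ops can be indexed by name and type, for instance to locate a DetectionOutput layer (a sketch using the same `ngraph` API the sample already imports):

```python
import ngraph as ng

func = ng.function_from_cnn(net)  # nGraph view of the loaded CNNNetwork
ops = func.get_ordered_ops()      # operations in topological order
# Map friendly names to operation types
op_types = {op.friendly_name: op.get_type_name() for op in ops}
detection_outputs = [name for name, t in op_types.items() if t == "DetectionOutput"]
```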


@@ -4,6 +4,8 @@ This topic demonstrates how to run the Neural Style Transfer sample application,
inference of style transfer models.
> **NOTE**: The OpenVINO™ toolkit does not include a pre-trained model to run the Neural Style Transfer sample. A public model from the [Zhaw's Neural Style Transfer repository](https://github.com/zhaw/neural_style) can be used. Read the [Converting a Style Transfer Model from MXNet*](../../../../../docs/MO_DG/prepare_model/convert_model/mxnet_specific/Convert_Style_Transfer_From_MXNet.md) topic from the [Model Optimizer Developer Guide](../../../../../docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md) to learn about how to get the trained model and how to convert it to the Inference Engine format (\*.xml + \*.bin).
>
> The sample accepts models in ONNX format (.onnx) that do not require preprocessing.
## How It Works


@@ -28,7 +28,7 @@ def build_argparser():
parser = ArgumentParser(add_help=False)
args = parser.add_argument_group('Options')
args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.", required=True, type=str)
args.add_argument("-m", "--model", help="Required. Path to an .xml or .onnx file with a trained model.", required=True, type=str)
args.add_argument("-i", "--input", help="Required. Path to a folder with images or path to an image files", required=True,
type=str, nargs="+")
args.add_argument("-l", "--cpu_extension",
@@ -55,17 +55,23 @@ def build_argparser():
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
model_xml = args.model
model_bin = os.path.splitext(model_xml)[0] + ".bin"
# Plugin initialization for specified device and load extensions library if specified
log.info("Creating Inference Engine")
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, "CPU")
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
net = ie.read_network(model=model_xml, weights=model_bin)
# Read a model in OpenVINO Intermediate Representation (.xml and .bin files) or ONNX (.onnx file) format
model = args.model
model_bin = None
model_name, model_ext = os.path.splitext(model)
log.info(f"Loading network files:\n\t{model}")
if model_ext == ".xml":
# Read .bin weights for IR format only
model_bin = model_name + ".bin"
log.info(f"\n\t{model_bin}")
net = ie.read_network(model=model, weights=model_bin)
assert len(net.input_info.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies"
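
The trailing asserts mean each sample still supports only single-input, single-output topologies, for IR and ONNX alike. A quick way to see what a loaded model actually exposes (a sketch; the model path is a placeholder):

```python
from openvino.inference_engine import IECore

ie = IECore()
net = ie.read_network(model="model.onnx")  # an IR .xml (with its .bin) works too
print("inputs: ", list(net.input_info.keys()))  # the samples require exactly one
print("outputs:", list(net.outputs.keys()))     # the samples require exactly one
```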