[ONNX FE] Extend ONNX FE NMS-9 (#11790)

* update ONNX FE NMS to v9

* remove reshaping dynamic shapes

* fix style

* xfail two MSFT models
This commit is contained in:
Bartek Szmelczynski
2022-07-05 12:10:29 +02:00
committed by GitHub
parent 654105d567
commit 82f691c38b
6 changed files with 163 additions and 21 deletions

View File

@@ -130,3 +130,6 @@ xfail_issue_81976 = xfail_test(reason="RuntimeError: z node not found in graph c
xfail_issue_82038 = xfail_test(reason="ScatterElements, ScatterND, AssertionError: Result mismatch")
xfail_issue_82039 = xfail_test(reason="Unsupported data type Optional, RuntimeError: [ NOT_IMPLEMENTED ] "
"CPU plugin: Input image format UNSPECIFIED is not supported yet...")
+xfail_issue_86909 = xfail_test(reason="ssd_resnet34 - AssertionError: zoo models results mismatch")
+xfail_issue_86911 = xfail_test(reason="LSTM_Seq_len_unpacked - AssertionError: zoo models results mismatch")

View File

@@ -40,9 +40,7 @@ class Runtime(object):
self.backend_name = backend_name
log.debug(f"Creating Inference Engine for {backend_name}")
self.backend = Core()
-        assert backend_name in self.backend.available_devices, (
-            'The requested device "' + backend_name + '" is not supported!'
-        )
+        assert backend_name in self.backend.available_devices, 'The requested device "' + backend_name + '" is not supported!'
def set_config(self, config: Dict[str, str]) -> None:
"""Set the inference engine configuration."""
@@ -110,28 +108,24 @@ class Computation(object):
# Input validation
if len(input_values) < len(self.parameters):
raise UserInputError(
-                "Expected %s params, received not enough %s values.", len(self.parameters), len(input_values),
+                "Expected %s params, received not enough %s values.",
+                len(self.parameters),
+                len(input_values),
)
param_names = [param.friendly_name for param in self.parameters]
input_shapes = [get_shape(input_value) for input_value in input_values]
if self.network_cache.get(str(input_shapes)) is None:
function = self.function
if self.function.is_dynamic():
function = function.clone()
function.reshape(dict(zip(param_names, [PartialShape(i) for i in input_shapes])))
self.network_cache[str(input_shapes)] = function
else:
function = self.network_cache[str(input_shapes)]
-        executable_network = self.runtime.backend.compile_model(function, self.runtime.backend_name)
+        compiled_model = self.runtime.backend.compile_model(function, self.runtime.backend_name)
is_bfloat16 = any(parameter.get_output_element_type(0) == Type.bf16 for parameter in self.parameters)
if is_bfloat16:
input_values = self.convert_to_tensors(input_values)
-        request = executable_network.create_infer_request()
+        request = compiled_model.create_infer_request()
result_buffers = request.infer(dict(zip(param_names, input_values)))
"""Note: other methods to get result_buffers from request
First call infer with no return value:

View File

@@ -21,6 +21,8 @@ from tests import (
xfail_issue_48190,
xfail_issue_58676,
xfail_issue_78843,
+    xfail_issue_86909,
+    xfail_issue_86911,
xfail_issue_onnx_models_140)
MODELS_ROOT_DIR = tests.MODEL_ZOO_DIR
@@ -192,6 +194,9 @@ if len(zoo_models) > 0:
(xfail_issue_47495, "test_MSFT_opset10_BERT_Squad_bertsquad10_cpu"),
(xfail_issue_78843, "test_MSFT_opset10_mlperf_ssd_mobilenet_300_ssd_mobilenet_v1_coco_2018_01_28_cpu"),
+    (xfail_issue_86909, "test_MSFT_opset10_mlperf_ssd_resnet34_1200_ssd_resnet34_mAP_20.2_cpu"),
+    (xfail_issue_86911, "test_MSFT_opset9_LSTM_Seq_lens_unpacked_model_cpu"),
]
for test_case in import_xfail_list + execution_xfail_list:
xfail, test_name = test_case