Add support api 2.0 to layer_tests (#9814)

* Add support api 2.0

* Upd imports

* Update onnx_tests

* Upd tensorflow_tests

* Fix test_split_concat.py

* Fixed comparing
This commit is contained in:
Ilya Sharikov
2022-01-31 13:15:30 +03:00
committed by GitHub
parent 0a2c0e1539
commit 56f2bc2f12
99 changed files with 2785 additions and 1719 deletions

View File

@@ -10,8 +10,10 @@ import xml.etree.ElementTree as ET
from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine
from pathlib import Path
import numpy as np
from common.constants import test_device, test_precision
from common.layer_utils import IEInfer
from common.layer_utils import IEInfer, InferAPI20
from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine
from common.utils.common_utils import generate_ir
from common.utils.parsers import mapping_parser
@@ -20,13 +22,16 @@ class CommonLayerTest:
input_model_key = "input_model"
def produce_model_path(self, framework_model, save_path):
pass
raise RuntimeError("This is base class, please implement produce_model_path function for"
" the specific framework")
def get_framework_results(self, inputs_dict, model_path):
pass
raise RuntimeError("This is base class, please implement get_framework_results function for"
" the specific framework")
def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp_dir, use_new_frontend=False,
infer_timeout=60, enabled_transforms='', disabled_transforms='', **kwargs):
def _test(self, framework_model, ref_net, ie_device, precision, ir_version, temp_dir, api_2,
use_new_frontend=False, infer_timeout=60, enabled_transforms='',
disabled_transforms='', **kwargs):
"""
:param enabled_transforms/disabled_transforms: string with idxs of transforms that should be enabled/disabled.
Example: "transform_1,transform_2"
@@ -34,6 +39,7 @@ class CommonLayerTest:
model_path = self.produce_model_path(framework_model=framework_model, save_path=temp_dir)
self.use_new_frontend = use_new_frontend
self.api_2 = api_2
# TODO Pass environment variables via subprocess environment
os.environ['MO_ENABLED_TRANSFORMS'] = enabled_transforms
os.environ['MO_DISABLED_TRANSFORMS'] = disabled_transforms
@@ -59,7 +65,8 @@ class CommonLayerTest:
del os.environ['MO_ENABLED_TRANSFORMS']
del os.environ['MO_DISABLED_TRANSFORMS']
assert not exit_code, ("IR generation failed with {} exit code: {}".format(exit_code, stderr))
assert not exit_code, (
"IR generation failed with {} exit code: {}".format(exit_code, stderr))
path_to_xml = Path(temp_dir, 'model.xml')
path_to_bin = Path(temp_dir, 'model.bin')
@@ -70,23 +77,23 @@ class CommonLayerTest:
# (flag, resp) = ir.compare(ref_net)
# assert flag, '\n'.join(resp)
from openvino.inference_engine import IECore
core = IECore()
net = core.read_network(path_to_xml, path_to_bin)
inputs_info = {}
for item in net.input_info.items():
inputs_info[item[0]] = item[1].tensor_desc.dims
if api_2:
ie_engine = InferAPI20(model=path_to_xml,
weights=path_to_bin,
device=ie_device)
else:
ie_engine = IEInfer(model=path_to_xml,
weights=path_to_bin,
device=ie_device)
# Prepare feed dict
if 'kwargs_to_prepare_input' in kwargs and kwargs['kwargs_to_prepare_input']:
inputs_dict = self._prepare_input(inputs_info, kwargs['kwargs_to_prepare_input'])
inputs_dict = self._prepare_input(ie_engine.get_inputs_info(precision),
kwargs['kwargs_to_prepare_input'])
else:
inputs_dict = self._prepare_input(inputs_info)
inputs_dict = self._prepare_input(ie_engine.get_inputs_info(precision))
# IE infer:
ie_engine = IEInfer(model=path_to_xml,
weights=path_to_bin,
device=ie_device)
infer_res = ie_engine.infer(input_data=inputs_dict, infer_timeout=infer_timeout)
if hasattr(self, 'skip_framework') and self.skip_framework:
@@ -111,8 +118,10 @@ class CommonLayerTest:
# Compare Ie results with Framework results
fw_eps = custom_eps if precision == 'FP32' else 5e-2
assert self.compare_ie_results_with_framework(infer_res=infer_res, framework_res=fw_res,
mapping_dict=mapping_dict, framework_eps=fw_eps), \
"Comparing with Framework failed: ie_res={}; framework_res={}.".format(infer_res, fw_res)
mapping_dict=mapping_dict,
framework_eps=fw_eps), \
"Comparing with Framework failed: ie_res={}; framework_res={}.".format(infer_res,
fw_res)
if len(inputs_dict.keys()) > 1 or len(infer_res.keys()) > 1:
tree = ET.parse(path_to_xml)
@@ -138,7 +147,6 @@ class CommonLayerTest:
'Output order does not match framework order. Output with index {} is {}, ' \
'but expected {}'.format(i, output_without_sink_port, output_name)
# Feed dict for each input is filled with random number.
# It is possible to redefine this function and generate your own input
def _prepare_input(self, inputs_dict):
@@ -146,7 +154,8 @@ class CommonLayerTest:
inputs_dict[input] = np.random.randint(-255, 255, inputs_dict[input]).astype(np.float32)
return inputs_dict
def compare_ie_results_with_framework(self, infer_res, framework_res, mapping_dict, framework_eps):
def compare_ie_results_with_framework(self, infer_res, framework_res, mapping_dict,
framework_eps):
is_ok = True
from common.utils.common_utils import allclose
for framework_out_name in framework_res:
@@ -158,11 +167,13 @@ class CommonLayerTest:
else:
ie_out_name = framework_out_name
if not allclose(infer_res[ie_out_name], framework_res[framework_out_name], atol=framework_eps,
if not allclose(infer_res[ie_out_name], framework_res[framework_out_name],
atol=framework_eps,
rtol=framework_eps):
is_ok = False
print("Max diff is {}".format(
np.array(abs(infer_res[ie_out_name] - framework_res[framework_out_name])).max()))
np.array(
abs(infer_res[ie_out_name] - framework_res[framework_out_name])).max()))
else:
print("Accuracy validation successful!\n")
print("absolute eps: {}, relative eps: {}".format(framework_eps, framework_eps))

View File

@@ -5,6 +5,8 @@ import subprocess
import sys
from common.utils.multiprocessing_utils import multiprocessing_run
from openvino.inference_engine import IECore, get_version as ie_get_version
from openvino.runtime import Core, get_version as ie2_get_version
def shell(cmd, env=None, cwd=None):
@@ -25,7 +27,10 @@ class BaseInfer:
def fw_infer(self, input_data):
raise RuntimeError("This is base class, please implement infer function for the specific framework")
def infer(self, input_data, infer_timeout=60):
def get_inputs_info(self, precision) -> dict:
raise RuntimeError("This is base class, please implement get_inputs_info function for the specific framework")
def infer(self, input_data, infer_timeout=10):
self.res = multiprocessing_run(self.fw_infer, [input_data], self.name, infer_timeout)
return self.res
@@ -38,7 +43,6 @@ class IEInfer(BaseInfer):
self.weights = weights
def fw_infer(self, input_data):
from openvino.inference_engine import IECore, get_version as ie_get_version
print("Inference Engine version: {}".format(ie_get_version()))
print("Creating IE Core Engine...")
@@ -56,3 +60,56 @@ class IEInfer(BaseInfer):
del ie
return result
def get_inputs_info(self, precision) -> dict:
    """Return a mapping of input name -> tensor dims using the legacy IE API.

    The ``precision`` argument is accepted for interface symmetry with
    ``InferAPI20.get_inputs_info`` and is not used here.
    """
    ie = IECore()
    network = ie.read_network(self.model, self.weights)
    # Old-API networks expose inputs via input_info; dims come from the tensor desc.
    return {name: info.tensor_desc.dims
            for name, info in network.input_info.items()}
class InferAPI20(BaseInfer):
    """Inference runner built on OpenVINO API 2.0 (``openvino.runtime``).

    Mirrors the interface of ``IEInfer`` (the legacy-API runner) so the layer
    tests can switch between the two implementations via the ``api_2`` flag.
    """

    def __init__(self, model, weights, device):
        super().__init__('Inference Engine')
        self.device = device
        self.model = model
        self.weights = weights

    def fw_infer(self, input_data):
        """Read, compile and synchronously infer the model on ``input_data``.

        Returns a dict keyed by output tensor name (port suffix stripped).
        """
        print("Inference Engine version: {}".format(ie2_get_version()))
        print("Creating IE Core Engine...")
        core = Core()
        print("Reading network files")
        model = core.read_model(self.model, self.weights)
        print("Loading network")
        compiled_model = core.compile_model(model, self.device)
        print("Starting inference")
        request = compiled_model.create_infer_request()
        raw_result = request.infer(input_data)

        result = {}
        for out_obj, out_tensor in raw_result.items():
            # all input and output tensors have to be named
            assert out_obj.names, "Output tensor {} has no names".format(out_obj)
            # Drop the ":N" port suffix to match framework-side output naming.
            result[out_obj.get_any_name().split(':')[0]] = out_tensor

        if "compiled_model" in locals():
            del compiled_model
        if "core" in locals():
            del core
        return result

    def get_inputs_info(self, precision) -> dict:
        """Return a mapping of input name -> shape (as a list) via API 2.0.

        ``precision`` is unused; kept for interface parity with ``IEInfer``.
        """
        core = Core()
        model = core.read_model(self.model, self.weights)
        return {inp.get_any_name(): list(inp.shape) for inp in model.inputs}

View File

@@ -7,8 +7,8 @@ from common.layer_test_class import CommonLayerTest
from common.utils.tf_utils import summarize_graph
def transpose_nchw_to_nhwc(data, use_new_frontend):
if use_new_frontend:
def transpose_nchw_to_nhwc(data, use_new_frontend, api_2):
if use_new_frontend or api_2:
return data
if len(data.shape) == 4: # reshaping for 4D tensors
@@ -19,8 +19,8 @@ def transpose_nchw_to_nhwc(data, use_new_frontend):
return data
def transpose_nhwc_to_nchw(data, use_new_frontend):
if use_new_frontend:
def transpose_nhwc_to_nchw(data, use_new_frontend, api_2):
if use_new_frontend or api_2:
return data
if len(data.shape) == 4: # reshaping for 4D tensors
@@ -62,13 +62,20 @@ class CommonTFLayerTest(CommonLayerTest):
tf.compat.v1.import_graph_def(graph_def, name='')
input = dict()
for key in inputs_dict.keys():
data = inputs_dict.get(key)
input[key + ':0'] = transpose_nchw_to_nhwc(data, self.use_new_frontend)
if self.api_2:
input.update(inputs_dict)
else:
for key in inputs_dict.keys():
data = inputs_dict.get(key)
if not self.api_2:
key += ':0'
input[key] = transpose_nchw_to_nhwc(data, self.use_new_frontend, self.api_2)
tf_res = sess.run([out + ":0" for out in outputs_list], input)
result = dict()
for i, output in enumerate(outputs_list):
_tf_res = tf_res[i]
result[output] = transpose_nhwc_to_nchw(_tf_res, self.use_new_frontend)
result[output] = transpose_nhwc_to_nchw(_tf_res, self.use_new_frontend,
self.api_2)
return result

View File

@@ -79,6 +79,9 @@ def allclose(cur_array, ref_array, atol, rtol):
:param rtol: relative tolerance (threshold for relative difference)
:return: bool value means that values of tensors are equal with tolerance or not
"""
abs_diff = np.absolute(cur_array - ref_array)
if cur_array.dtype == bool:
abs_diff = np.absolute(cur_array ^ ref_array)
else:
abs_diff = np.absolute(cur_array - ref_array)
max_val = np.maximum(np.absolute(cur_array), np.absolute(ref_array))
return ((abs_diff < atol) | (abs_diff < rtol * max_val)).all()

View File

@@ -64,6 +64,11 @@ def pytest_addoption(parser):
required=False,
action="store_true",
help="Use Model Optimizer with new FrontEnd")
parser.addoption(
"--api_2",
action="store_true",
help="Use new API 2.0 for model processing in Inference Engine",
default=False)
@pytest.fixture(scope="session")
@@ -78,6 +83,12 @@ def use_new_frontend(request):
return request.config.getoption('use_new_frontend')
@pytest.fixture(scope="session")
def api_2(request):
    """Expose the ``--api_2`` command-line option as a session-scoped fixture."""
    use_api_2 = request.config.getoption('api_2')
    return use_api_2
@pytest.fixture(scope="function")
def temp_dir(request):
"""Create directory for test purposes."""

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -167,12 +167,12 @@ class TestAbs(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_abs(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_abs(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_abs_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_abs_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -194,144 +194,160 @@ class TestOperations(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_add(self, params, ie_device, precision, ir_version, temp_dir):
def test_add(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Add', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_add_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Add', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_add_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Add', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sub(self, params, ie_device, precision, ir_version, temp_dir):
def test_sub(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Sub', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sub_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Sub', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_sub_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Sub', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_mul(self, params, ie_device, precision, ir_version, temp_dir):
def test_mul(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Mul', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_mul_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Mul', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Mul', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_div(self, params, ie_device, precision, ir_version, temp_dir):
def test_div(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Div', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_div_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Div', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_div_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Div', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_add_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_add_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Add', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_add_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Add', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_add_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Add', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_sub_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_sub_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Sub', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_sub_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Sub', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_sub_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Sub', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_mul_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_mul_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Mul', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_mul_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Mul', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Mul', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_div_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_div_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Div', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_div_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Div', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_div_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, op='Div', precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_add_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, op='Add', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_add_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Add', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_add_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Add', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_add_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, op='Add', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sub_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, op='Sub', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_sub_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Sub', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sub_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Sub', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_sub_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, op='Sub', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_mul_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, op='Mul', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Mul', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_mul_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Mul', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_mul_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, op='Mul', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_div_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, op='Div', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_div_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, op='Div', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_div_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, op='Div', precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_div_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, op='Div', precision=precision, opset=6,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -251,18 +251,21 @@ class TestAnd(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_and(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_and(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_and_one_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_and_one_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_and_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_and_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -101,7 +101,8 @@ class TestArgMax(OnnxRuntimeLayerTest):
'squeeze_const': {'kind': 'op', 'type': 'Const'},
'squeeze_const_data': {'shape': [1], 'kind': 'data'},
'squeeze': {'kind': 'op', 'type': 'Squeeze'},
'squeeze_data': {'shape': output_shape_squeeze, 'kind': 'data'}
'squeeze_data': {'shape': output_shape_squeeze,
'kind': 'data'}
})
edges.extend([('squeeze_const_indata', 'squeeze_const'),
('squeeze_const', 'squeeze_const_data'),
@@ -115,8 +116,9 @@ class TestArgMax(OnnxRuntimeLayerTest):
'flatten_const': {'kind': 'op', 'type': 'Const'},
'flatten_const_data': {'shape': [2], 'kind': 'data'},
'flatten': {'kind': 'op', 'type': 'Reshape'},
'flatten_data': {'shape': [output_shape_squeeze[0], np.prod(output_shape_squeeze[1:])],
'kind': 'data'}
'flatten_data': {
'shape': [output_shape_squeeze[0], np.prod(output_shape_squeeze[1:])],
'kind': 'data'}
})
edges.extend([('indices_data', 'flatten'),
('flatten_const_indata', 'flatten_const'),
@@ -148,6 +150,6 @@ class TestArgMax(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keepdims", [None, 0])
@pytest.mark.nightly
def test_argmax(self, params, keepdims, ie_device, precision, ir_version, temp_dir):
def test_argmax(self, params, keepdims, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, keepdims=keepdims),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -116,18 +116,18 @@ class TestBatchNormalization(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_bn(self, params, ie_device, precision, ir_version, temp_dir):
def test_bn(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_bn_opset6(self, params, ie_device, precision, ir_version, temp_dir):
def test_bn_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_bn_opset7(self, params, ie_device, precision, ir_version, temp_dir):
def test_bn_opset7(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, opset=7, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -171,24 +171,26 @@ class TestCeil(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_ceil_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_ceil_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_ceil_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_ceil_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_ceil(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_ceil(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_ceil_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_ceil_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -4,6 +4,7 @@
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -121,7 +122,8 @@ class TestClip(OnnxRuntimeLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'input_const_data': {'kind': 'data', 'value': [min] if min is not None else [max]},
'input_const_data': {'kind': 'data',
'value': [min] if min is not None else [max]},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'shape': [], 'kind': 'data'},
'node': {'kind': 'op', 'type': 'Minimum' if max is not None else 'Maximum'},
@@ -158,12 +160,14 @@ class TestClip(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_clip_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, opset=6), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_clip_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, opset=6), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_clip_opset11(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, opset=11), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_clip_opset11(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, opset=11), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -140,7 +140,6 @@ class TestConcat(Caffe2OnnxLayerTest):
import onnx
from onnx import helper
from onnx import TensorProto
import numpy as np
shape = input_shape
inputs_list = []
@@ -253,36 +252,39 @@ class TestConcat(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_concat_3D_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_concat_3D_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D_precommit)
@pytest.mark.precommit
def test_concat_4D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_concat_4D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_concat_4D_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_concat_4D_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D_precommit)
@pytest.mark.nightly
def test_concat_5D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_concat_5D_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_concat_5D_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_concat_5D_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_concat_inputs_order_params)
@pytest.mark.nightly
def test_concat_inputs_order(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_concat_net(**params, ir_version=ir_version), ie_device=ie_device, precision=precision,
ir_version=ir_version, temp_dir=temp_dir, input_names=params['input_names'])
def test_concat_inputs_order(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_concat_net(**params, ir_version=ir_version), ie_device=ie_device,
precision=precision, ir_version=ir_version, temp_dir=temp_dir,
input_names=params['input_names'], api_2=api_2)

View File

@@ -1,12 +1,11 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -16,7 +15,8 @@ class TestConv(OnnxRuntimeLayerTest):
inputs_dict[input] = np.random.randn(*inputs_dict[input]).astype(np.float32)
return inputs_dict
def create_net(self, shape, weights_shape, dilations, group, pads, strides, bias, ir_version, auto_pad=None):
def create_net(self, shape, weights_shape, dilations, group, pads, strides, bias, ir_version,
auto_pad=None):
"""
ONNX net IR net
@@ -119,7 +119,8 @@ class TestConv(OnnxRuntimeLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'before_shape_const_indata': {'shape': [len(input_shape)], 'value': input_shape, 'kind': 'data'},
'before_shape_const_indata': {'shape': [len(input_shape)], 'value': input_shape,
'kind': 'data'},
'before_shape_const': {'kind': 'op', 'type': 'Const'},
'before_shape_const_data': {'shape': [len(input_shape)], 'kind': 'data'},
'reshape_before': {'kind': 'op', 'type': 'Reshape'},
@@ -127,11 +128,13 @@ class TestConv(OnnxRuntimeLayerTest):
'kernel_indata': {'kind': 'data', 'shape': [len(weights_const.flatten())]},
'kernel': {'kind': 'op', 'type': 'Const'},
'kernel_data': {'kind': 'data', 'value': None},
'node': {'kind': 'op', 'type': 'Convolution' if group == 1 else 'GroupConvolution',
'node': {'kind': 'op',
'type': 'Convolution' if group == 1 else 'GroupConvolution',
'dilations': [1, dilations[0]],
'pads_begin': [0, _pads[0, 0]], 'pads_end': [0, _pads[1, 0]]},
'node_data': {'shape': node_shape, 'kind': 'data'},
'after_shape_const_indata': {'shape': [len(output_shape)], 'value': output_shape, 'kind': 'data'},
'after_shape_const_indata': {'shape': [len(output_shape)],
'value': output_shape, 'kind': 'data'},
'after_shape_const': {'kind': 'op', 'type': 'Const'},
'after_shape_const_data': {'shape': [len(output_shape)], 'kind': 'data'},
'reshape_after': {'kind': 'op', 'type': 'Reshape'},
@@ -154,11 +157,12 @@ class TestConv(OnnxRuntimeLayerTest):
('after_shape_const_data', 'reshape_after'),
('reshape_after', 'reshape_after_data')]
if bias:
nodes_attributes.update({'const_indata': {'kind': 'data', 'value': bias_const.flatten()},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'kind': 'data', 'shape': None},
'bias': {'type': 'Add', 'kind': 'op'},
'bias_data': {'kind': 'data', 'shape': output_shape}})
nodes_attributes.update(
{'const_indata': {'kind': 'data', 'value': bias_const.flatten()},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'kind': 'data', 'shape': None},
'bias': {'type': 'Add', 'kind': 'op'},
'bias_data': {'kind': 'data', 'shape': output_shape}})
edges += [('reshape_after_data', 'bias'),
('const_indata', 'const'),
('const', 'const_data'),
@@ -178,8 +182,10 @@ class TestConv(OnnxRuntimeLayerTest):
'kernel_indata': {'kind': 'data', 'value': weights_const.flatten()},
'kernel': {'kind': 'op', 'type': 'Const'},
'kernel_data': {'kind': 'data', 'shape': _weights_shape},
'node': {'kind': 'op', 'type': 'Convolution' if group == 1 else 'GroupConvolution',
'dilations': dilations, 'pads_begin': _pads[0, :], 'pads_end': _pads[1, :]},
'node': {'kind': 'op',
'type': 'Convolution' if group == 1 else 'GroupConvolution',
'dilations': dilations, 'pads_begin': _pads[0, :],
'pads_end': _pads[1, :]},
'node_data': {'shape': output_shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}}
edges = [('input', 'input_data'),
@@ -190,11 +196,12 @@ class TestConv(OnnxRuntimeLayerTest):
('node', 'node_data')]
if bias:
nodes_attributes.update({'const_indata': {'kind': 'data', 'value': bias_const.flatten()},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'kind': 'data', 'shape': None},
'bias': {'type': 'Add', 'kind': 'op'},
'bias_data': {'kind': 'data', 'shape': output_shape}})
nodes_attributes.update(
{'const_indata': {'kind': 'data', 'value': bias_const.flatten()},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'kind': 'data', 'shape': None},
'bias': {'type': 'Add', 'kind': 'op'},
'bias_data': {'kind': 'data', 'shape': output_shape}})
edges += [('node_data', 'bias'),
('const_indata', 'const'),
('const', 'const_data'),
@@ -243,42 +250,78 @@ class TestConv(OnnxRuntimeLayerTest):
dict(weights_shape=[3, 1, 3, 5], group=3)]
test_data_4D_autopad = [
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[1, 1, 1, 1], strides=[1, 1], dilations=[1, 1]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[2, 2, 2, 2], strides=[1, 1], dilations=[2, 2]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[3, 5, 3, 5], strides=[1, 1], dilations=[3, 5]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[1, 1, 1, 1], strides=[2, 2], dilations=[1, 1]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[2, 2, 2, 2], strides=[2, 2], dilations=[2, 2]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[3, 5, 3, 5], strides=[2, 2], dilations=[3, 5]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[1, 0, 1, 0], strides=[3, 5], dilations=[1, 1]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[2, 0, 2, 0], strides=[3, 5], dilations=[2, 2]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[3, 3, 3, 3], strides=[3, 5], dilations=[3, 5]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[2, 1, 2, 1], strides=[1, 1], dilations=[1, 1]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[4, 2, 4, 2], strides=[1, 1], dilations=[2, 2]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[6, 5, 6, 5], strides=[1, 1], dilations=[3, 5]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[2, 1, 2, 1], strides=[2, 2], dilations=[1, 1]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[4, 2, 4, 2], strides=[2, 2], dilations=[2, 2]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[6, 5, 6, 5], strides=[2, 2], dilations=[3, 5]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[2, 0, 2, 0], strides=[3, 5], dilations=[1, 1]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[4, 0, 4, 0], strides=[3, 5], dilations=[2, 2]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[6, 3, 6, 3], strides=[3, 5], dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[1, 1, 1, 1], strides=[1, 1], dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[2, 2, 2, 2], strides=[1, 1], dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[3, 5, 3, 5], strides=[1, 1], dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[1, 1, 1, 1], strides=[2, 2], dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[2, 2, 2, 2], strides=[2, 2], dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[3, 5, 3, 5], strides=[2, 2], dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[1, 0, 1, 0], strides=[3, 5], dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[2, 0, 2, 0], strides=[3, 5], dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[3, 3, 3, 3], strides=[3, 5], dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[1, 2, 1, 2], strides=[1, 1], dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[2, 4, 2, 4], strides=[1, 1], dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[3, 10, 3, 10], strides=[1, 1], dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[1, 2, 1, 2], strides=[2, 2], dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[2, 4, 2, 4], strides=[2, 2], dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[3, 10, 3, 10], strides=[2, 2], dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[1, 0, 1, 0], strides=[3, 5], dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[2, 2, 2, 2], strides=[3, 5], dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[3, 8, 3, 8], strides=[3, 5], dilations=[3, 5])]
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[1, 1, 1, 1], strides=[1, 1],
dilations=[1, 1]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[2, 2, 2, 2], strides=[1, 1],
dilations=[2, 2]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[3, 5, 3, 5], strides=[1, 1],
dilations=[3, 5]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[1, 1, 1, 1], strides=[2, 2],
dilations=[1, 1]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[2, 2, 2, 2], strides=[2, 2],
dilations=[2, 2]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[3, 5, 3, 5], strides=[2, 2],
dilations=[3, 5]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[1, 0, 1, 0], strides=[3, 5],
dilations=[1, 1]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[2, 0, 2, 0], strides=[3, 5],
dilations=[2, 2]),
dict(weights_shape=[1, 3, 3, 3], group=1, pads=[3, 3, 3, 3], strides=[3, 5],
dilations=[3, 5]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[2, 1, 2, 1], strides=[1, 1],
dilations=[1, 1]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[4, 2, 4, 2], strides=[1, 1],
dilations=[2, 2]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[6, 5, 6, 5], strides=[1, 1],
dilations=[3, 5]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[2, 1, 2, 1], strides=[2, 2],
dilations=[1, 1]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[4, 2, 4, 2], strides=[2, 2],
dilations=[2, 2]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[6, 5, 6, 5], strides=[2, 2],
dilations=[3, 5]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[2, 0, 2, 0], strides=[3, 5],
dilations=[1, 1]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[4, 0, 4, 0], strides=[3, 5],
dilations=[2, 2]),
dict(weights_shape=[1, 3, 5, 3], group=1, pads=[6, 3, 6, 3], strides=[3, 5],
dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[1, 1, 1, 1], strides=[1, 1],
dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[2, 2, 2, 2], strides=[1, 1],
dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[3, 5, 3, 5], strides=[1, 1],
dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[1, 1, 1, 1], strides=[2, 2],
dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[2, 2, 2, 2], strides=[2, 2],
dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[3, 5, 3, 5], strides=[2, 2],
dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[1, 0, 1, 0], strides=[3, 5],
dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[2, 0, 2, 0], strides=[3, 5],
dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 3], group=3, pads=[3, 3, 3, 3], strides=[3, 5],
dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[1, 2, 1, 2], strides=[1, 1],
dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[2, 4, 2, 4], strides=[1, 1],
dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[3, 10, 3, 10], strides=[1, 1],
dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[1, 2, 1, 2], strides=[2, 2],
dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[2, 4, 2, 4], strides=[2, 2],
dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[3, 10, 3, 10], strides=[2, 2],
dilations=[3, 5]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[1, 0, 1, 0], strides=[3, 5],
dilations=[1, 1]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[2, 2, 2, 2], strides=[3, 5],
dilations=[2, 2]),
dict(weights_shape=[3, 1, 3, 5], group=3, pads=[3, 8, 3, 8], strides=[3, 5],
dilations=[3, 5])]
test_data_5D_precommit = [
dict(weights_shape=[1, 3, 3, 3, 3], group=1),
@@ -291,42 +334,78 @@ class TestConv(OnnxRuntimeLayerTest):
dict(weights_shape=[3, 1, 5, 4, 3], group=3)]
test_data_5D_autopad = [
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[1, 1, 1, 1, 1, 1], strides=[1, 1, 1], dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[2, 2, 2, 2, 2, 2], strides=[1, 1, 1], dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[3, 4, 5, 3, 4, 5], strides=[1, 1, 1], dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[1, 1, 1, 1, 1, 1], strides=[2, 2, 2], dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[2, 2, 2, 2, 2, 2], strides=[2, 2, 2], dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[3, 4, 5, 3, 4, 5], strides=[2, 2, 2], dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[1, 1, 0, 1, 1, 0], strides=[3, 4, 5], dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[2, 2, 0, 2, 2, 0], strides=[3, 4, 5], dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[3, 4, 3, 3, 4, 3], strides=[3, 4, 5], dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[1, 1, 2, 1, 2, 2], strides=[1, 1, 1], dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[2, 3, 4, 2, 3, 4], strides=[1, 1, 1], dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[3, 6, 10, 3, 6, 10], strides=[1, 1, 1], dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[1, 1, 2, 1, 2, 2], strides=[2, 2, 2], dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[2, 3, 4, 2, 3, 4], strides=[2, 2, 2], dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[3, 6, 10, 3, 6, 10], strides=[2, 2, 2], dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[1, 1, 0, 1, 2, 0], strides=[3, 4, 5], dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[2, 3, 2, 2, 3, 2], strides=[3, 4, 5], dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[3, 6, 8, 3, 6, 8], strides=[3, 4, 5], dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[1, 1, 1, 1, 1, 1], strides=[1, 1, 1], dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[2, 2, 2, 2, 2, 2], strides=[1, 1, 1], dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[3, 4, 5, 3, 4, 5], strides=[1, 1, 1], dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[1, 1, 1, 1, 1, 1], strides=[2, 2, 2], dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[2, 2, 2, 2, 2, 2], strides=[2, 2, 2], dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[3, 4, 5, 3, 4, 5], strides=[2, 2, 2], dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[1, 1, 0, 1, 1, 0], strides=[3, 4, 5], dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[2, 2, 0, 2, 2, 0], strides=[3, 4, 5], dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[3, 4, 3, 3, 4, 3], strides=[3, 4, 5], dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[2, 1, 1, 2, 2, 1], strides=[1, 1, 1], dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[4, 3, 2, 4, 3, 2], strides=[1, 1, 1], dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[6, 6, 5, 6, 6, 5], strides=[1, 1, 1], dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[2, 1, 1, 2, 2, 1], strides=[2, 2, 2], dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[4, 3, 2, 4, 3, 2], strides=[2, 2, 2], dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[6, 6, 5, 6, 6, 5], strides=[2, 2, 2], dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[2, 1, 0, 2, 2, 0], strides=[3, 4, 5], dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[4, 3, 0, 4, 3, 0], strides=[3, 4, 5], dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[6, 6, 3, 6, 6, 3], strides=[3, 4, 5], dilations=[3, 4, 5])]
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[1, 1, 1, 1, 1, 1], strides=[1, 1, 1],
dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[2, 2, 2, 2, 2, 2], strides=[1, 1, 1],
dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[3, 4, 5, 3, 4, 5], strides=[1, 1, 1],
dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[1, 1, 1, 1, 1, 1], strides=[2, 2, 2],
dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[2, 2, 2, 2, 2, 2], strides=[2, 2, 2],
dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[3, 4, 5, 3, 4, 5], strides=[2, 2, 2],
dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[1, 1, 0, 1, 1, 0], strides=[3, 4, 5],
dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[2, 2, 0, 2, 2, 0], strides=[3, 4, 5],
dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 3, 3], group=1, pads=[3, 4, 3, 3, 4, 3], strides=[3, 4, 5],
dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[1, 1, 2, 1, 2, 2], strides=[1, 1, 1],
dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[2, 3, 4, 2, 3, 4], strides=[1, 1, 1],
dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[3, 6, 10, 3, 6, 10], strides=[1, 1, 1],
dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[1, 1, 2, 1, 2, 2], strides=[2, 2, 2],
dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[2, 3, 4, 2, 3, 4], strides=[2, 2, 2],
dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[3, 6, 10, 3, 6, 10], strides=[2, 2, 2],
dilations=[3, 4, 5]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[1, 1, 0, 1, 2, 0], strides=[3, 4, 5],
dilations=[1, 1, 1]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[2, 3, 2, 2, 3, 2], strides=[3, 4, 5],
dilations=[2, 2, 2]),
dict(weights_shape=[1, 3, 3, 4, 5], group=1, pads=[3, 6, 8, 3, 6, 8], strides=[3, 4, 5],
dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[1, 1, 1, 1, 1, 1], strides=[1, 1, 1],
dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[2, 2, 2, 2, 2, 2], strides=[1, 1, 1],
dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[3, 4, 5, 3, 4, 5], strides=[1, 1, 1],
dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[1, 1, 1, 1, 1, 1], strides=[2, 2, 2],
dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[2, 2, 2, 2, 2, 2], strides=[2, 2, 2],
dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[3, 4, 5, 3, 4, 5], strides=[2, 2, 2],
dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[1, 1, 0, 1, 1, 0], strides=[3, 4, 5],
dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[2, 2, 0, 2, 2, 0], strides=[3, 4, 5],
dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 3, 3, 3], group=3, pads=[3, 4, 3, 3, 4, 3], strides=[3, 4, 5],
dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[2, 1, 1, 2, 2, 1], strides=[1, 1, 1],
dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[4, 3, 2, 4, 3, 2], strides=[1, 1, 1],
dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[6, 6, 5, 6, 6, 5], strides=[1, 1, 1],
dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[2, 1, 1, 2, 2, 1], strides=[2, 2, 2],
dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[4, 3, 2, 4, 3, 2], strides=[2, 2, 2],
dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[6, 6, 5, 6, 6, 5], strides=[2, 2, 2],
dilations=[3, 4, 5]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[2, 1, 0, 2, 2, 0], strides=[3, 4, 5],
dilations=[1, 1, 1]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[4, 3, 0, 4, 3, 0], strides=[3, 4, 5],
dilations=[2, 2, 2]),
dict(weights_shape=[3, 1, 5, 4, 3], group=3, pads=[6, 6, 3, 6, 6, 3], strides=[3, 4, 5],
dilations=[3, 4, 5])]
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.parametrize("dilations", [[1], [2]])
@@ -334,19 +413,23 @@ class TestConv(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("strides", [[1], [2]])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.nightly
def test_conv_3D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, shape=[2, 3, 25], dilations=dilations, pads=pads, strides=strides,
def test_conv_3D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_net(**params, shape=[2, 3, 25], dilations=dilations, pads=pads,
strides=strides,
bias=bias, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D_autopad[:-1])
@pytest.mark.parametrize("auto_pad", ['SAME_UPPER', 'SAME_LOWER'])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.nightly
@pytest.mark.xfail(reason='autopad dimetions do not agree with framework')
def test_conv_3D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, shape=[2, 3, 25], bias=bias, auto_pad=auto_pad, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_conv_3D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_net(**params, shape=[2, 3, 25], bias=bias, auto_pad=auto_pad,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D_precommit)
@pytest.mark.parametrize("dilations", [[3, 5]])
@@ -355,10 +438,11 @@ class TestConv(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.precommit
def test_conv_4D_precommit(self, params, dilations, pads, strides, bias, ie_device, precision,
ir_version, temp_dir):
self._test(*self.create_net(**params, shape=[2, 3, 25, 25], dilations=dilations, pads=pads, strides=strides,
ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, shape=[2, 3, 25, 25], dilations=dilations, pads=pads,
strides=strides,
bias=bias, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.parametrize("dilations", [[1, 1], [2, 2], [3, 5]])
@@ -366,19 +450,24 @@ class TestConv(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("strides", [[1, 1], [2, 2], [3, 5]])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.nightly
def test_conv_4D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version, temp_dir):
def test_conv_4D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(
*self.create_net(**params, shape=[2, 3, 25, 25], dilations=dilations, pads=pads, strides=strides, bias=bias,
ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir)
*self.create_net(**params, shape=[2, 3, 25, 25], dilations=dilations, pads=pads,
strides=strides, bias=bias,
ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D_autopad[:-1])
@pytest.mark.parametrize("auto_pad", ['SAME_UPPER', 'SAME_LOWER'])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.nightly
@pytest.mark.xfail(reason='autopad dimetions do not agree with framework')
def test_conv_4D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version, temp_dir):
def test_conv_4D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_net(**params, shape=[2, 3, 25, 25], bias=bias, auto_pad=auto_pad,
ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir)
ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D_precommit)
@pytest.mark.parametrize("dilations", [[3, 4, 5]])
@@ -387,10 +476,12 @@ class TestConv(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.precommit
def test_conv_5D_precommit(self, params, dilations, pads, strides, bias, ie_device, precision,
ir_version, temp_dir):
self._test(*self.create_net(**params, shape=[2, 3, 25, 25, 25], dilations=dilations, pads=pads, strides=strides,
bias=bias, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ir_version, temp_dir, api_2):
self._test(
*self.create_net(**params, shape=[2, 3, 25, 25, 25], dilations=dilations, pads=pads,
strides=strides,
bias=bias, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.parametrize("dilations", [[1, 1, 1], [2, 2, 2], [3, 4, 5]])
@@ -398,16 +489,22 @@ class TestConv(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("strides", [[1, 1, 1], [2, 2, 2], [3, 4, 5]])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.nightly
def test_conv_5D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, shape=[2, 3, 25, 25, 25], dilations=dilations, pads=pads, strides=strides,
bias=bias, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_conv_5D(self, params, dilations, pads, strides, bias, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(
*self.create_net(**params, shape=[2, 3, 25, 25, 25], dilations=dilations, pads=pads,
strides=strides,
bias=bias, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D_autopad[:-1])
@pytest.mark.parametrize("auto_pad", ['SAME_UPPER', 'SAME_LOWER'])
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.nightly
@pytest.mark.xfail(reason='autopad dimetions do not agree with framework')
def test_conv_5D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, shape=[2, 3, 25, 25, 25], bias=bias, auto_pad=auto_pad,
ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir)
def test_conv_5D_autopad(self, params, auto_pad, bias, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(
*self.create_net(**params, shape=[2, 3, 25, 25, 25], bias=bias, auto_pad=auto_pad,
ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -12,8 +12,10 @@ class TestConvTranspose(OnnxRuntimeLayerTest):
inputs_dict[input] = np.random.randn(*inputs_dict[input]).astype(np.float32)
return inputs_dict
def create_conv_transpose(self, ir_version, input_shape, output_shape, kernel_shape, strides, group=1,
dilations=None, pads=None, force_output_shape=False, output_padding=None, bias=False,
def create_conv_transpose(self, ir_version, input_shape, output_shape, kernel_shape, strides,
group=1,
dilations=None, pads=None, force_output_shape=False,
output_padding=None, bias=False,
auto_pad=None):
#
# Create ONNX model
@@ -120,7 +122,8 @@ class TestConvTranspose(OnnxRuntimeLayerTest):
kernel_shape=[3, 3, 2, 2], strides=[1, 1], dilations=[2, 2]),
marks=pytest.mark.skip(reason="Skipped until fixed")),
pytest.param(dict(input_shape=[1, 2, 20, 20], output_shape=[1, 2, 85, 85],
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2, output_padding=[1, 1]),
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2,
output_padding=[1, 1]),
marks=pytest.mark.skip(reason="Skipped until fixed"))
]
@@ -141,9 +144,11 @@ class TestConvTranspose(OnnxRuntimeLayerTest):
dict(input_shape=[1, 2, 20, 20], output_shape=[1, 2, 80, 80],
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2, pads=[2, 2, 2, 2]),
dict(input_shape=[1, 2, 20, 20], output_shape=[1, 2, 87, 87],
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2, pads=[2, 2, 2, 2], dilations=[2, 2]),
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2, pads=[2, 2, 2, 2],
dilations=[2, 2]),
dict(input_shape=[1, 2, 20, 20], output_shape=[1, 2, 80, 80],
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2, pads=[2, 2, 2, 2], force_output_shape=True),
kernel_shape=[2, 1, 8, 8], strides=[4, 4], group=2, pads=[2, 2, 2, 2],
force_output_shape=True),
]
valid_auto_pad_tests_4D = common_tests_4D + [
@@ -178,38 +183,46 @@ class TestConvTranspose(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("auto_pad", ["NOTSET"])
@pytest.mark.precommit
def test_conv_transpose_4D_precommit(self, params, bias, ie_device, precision, ir_version, auto_pad, temp_dir):
def test_conv_transpose_4D_precommit(self, params, bias, ie_device, precision, ir_version,
auto_pad, temp_dir, api_2):
if ie_device == 'GPU' and 'dilations' in params:
pytest.xfail('dilations are not supported on GPU')
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias,
auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", explicit_pads_tests_4D)
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("auto_pad", ["NOTSET"])
@pytest.mark.nightly
def test_conv_transpose_4D(self, params, bias, ie_device, precision, ir_version, auto_pad, temp_dir):
def test_conv_transpose_4D(self, params, bias, ie_device, precision, ir_version, auto_pad,
temp_dir, api_2):
if ie_device == 'GPU' and 'dilations' in params:
pytest.xfail('dilations are not supported on GPU')
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias,
auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", valid_auto_pad_tests_4D)
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("auto_pad", ["VALID"])
@pytest.mark.nightly
def test_conv_transpose_valid_auto_pad_4D(self, params, bias, ie_device, precision, ir_version, auto_pad, temp_dir):
def test_conv_transpose_valid_auto_pad_4D(self, params, bias, ie_device, precision, ir_version,
auto_pad, temp_dir, api_2):
if ie_device == 'GPU' and 'dilations' in params:
pytest.xfail('dilations are not supported on GPU')
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias,
auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", same_auto_pad_tests_4D)
@pytest.mark.parametrize("bias", [False, True])
@pytest.mark.parametrize("auto_pad", ["SAME_UPPER", "SAME_LOWER"])
@pytest.mark.nightly
def test_conv_transpose_same_auto_pad_4D(self, params, bias, ie_device, precision, ir_version, auto_pad, temp_dir):
def test_conv_transpose_same_auto_pad_4D(self, params, bias, ie_device, precision, ir_version,
auto_pad, temp_dir, api_2):
if ie_device == 'GPU' and 'dilations' in params:
pytest.xfail('dilations are not supported on GPU')
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias, auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_conv_transpose(**params, ir_version=ir_version, bias=bias,
auto_pad=auto_pad),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -5,6 +5,7 @@ import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -116,7 +117,8 @@ class TestCumSum(OnnxRuntimeLayerTest):
return onnx_net, ref_net
def create_net_const(self, shape, precision, ir_version, axis=None, reverse=None, exclusive=None):
def create_net_const(self, shape, precision, ir_version, axis=None, reverse=None,
exclusive=None):
"""
ONNX net IR net
@@ -250,18 +252,23 @@ class TestCumSum(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("reverse", [0, 1])
@pytest.mark.parametrize("exclusive", [0, 1])
@pytest.mark.nightly
def test_cumsum(self, params, reverse, exclusive, ie_device, precision, ir_version, temp_dir):
def test_cumsum(self, params, reverse, exclusive, ie_device, precision, ir_version, temp_dir,
api_2):
if 'axis' not in params:
pytest.skip('No axis cases fail in ONNX')
self._test(*self.create_net(**params, exclusive=exclusive, reverse=reverse, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(
*self.create_net(**params, exclusive=exclusive, reverse=reverse, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("reverse", [0, 1])
@pytest.mark.parametrize("exclusive", [0, 1])
@pytest.mark.nightly
def test_cumsum_const(self, params, reverse, exclusive, ie_device, precision, ir_version, temp_dir):
def test_cumsum_const(self, params, reverse, exclusive, ie_device, precision, ir_version,
temp_dir, api_2):
if 'axis' not in params:
pytest.skip('No axis cases fail in ONNX')
self._test(*self.create_net_const(**params, precision=precision, exclusive=exclusive, reverse=reverse,
ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_net_const(**params, precision=precision, exclusive=exclusive,
reverse=reverse,
ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -5,6 +5,7 @@ import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -17,7 +18,8 @@ class TestDequantizeLinear(OnnxRuntimeLayerTest):
dtype=self.inp_type)
return inputs_dict
def create_dequanize_linear(self, shape, y_scale: np.array, y_zero_point=None, axis=None, opset=10, ir_version='10'):
def create_dequanize_linear(self, shape, y_scale: np.array, y_zero_point=None, axis=None,
opset=10, ir_version='10'):
"""
ONNX net IR net
@@ -137,16 +139,26 @@ class TestDequantizeLinear(OnnxRuntimeLayerTest):
return onnx_net, ref_net
test_data = [
dict(shape=[8], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[8], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float), y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[8], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[8], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4, 6], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4, 6, 8], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(1, dtype=np.int8)),
dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(128, dtype=np.uint8)),
dict(shape=[2, 4, 6, 8, 10], y_scale=np.array(2, dtype=np.float),
y_zero_point=np.array(1, dtype=np.int8)),
]
test_data_def_zerop = [
dict(shape=[8], y_scale=np.array(2, dtype=np.float)),
@@ -182,24 +194,32 @@ class TestDequantizeLinear(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_def_zerop)
@pytest.mark.nightly
@pytest.mark.xfail(reason='Defualt zero_point fails on onnxruntime')
def test_quantize_linear_def_zerop_opset10(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_dequanize_linear(**params, ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir)
def test_quantize_linear_def_zerop_opset10(self, params, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_dequanize_linear(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_quantize_linear_opset10(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_dequanize_linear(**params, ir_version=ir_version), ie_device, precision, ir_version, temp_dir=temp_dir)
def test_quantize_linear_opset10(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_dequanize_linear(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data + test_data_def_zerop)
@pytest.mark.nightly
@pytest.mark.skip(reason='DequantizeLinear-13 is unsupported in MO')
def test_quantize_linear_opset13(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_dequanize_linear(**params, opset=13, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir)
def test_quantize_linear_opset13(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_dequanize_linear(**params, opset=13, ir_version=ir_version),
ie_device, precision,
ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_axis)
@pytest.mark.nightly
@pytest.mark.skip(reason='DequantizeLinear-13 is unsupported in MO')
def test_quantize_linear_axis_opset13(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_dequanize_linear(**params, opset=13, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir)
def test_quantize_linear_axis_opset13(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_dequanize_linear(**params, opset=13, ir_version=ir_version),
ie_device, precision,
ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -146,24 +146,28 @@ class TestDropout(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_dropout_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_dropout_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_dropout(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_dropout(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_dropout_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_dropout_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, opset=6, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_dropout_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_dropout_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -137,7 +137,8 @@ class TestElu(Caffe2OnnxLayerTest):
#
# Create reference IR net
#
constant = np.clip(constant, 0, np.inf) + (np.exp(np.clip(constant, -np.inf, 0)) - 1) * alpha
constant = np.clip(constant, 0, np.inf) + (
np.exp(np.clip(constant, -np.inf, 0)) - 1) * alpha
if precision == 'FP16':
constant = constant.astype(np.float16)
@@ -174,12 +175,13 @@ class TestElu(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_elu(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_elu(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_elu_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_elu_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -8,6 +8,7 @@ import pytest
import torch
import torch.nn as nn
from common.layer_test_class import CommonLayerTest, check_ir_version
from unit_tests.utils.graph import build_graph
@@ -27,7 +28,8 @@ class EmbeddingBagModel(torch.nn.Module):
def __init__(self, n, m, indices_shape=None, per_sample_weights=False, mode="sum"):
super(EmbeddingBagModel, self).__init__()
EE = nn.EmbeddingBag(n, m, mode=mode, sparse=True)
self.W = np.random.uniform(low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)).astype(np.float32)
self.W = np.random.uniform(low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)).astype(
np.float32)
EE.weight.data = torch.tensor(self.W, requires_grad=True)
self.embedding_bag = EE
if per_sample_weights:
@@ -52,7 +54,8 @@ class TestPytorchEmbeddingBag(PytorchLayerTest):
"""
# Create Pytorch model
EE = EmbeddingBagModel(n, m, indices_shape=[emb_batch_size], per_sample_weights=per_sample_weights)
EE = EmbeddingBagModel(n, m, indices_shape=[emb_batch_size],
per_sample_weights=per_sample_weights)
ref_net = None
if check_ir_version(10, None, ir_version):
@@ -113,7 +116,9 @@ class TestPytorchEmbeddingBag(PytorchLayerTest):
torch.from_numpy(np.array(offsets)).long())
else:
self.var = (
torch.from_numpy(np.random.choice(n, emb_batch_size).reshape(int(emb_batch_size / 2), 2)).long(),)
torch.from_numpy(
np.random.choice(n, emb_batch_size).reshape(int(emb_batch_size / 2),
2)).long(),)
return EE, ref_net
test_data = [
@@ -127,5 +132,6 @@ class TestPytorchEmbeddingBag(PytorchLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_pytorch_embedding_bag(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir)
def test_pytorch_embedding_bag(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params), ie_device, precision, ir_version, temp_dir=temp_dir,
api_2=api_2)

View File

@@ -184,119 +184,126 @@ class TestFlatten(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_3D(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_3D(self, params, opset, ie_device, precision, ir_version, temp_dir, api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_3D_const(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_3D_const(self, params, opset, ie_device, precision, ir_version, temp_dir,
api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_4D(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_4D(self, params, opset, ie_device, precision, ir_version, temp_dir, api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D_precommit)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.precommit
def test_flatten_4D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_4D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir,
api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D_precommit)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_4D_const_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_4D_const_precommit(self, params, opset, ie_device, precision, ir_version,
temp_dir, api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_4D_const(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_4D_const(self, params, opset, ie_device, precision, ir_version, temp_dir,
api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D_precommit)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_5D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_5D_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir,
api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_5D(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_5D(self, params, opset, ie_device, precision, ir_version, temp_dir, api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D_precommit)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_5D_const_precommit(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_5D_const_precommit(self, params, opset, ie_device, precision, ir_version,
temp_dir, api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.parametrize("opset", [6, 9])
@pytest.mark.nightly
def test_flatten_5D_const(self, params, opset, ie_device, precision, ir_version, temp_dir):
def test_flatten_5D_const(self, params, opset, ie_device, precision, ir_version, temp_dir,
api_2):
# negative axis not allowed by onnx spec for flatten-1 and flatten-9
if params['axis'] < 0:
self.skip_framework = True
else:
self.skip_framework = False
self._test(*self.create_flatten_net_const(**params, ir_version=ir_version, opset=opset),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -168,12 +168,13 @@ class TestFloor(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_floor(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_floor(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_floor_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_floor_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -213,56 +213,72 @@ class TestGather(OnnxRuntimeLayerTest):
return onnx_net, ref_net
test_data_precommit = [
dict(shape=[6, 8, 10, 12], axis=2, indices=[[0, 2, 4], [5, 7, 9]], output_shape=[6, 8, 2, 3, 12]),
dict(shape=[6, 8, 10, 12], axis=2, indices=[[0, 2, 4], [5, 7, 9]],
output_shape=[6, 8, 2, 3, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=1, indices=[2, 5], output_shape=[4, 2, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=-1, indices=[5, 8], output_shape=[4, 6, 8, 10, 2]),
dict(shape=[6, 8, 10, 12], axis=-1, indices=[[[2, -1], [3, 2]], [[5, -1], [3, -2]]], output_shape=[6, 8, 10, 2, 2, 2])
dict(shape=[6, 8, 10, 12], axis=-1, indices=[[[2, -1], [3, 2]], [[5, -1], [3, -2]]],
output_shape=[6, 8, 10, 2, 2, 2])
]
test_data = [dict(shape=[10, 12], axis=0, indices=[3, 6], output_shape=[2, 12]),
dict(shape=[10, 12], axis=-1, indices=[4, 7], output_shape=[10, 2]),
dict(shape=[10, 12], axis=None, indices=[[0, 1, 3, 4], [5, 6, 8, 9]], output_shape=[2, 4, 12]),
dict(shape=[10, 12], axis=1, indices=[[0, 1, 3, 4, 5], [6, 7, 9, 10, 11]], output_shape=[10, 2, 5]),
dict(shape=[10, 12], axis=None, indices=[[0, 1, 3, 4], [5, 6, 8, 9]],
output_shape=[2, 4, 12]),
dict(shape=[10, 12], axis=1, indices=[[0, 1, 3, 4, 5], [6, 7, 9, 10, 11]],
output_shape=[10, 2, 5]),
dict(shape=[8, 10, 12], axis=0, indices=[3, 6], output_shape=[2, 10, 12]),
dict(shape=[8, 10, 12], axis=-1, indices=[5, 8], output_shape=[8, 10, 2]),
dict(shape=[8, 10, 12], axis=None, indices=[[0, 1], [3, 4], [6, 7]], output_shape=[3, 2, 10, 12]),
dict(shape=[8, 10, 12], axis=1, indices=[[0, 2, 4], [5, 7, 9]], output_shape=[8, 2, 3, 12]),
dict(shape=[8, 10, 12], axis=None, indices=[[0, 1], [3, 4], [6, 7]],
output_shape=[3, 2, 10, 12]),
dict(shape=[8, 10, 12], axis=1, indices=[[0, 2, 4], [5, 7, 9]],
output_shape=[8, 2, 3, 12]),
dict(shape=[6, 8, 10, 12], axis=-1, indices=[5, 8], output_shape=[6, 8, 10, 2]),
dict(shape=[6, 8, 10, 12], axis=None, indices=[[0, 1, 2], [3, 4, 5]], output_shape=[2, 3, 8, 10, 12]),
dict(shape=[6, 8, 10, 12], axis=2, indices=[[0, 2, 4], [5, 7, 9]], output_shape=[6, 8, 2, 3, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=0, indices=[1, 3], output_shape=[2, 6, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=1, indices=[2, 5], output_shape=[4, 2, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=-1, indices=[5, 8], output_shape=[4, 6, 8, 10, 2])]
dict(shape=[6, 8, 10, 12], axis=None, indices=[[0, 1, 2], [3, 4, 5]],
output_shape=[2, 3, 8, 10, 12]),
dict(shape=[6, 8, 10, 12], axis=2, indices=[[0, 2, 4], [5, 7, 9]],
output_shape=[6, 8, 2, 3, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=0, indices=[1, 3],
output_shape=[2, 6, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=1, indices=[2, 5],
output_shape=[4, 2, 8, 10, 12]),
dict(shape=[4, 6, 8, 10, 12], axis=-1, indices=[5, 8],
output_shape=[4, 6, 8, 10, 2])]
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_gather(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gather(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_gather(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gather(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_gather_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gather_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
test_data_negative_indices = [dict(shape=[10, 12], axis=0, indices=[3, -1, -4], output_shape=[3, 12]),
dict(shape=[6, 10, 14, 12], axis=1, indices=[[0, -1, 3, -4], [-5, 6, -7, 8]],
output_shape=[6, 2, 4, 14, 12]),
dict(shape=[8, 10, 14, 12], axis=1, indices=[[-2, 2, -4], [5, -7, 9]],
output_shape=[8, 2, 3, 14, 12]),
dict(shape=[6, 8, 10, 12], axis=-1, indices=[[[2, -1], [3, 2]], [[5, -1], [3, -2]]],
output_shape=[6, 8, 10, 2, 2, 2])]
test_data_negative_indices = [
dict(shape=[10, 12], axis=0, indices=[3, -1, -4], output_shape=[3, 12]),
dict(shape=[6, 10, 14, 12], axis=1, indices=[[0, -1, 3, -4], [-5, 6, -7, 8]],
output_shape=[6, 2, 4, 14, 12]),
dict(shape=[8, 10, 14, 12], axis=1, indices=[[-2, 2, -4], [5, -7, 9]],
output_shape=[8, 2, 3, 14, 12]),
dict(shape=[6, 8, 10, 12], axis=-1, indices=[[[2, -1], [3, 2]], [[5, -1], [3, -2]]],
output_shape=[6, 8, 10, 2, 2, 2])]
@pytest.mark.xfail(reason='negative indices are not yet implemented on CPU: xxx-54630')
@pytest.mark.parametrize("params", test_data_negative_indices)
@pytest.mark.nightly
def test_gather_nightly_negative_indices(self, params, ie_device, precision, ir_version, temp_dir):
def test_gather_nightly_negative_indices(self, params, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -16,7 +16,8 @@ class TestGemm(OnnxRuntimeLayerTest):
inputs_dict[input] = np.random.randn(*inputs_dict[input]).astype(np.float32)
return inputs_dict
def create_net(self, shapeA, shapeB, shapeC, alpha, beta, trans_a, trans_b, precision, opset, ir_version,):
def create_net(self, shapeA, shapeB, shapeC, alpha, beta, trans_a, trans_b, precision, opset,
ir_version, ):
"""
ONNX net IR net
@@ -128,7 +129,8 @@ class TestGemm(OnnxRuntimeLayerTest):
return onnx_net, ref_net
def create_net_double(self, shapeA, shapeB, shapeC, alpha, beta, trans_a, trans_b, precision, ir_version):
def create_net_double(self, shapeA, shapeB, shapeC, alpha, beta, trans_a, trans_b, precision,
ir_version):
"""
ONNX net IR net
@@ -223,10 +225,14 @@ class TestGemm(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("opset", [None, 11])
@pytest.mark.nightly
@pytest.mark.precommit
def test_gemm(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, opset, ir_version, temp_dir):
self._test(*self.create_net(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta, trans_a,
trans_b, precision, opset, ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gemm(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, opset,
ir_version, temp_dir, api_2):
self._test(
*self.create_net(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta,
trans_a,
trans_b, precision, opset, ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_bc)
@pytest.mark.parametrize("alpha", [None, 0.1, 2.0])
@@ -236,10 +242,14 @@ class TestGemm(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("opset", [None, 11])
@pytest.mark.nightly
@pytest.mark.precommit
def test_gemm_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, opset, ir_version, temp_dir):
self._test(*self.create_net(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta, trans_a,
trans_b, precision, opset, ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gemm_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, opset,
ir_version, temp_dir, api_2):
self._test(
*self.create_net(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta,
trans_a,
trans_b, precision, opset, ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("alpha", [None, 0.1, 2.0])
@@ -248,10 +258,14 @@ class TestGemm(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("trans_b", [None, 1])
@pytest.mark.nightly
@pytest.mark.precommit
def test_gemm_double(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_double(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta,
trans_a, trans_b, precision, ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gemm_double(self, params, alpha, beta, trans_a, trans_b, ie_device, precision,
ir_version, temp_dir, api_2):
self._test(
*self.create_net_double(params['shapeA'], params['shapeB'], params['shapeC'], alpha,
beta,
trans_a, trans_b, precision, ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_bc)
@pytest.mark.parametrize("alpha", [None, 0.1, 2.0])
@@ -260,10 +274,14 @@ class TestGemm(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("trans_b", [None, 1])
@pytest.mark.nightly
@pytest.mark.precommit
def test_gemm_double_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_double(params['shapeA'], params['shapeB'], params['shapeC'], alpha, beta,
trans_a, trans_b, precision, ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_gemm_double_bc(self, params, alpha, beta, trans_a, trans_b, ie_device, precision,
ir_version, temp_dir, api_2):
self._test(
*self.create_net_double(params['shapeA'], params['shapeB'], params['shapeC'], alpha,
beta,
trans_a, trans_b, precision, ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
class PytorchLayerTest(CommonLayerTest):
@@ -316,6 +334,7 @@ class TestPytorchMM(PytorchLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_pytorch_mm(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(precision, **params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_pytorch_mm(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(precision, **params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -66,7 +66,8 @@ class TestHardSigmoid(OnnxRuntimeLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'input_alpha_data': {'kind': 'data', 'value': [alpha if alpha is not None else 0.2]},
'input_alpha_data': {'kind': 'data',
'value': [alpha if alpha is not None else 0.2]},
'alpha': {'kind': 'op', 'type': 'Const'},
'alpha_data': {'shape': [], 'kind': 'data'},
'input_beta_data': {'kind': 'data', 'value': [beta if beta is not None else 0.5]},
@@ -165,7 +166,9 @@ class TestHardSigmoid(OnnxRuntimeLayerTest):
#
# Create reference IR net
#
constant = np.clip(constant * (alpha if alpha is not None else 0.2) + (beta if beta is not None else 0.5), 0, 1)
constant = np.clip(
constant * (alpha if alpha is not None else 0.2) + (beta if beta is not None else 0.5),
0, 1)
if precision == 'FP16':
constant = constant.astype(np.float16)
@@ -218,18 +221,20 @@ class TestHardSigmoid(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_hard_sigmoid(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_hard_sigmoid(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.nightly
def test_hard_sigmoid_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_hard_sigmoid_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_hard_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_hard_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -141,7 +141,6 @@ class TestIdentity(Caffe2OnnxLayerTest):
ref_net = None
if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
@@ -172,12 +171,13 @@ class TestIdentity(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_identity(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_identity(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_identity_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_identity_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -139,23 +139,27 @@ class TestImageScaler(Caffe2OnnxLayerTest):
dict(shape=[6, 8, 10, 12], scale=4.5)]
@pytest.mark.parametrize("params", test_data_precommit)
def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_image_scaler_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_image_scaler(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_image_scaler_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_image_scaler_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -100,12 +100,14 @@ class TestInstanceNormalization(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir):
def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir):
def test_instance_normalization(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -181,24 +181,27 @@ class TestLeakyRelu(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_leaky_relu_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_leaky_relu_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_leaky_relu(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_leaky_relu(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_leaky_relu_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_leaky_relu_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_leaky_relu_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_leaky_relu_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,16 +3,17 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
class TestLog(OnnxRuntimeLayerTest):
def _prepare_input(self, inputs_dict):
for input in inputs_dict.keys():
inputs_dict[input] = np.random.rand(*(inputs_dict[input])).astype(np.float32) * 255 + 0.5
inputs_dict[input] = np.random.rand(*(inputs_dict[input])).astype(
np.float32) * 255 + 0.5
return inputs_dict
def create_net(self, shape, ir_version):
@@ -170,24 +171,26 @@ class TestLog(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_log_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_log_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_log(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.nightly
def test_log_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_log_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_log_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
@@ -37,7 +37,8 @@ def get_flatten_shape(src_shape, axis):
class TestLog(OnnxRuntimeLayerTest):
def _prepare_input(self, inputs_dict):
for input in inputs_dict.keys():
inputs_dict[input] = np.random.rand(*(inputs_dict[input])).astype(np.float32) * 255 + 0.5
inputs_dict[input] = np.random.rand(*(inputs_dict[input])).astype(
np.float32) * 255 + 0.5
return inputs_dict
def create_net(self, shape, logsoftmax_axis, ir_version):
@@ -95,19 +96,24 @@ class TestLog(OnnxRuntimeLayerTest):
'kind': 'data',
'value': int64_array(reshape_data_val)},
'flatten_shape': {'type': 'Const', 'kind': 'op', 'shape': 2},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data', 'value': None},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data',
'value': None},
'reshape': {'kind': 'op', 'type': 'Reshape'},
'reshape_data': {'kind': 'data', 'shape': flatten_shape, 'value': None},
'reduce_max_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data', 'value': int64_array([1])},
'reduce_max_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data',
'value': int64_array([1])},
'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data',
'value': None},
'reduce_max': {'type': 'ReduceMax', 'kind': 'op', 'keep_dims': True},
'reduce_max_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'sub_first': {'type': 'Subtract', 'kind': 'op'},
'sub_first_data': {'shape': flatten_shape, 'kind': 'data', 'value': None},
'reduce_sum_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data', 'value': int64_array([1])},
'reduce_sum_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data',
'value': int64_array([1])},
'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data',
'value': None},
'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'keep_dims': True},
'reduce_sum_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'exp': {'type': 'Exp', 'kind': 'op'},
@@ -156,19 +162,24 @@ class TestLog(OnnxRuntimeLayerTest):
'kind': 'data',
'value': int64_array(reshape_data_val)},
'flatten_shape': {'type': 'Const', 'kind': 'op', 'shape': 2},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data', 'value': None},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data',
'value': None},
'reshape': {'kind': 'op', 'type': 'Reshape'},
'reshape_data': {'kind': 'data', 'shape': flatten_shape, 'value': None},
'reduce_max_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data', 'value': int64_array([1])},
'reduce_max_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data',
'value': int64_array([1])},
'reduce_max_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
'reduce_max_axis_data': {'shape': int64_array([1]), 'kind': 'data',
'value': None},
'reduce_max': {'type': 'ReduceMax', 'kind': 'op', 'keep_dims': True},
'reduce_max_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'sub_first': {'type': 'Subtract', 'kind': 'op'},
'sub_first_data': {'shape': flatten_shape, 'kind': 'data', 'value': None},
'reduce_sum_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data', 'value': int64_array([1])},
'reduce_sum_axis_val': {'shape': int64_array([1]).shape, 'kind': 'data',
'value': int64_array([1])},
'reduce_sum_axis': {'type': 'Const', 'kind': 'op', 'shape': 1},
'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data', 'value': None},
'reduce_sum_axis_data': {'shape': int64_array([1]), 'kind': 'data',
'value': None},
'reduce_sum': {'type': 'ReduceSum', 'kind': 'op', 'keep_dims': True},
'reduce_sum_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'exp': {'type': 'Exp', 'kind': 'op'},
@@ -177,9 +188,11 @@ class TestLog(OnnxRuntimeLayerTest):
'log_data': {'shape': reduce_sum_shape, 'kind': 'data', 'value': None},
'sub_second': {'type': 'Subtract', 'kind': 'op'},
'sub_second_data': {'shape': flatten_shape, 'kind': 'data', 'value': None},
'last_shape_val': {'shape': int64_array(shape).shape, 'kind': 'data', 'value': int64_array(shape)},
'last_shape_val': {'shape': int64_array(shape).shape, 'kind': 'data',
'value': int64_array(shape)},
'last_shape': {'type': 'Const', 'kind': 'op', 'shape': len(shape)},
'last_shape_data': {'shape': int64_array([len(shape)]), 'kind': 'data', 'value': None},
'last_shape_data': {'shape': int64_array([len(shape)]), 'kind': 'data',
'value': None},
'last_reshape': {'kind': 'op', 'type': 'Reshape'},
'last_reshape_data': {'kind': 'data', 'shape': shape, 'value': None},
'result': {'kind': 'op', 'type': 'Result'},
@@ -235,6 +248,7 @@ class TestLog(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_log(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -10,7 +10,6 @@ from common.onnx_layer_test_class import OnnxRuntimeLayerTest
class TestLoop(OnnxRuntimeLayerTest):
@staticmethod
def create_const(name, tensor_type, value):
import onnx
from onnx import helper
from onnx import TensorProto
@@ -29,12 +28,12 @@ class TestLoop(OnnxRuntimeLayerTest):
vals=value.flatten().astype(np_type)))
@staticmethod
def create_body_graph(input_nodes, output_nodes, input_names, output_names, input_shape, graph_name):
def create_body_graph(input_nodes, output_nodes, input_names, output_names, input_shape,
graph_name):
# input_nodes - list of input nodes with structure {counter, condition, <other inputs>}
# output_nodes - list of output nodes with structure {condition, <back edges>, <external outputs>}.
# In this function I assume that every <other input> have <back edge> and <external output>
# input_shape - shape of all inputs from <other inputs>
import onnx
from onnx import helper
from onnx import TensorProto
@@ -45,27 +44,29 @@ class TestLoop(OnnxRuntimeLayerTest):
other_inputs_count = len(input_nodes) - 2
one_value = np.ones(input_shape, dtype=np.float)
one = TestLoop.create_const('one_'+graph_name, TensorProto.FLOAT, one_value)
one_int = TestLoop.create_const('one_int_'+graph_name, TensorProto.INT64, np.ones([1]))
one = TestLoop.create_const('one_' + graph_name, TensorProto.FLOAT, one_value)
one_int = TestLoop.create_const('one_int_' + graph_name, TensorProto.INT64, np.ones([1]))
# add one to all inputs except counter and condition
add_one_nodes = []
for i in range(2, len(input_names)):
add_one_nodes.append(helper.make_node('Add', inputs=[input_names[i], 'one_'+graph_name],
outputs=[output_names[other_inputs_count + i - 1]]))
add_one_nodes.append(
helper.make_node('Add', inputs=[input_names[i], 'one_' + graph_name],
outputs=[output_names[other_inputs_count + i - 1]]))
# add 1 to counter
add_one_to_m_node = helper.make_node(
'Add',
inputs=[input_names[0], 'one_int_'+graph_name],
outputs=['counter_plus_1_'+graph_name]
inputs=[input_names[0], 'one_int_' + graph_name],
outputs=['counter_plus_1_' + graph_name]
)
# map inputs to outputs - back edges
identity_nodes = []
for i in range(1, len(input_nodes)):
identity_nodes.append(helper.make_node('Identity',
inputs=[input_names[i]], outputs=[output_names[i-1]]))
inputs=[input_names[i]],
outputs=[output_names[i - 1]]))
body_nodes = [one, one_int]
body_nodes.extend(add_one_nodes)
@@ -87,7 +88,6 @@ class TestLoop(OnnxRuntimeLayerTest):
Input->Loop->Output => Only accuracy check
"""
import onnx
from onnx import helper
from onnx import TensorProto
@@ -113,7 +113,8 @@ class TestLoop(OnnxRuntimeLayerTest):
M_1 = self.create_const('M_1', TensorProto.INT64, m_1_value)
cond = self.create_const('cond', TensorProto.BOOL, cond_value)
body_graph_1 = self.create_body_graph([m_1, cond_int_1, in_1_int], [cond_out_1, in_1_int_out, out_1],
body_graph_1 = self.create_body_graph([m_1, cond_int_1, in_1_int],
[cond_out_1, in_1_int_out, out_1],
['m_1', 'cond_int_1', 'in_1_int'],
['cond_out_1', 'in_1_int_out', 'OUT_1'],
input_shape, 'body_graph_1')
@@ -150,7 +151,6 @@ class TestLoop(OnnxRuntimeLayerTest):
Input->Loop(Loop)->Output => Only accuracy check
"""
import onnx
from onnx import helper
from onnx import TensorProto
@@ -204,9 +204,11 @@ class TestLoop(OnnxRuntimeLayerTest):
cond_2 = self.create_const('cond_2', TensorProto.BOOL, cond_value)
# create body for internal loop
body_graph_2 = self.create_body_graph([m_2, cond_int_2, in_2_int], [cond_out_2, in_2_int_out, out_2],
body_graph_2 = self.create_body_graph([m_2, cond_int_2, in_2_int],
[cond_out_2, in_2_int_out, out_2],
['m_2', 'cond_int_2', 'in_2_int'],
['cond_out_2', 'in_2_int_out', 'OUT_2'], input_shape, 'body_graph_2')
['cond_out_2', 'in_2_int_out', 'OUT_2'], input_shape,
'body_graph_2')
node_loop_2 = helper.make_node(
'Loop',
inputs=['M_2', 'cond_2', 'IN_2'],
@@ -234,7 +236,8 @@ class TestLoop(OnnxRuntimeLayerTest):
)
body_graph_1 = helper.make_graph(
[one, add_one_node, one_int, add_one_to_m_node, M_2, cond_2, node_loop_2, out_1_node, cond_1_node,
[one, add_one_node, one_int, add_one_to_m_node, M_2, cond_2, node_loop_2, out_1_node,
cond_1_node,
in_1_int_node],
'body_graph_1',
[m_1, cond_int_1, in_1_int],
@@ -270,15 +273,15 @@ class TestLoop(OnnxRuntimeLayerTest):
@pytest.mark.precommit
@pytest.mark.timeout(250)
def test_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir):
def test_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir, api_2):
if ie_device == 'GPU':
pytest.skip('Loop not supported on GPU')
self._test(*self.create_loop(), ie_device, precision, ir_version, temp_dir=temp_dir,
infer_timeout=150)
infer_timeout=150, api_2=api_2)
@pytest.mark.precommit
@pytest.mark.timeout(250)
def test_loop_in_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir):
def test_loop_in_loop_simple_precommit(self, ie_device, precision, ir_version, temp_dir, api_2):
pytest.skip('The model used in the test is incorrect according to ONNX standart: 70158')
self._test(*self.create_loop_in_loop(), ie_device, precision, ir_version, temp_dir=temp_dir,
infer_timeout=150)
infer_timeout=150, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -68,7 +68,8 @@ class TestLRN(OnnxRuntimeLayerTest):
'const_indata': {'value': [1], 'kind': 'data'},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'shape': [1], 'kind': 'data'},
'norm': {'kind': 'op', 'type': 'LRN', 'alpha': alpha / bias, 'beta': beta, 'bias': bias,
'norm': {'kind': 'op', 'type': 'LRN', 'alpha': alpha / bias, 'beta': beta,
'bias': bias,
'size': size}, # 'region': 'across'
'norm_data': {'shape': shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
@@ -110,24 +111,27 @@ class TestLRN(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_lrn_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_lrn_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
# onnxruntime only supports 4D tensors for LRN
self.skip_framework = True
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_lrn(self, params, ie_device, precision, ir_version, temp_dir):
def test_lrn(self, params, ie_device, precision, ir_version, temp_dir, api_2):
# onnxruntime only supports 4D tensors for LRN
self.skip_framework = True
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_lrn_4D(self, params, ie_device, precision, ir_version, temp_dir):
def test_lrn_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self.skip_framework = False
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -143,20 +143,25 @@ class TestLSTM(Caffe2OnnxLayerTest):
@pytest.mark.timeout(250)
@pytest.mark.parametrize('direction', ["forward", "bidirectional", "reverse"])
@pytest.mark.parametrize('cell_type', ["LSTM", "GRU", "RNN"])
def test_lstm_simple_precommit(self, direction, cell_type, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version, temp_dir=temp_dir,
infer_timeout=150)
def test_lstm_simple_precommit(self, direction, cell_type, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version,
temp_dir=temp_dir, infer_timeout=150, api_2=api_2)
# LSTM/RNN/GRU Sequence Generation
@pytest.mark.parametrize('direction', ["forward", "bidirectional", "reverse"])
@pytest.mark.parametrize('cell_type', ["LSTM", "GRU", "RNN"])
def test_lstm_sequence_generate(self, direction, cell_type, ie_device, precision, ir_version, temp_dir):
def test_lstm_sequence_generate(self, direction, cell_type, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version,
disabled_transforms='lstm_to_tensor_iterator,gru_and_rnn_to_tensor_iterator', temp_dir=temp_dir)
disabled_transforms='lstm_to_tensor_iterator,gru_and_rnn_to_tensor_iterator',
temp_dir=temp_dir, api_2=api_2)
# TODO: add more params for nightly
@pytest.mark.nightly
@pytest.mark.parametrize('direction', ["forward", "bidirectional", "reverse"])
@pytest.mark.parametrize('cell_type', ["LSTM", "GRU", "RNN"])
def test_lstm_nightly(self, direction, cell_type, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version, temp_dir=temp_dir)
def test_lstm_nightly(self, direction, cell_type, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_lstm(direction, cell_type), ie_device, precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -167,24 +167,26 @@ class TestMatMul(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_matmul(self, params, ie_device, precision, ir_version, temp_dir):
def test_matmul(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_broadcasting)
@pytest.mark.nightly
def test_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir):
def test_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_dual_matmul(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_dual_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_dual_matmul(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_dual_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_broadcasting)
@pytest.mark.nightly
def test_dual_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_dual_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_dual_matmul_bc(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_dual_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -68,6 +68,6 @@ class TestMeanVarianceNormalization(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_mvn(self, params, ie_device, precision, ir_version, temp_dir):
def test_mvn(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -80,12 +80,14 @@ class TestNeg(Caffe2OnnxLayerTest):
@pytest.mark.parametrize('params', test_data_precommit)
@pytest.mark.precommit
def test_neg_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_neg(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_neg_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_neg(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize('params', test_data)
@pytest.mark.nightly
def test_neg(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_neg(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_neg(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_neg(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -55,7 +55,8 @@ class TestNonZero(Caffe2OnnxLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'node': {'kind': 'op', 'type': 'NonZero', 'version': 'opset3', 'output_type': 'i64'},
'node': {'kind': 'op', 'type': 'NonZero', 'version': 'opset3',
'output_type': 'i64'},
'node_data': {'shape': [len(shape), np.prod(shape)], 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
}
@@ -175,20 +176,23 @@ class TestNonZero(Caffe2OnnxLayerTest):
output_value=np.array([0, 0, 1, 1, 1, 3, 0, 2]).reshape((2, 4)),
),
dict(
input_value=np.array([1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0]).reshape((2, 3, 3)),
input_value=np.array([1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0]).reshape(
(2, 3, 3)),
output_value=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 2, 2, 2,
0, 0, 0, 1, 1, 2, 0, 2, 1, 0, 1, 2, 0, 1, 2, 0, 2, 1]).reshape((3, 12)),
0, 0, 0, 1, 1, 2, 0, 2, 1, 0, 1, 2, 0, 1, 2, 0, 2, 1]).reshape(
(3, 12)),
),
]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_non_zero(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_non_zero(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_const_data)
@pytest.mark.nightly
def test_non_zero_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_non_zero_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -173,18 +173,21 @@ class TestNot(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_not_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_not_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_not(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_not(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_not_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_not_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -254,24 +254,28 @@ class TestOr(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_or_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_or_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_or(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_or(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_or_one_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_or_one_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_or_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_or_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -5,6 +5,7 @@ import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -180,10 +181,12 @@ class TestPad(OnnxRuntimeLayerTest):
('reflect', None),
('edge', None)])
@pytest.mark.nightly
def test_pad_opset_9(self, params, mode_value, ie_device, precision, ir_version, temp_dir):
def test_pad_opset_9(self, params, mode_value, ie_device, precision, ir_version, temp_dir,
api_2):
mode, value = mode_value
self._test(*self.create_net(**params, mode=mode, value=value, ir_version=ir_version, opset=9),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(
*self.create_net(**params, mode=mode, value=value, ir_version=ir_version, opset=9),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("mode_value", [(None, None),
@@ -193,10 +196,11 @@ class TestPad(OnnxRuntimeLayerTest):
('reflect', None),
('edge', None)])
@pytest.mark.precommit
def test_pad_opset_latest_precommit(self, params, mode_value, ie_device, precision, ir_version, temp_dir):
def test_pad_opset_latest_precommit(self, params, mode_value, ie_device, precision, ir_version,
temp_dir, api_2):
mode, value = mode_value
self._test(*self.create_net(**params, mode=mode, value=value, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("mode_value", [(None, None),
@@ -206,7 +210,8 @@ class TestPad(OnnxRuntimeLayerTest):
('reflect', None),
('edge', None)])
@pytest.mark.nightly
def test_pad_opset_latest(self, params, mode_value, ie_device, precision, ir_version, temp_dir):
def test_pad_opset_latest(self, params, mode_value, ie_device, precision, ir_version, temp_dir,
api_2):
mode, value = mode_value
self._test(*self.create_net(**params, mode=mode, value=value, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -14,7 +14,8 @@ def float_array(x):
class TestPooling(OnnxRuntimeLayerTest):
def create_net(self, shape, kernel_shape, pads, strides, op, ir_version, count_include_pad=None, auto_pad=None,
def create_net(self, shape, kernel_shape, pads, strides, op, ir_version, count_include_pad=None,
auto_pad=None,
storage_order=None, ceil=False, opset=None):
"""
ONNX net IR net
@@ -63,7 +64,8 @@ class TestPooling(OnnxRuntimeLayerTest):
else:
rounding = np.ceil if ceil else np.floor
out_spacial_shape = rounding(
(float_array(shape[2:]) + np.add(_pads[:, 0], _pads[:, 1]) - float_array(kernel_shape)) / strides + 1)
(float_array(shape[2:]) + np.add(_pads[:, 0], _pads[:, 1]) - float_array(
kernel_shape)) / strides + 1)
out_shape = np.array(shape)
out_shape[2:] = out_spacial_shape
@@ -240,7 +242,8 @@ class TestPooling(OnnxRuntimeLayerTest):
test_data_precommit = [
dict(shape=[2, 3, 10], kernel_shape=[2], pads=None, strides=[3]),
dict(shape=[2, 3, 30, 30], kernel_shape=[5, 5], pads=None, strides=[3, 2]),
dict(shape=[2, 3, 28, 28, 28], kernel_shape=[5, 5, 5], pads=[2, 4, 2, 0, 0, 2], strides=None),
dict(shape=[2, 3, 28, 28, 28], kernel_shape=[5, 5, 5], pads=[2, 4, 2, 0, 0, 2],
strides=None),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[5, 5, 5], pads=None, strides=[3, 3, 5])]
test_data = [
@@ -260,80 +263,124 @@ class TestPooling(OnnxRuntimeLayerTest):
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[4, 2, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[2, 4, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[2, 2, 4], pads=None, strides=None),
dict(shape=[2, 3, 28, 28, 28], kernel_shape=[3, 3, 3], pads=[2, 2, 2, 2, 2, 2], strides=None),
dict(shape=[2, 3, 28, 28, 28], kernel_shape=[5, 5, 5], pads=[2, 4, 2, 0, 0, 2], strides=None),
dict(shape=[2, 3, 28, 28, 28], kernel_shape=[3, 3, 3], pads=[2, 2, 2, 2, 2, 2],
strides=None),
dict(shape=[2, 3, 28, 28, 28], kernel_shape=[5, 5, 5], pads=[2, 4, 2, 0, 0, 2],
strides=None),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[5, 5, 5], pads=None, strides=[3, 3, 3]),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[5, 5, 5], pads=None, strides=[5, 3, 3]),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[5, 5, 5], pads=None, strides=[3, 5, 3]),
dict(shape=[2, 3, 30, 30, 30], kernel_shape=[5, 5, 5], pads=None, strides=[3, 3, 5])]
test_data_autopad_precommit = [
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 2, 4], pads=None, strides=None),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None, strides=[3, 2, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None, strides=[3, 3, 2])]
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 2, 4], pads=None,
strides=None),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None,
strides=[3, 2, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None,
strides=[3, 3, 2])]
test_data_autopad = [
dict(shape=[2, 3, 10], auto_pad='SAME_UPPER', kernel_shape=[2], pads=[0, 1], strides=[3]),
dict(shape=[2, 3, 10], auto_pad='SAME_LOWER', kernel_shape=[2], pads=[0, 1], strides=[3]),
dict(shape=[2, 3, 10], auto_pad='VALID', kernel_shape=[2], pads=None, strides=[3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 2], pads=[0, 0, 1, 1], strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[4, 2], pads=[1, 0, 2, 1], strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 4], pads=[0, 1, 1, 2], strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5], pads=[1, 1, 1, 1], strides=[3, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5], pads=[1, 1, 2, 1], strides=[2, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5], pads=[1, 1, 1, 2], strides=[3, 2]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 2], pads=[0, 0, 1, 1], strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[4, 2], pads=[1, 0, 2, 1], strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 4], pads=[0, 1, 1, 2], strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5], pads=[1, 1, 1, 1], strides=[3, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5], pads=[1, 1, 2, 1], strides=[2, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5], pads=[1, 1, 1, 2], strides=[3, 2]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 2], pads=[0, 0, 1, 1],
strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[4, 2], pads=[1, 0, 2, 1],
strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 4], pads=[0, 1, 1, 2],
strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5], pads=[1, 1, 1, 1],
strides=[3, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5], pads=[1, 1, 2, 1],
strides=[2, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5], pads=[1, 1, 1, 2],
strides=[3, 2]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 2], pads=[0, 0, 1, 1],
strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[4, 2], pads=[1, 0, 2, 1],
strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 4], pads=[0, 1, 1, 2],
strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5], pads=[1, 1, 1, 1],
strides=[3, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5], pads=[1, 1, 2, 1],
strides=[2, 3]),
dict(shape=[2, 3, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5], pads=[1, 1, 1, 2],
strides=[3, 2]),
dict(shape=[2, 3, 30, 30], auto_pad='VALID', kernel_shape=[2, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='VALID', kernel_shape=[4, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30], auto_pad='VALID', kernel_shape=[2, 4], pads=None, strides=None),
dict(shape=[2, 3, 21, 21], auto_pad='VALID', kernel_shape=[3, 3], pads=None, strides=[3, 3]),
dict(shape=[2, 3, 21, 21], auto_pad='VALID', kernel_shape=[3, 3], pads=None, strides=[2, 3]),
dict(shape=[2, 3, 21, 21], auto_pad='VALID', kernel_shape=[3, 3], pads=None, strides=[3, 2]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 2, 2], pads=[0, 0, 0, 1, 1, 1],
dict(shape=[2, 3, 21, 21], auto_pad='VALID', kernel_shape=[3, 3], pads=None,
strides=[3, 3]),
dict(shape=[2, 3, 21, 21], auto_pad='VALID', kernel_shape=[3, 3], pads=None,
strides=[2, 3]),
dict(shape=[2, 3, 21, 21], auto_pad='VALID', kernel_shape=[3, 3], pads=None,
strides=[3, 2]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 2, 2],
pads=[0, 0, 0, 1, 1, 1],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[4, 2, 2], pads=[1, 0, 0, 2, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[4, 2, 2],
pads=[1, 0, 0, 2, 1, 1],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 4, 2], pads=[0, 1, 0, 1, 2, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 4, 2],
pads=[0, 1, 0, 1, 2, 1],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 2, 4], pads=[0, 0, 1, 1, 1, 2],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[2, 2, 4],
pads=[0, 0, 1, 1, 1, 2],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5], pads=[1, 1, 1, 1, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5],
pads=[1, 1, 1, 1, 1, 1],
strides=[3, 3, 3]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5], pads=[0, 1, 1, 0, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5],
pads=[0, 1, 1, 0, 1, 1],
strides=[5, 3, 3]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5], pads=[1, 0, 1, 1, 0, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5],
pads=[1, 0, 1, 1, 0, 1],
strides=[3, 5, 3]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5], pads=[1, 1, 0, 1, 1, 0],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_UPPER', kernel_shape=[5, 5, 5],
pads=[1, 1, 0, 1, 1, 0],
strides=[3, 3, 5]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 2, 2], pads=[0, 0, 0, 1, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 2, 2],
pads=[0, 0, 0, 1, 1, 1],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[4, 2, 2], pads=[1, 0, 0, 2, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[4, 2, 2],
pads=[1, 0, 0, 2, 1, 1],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 4, 2], pads=[0, 1, 0, 1, 2, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 4, 2],
pads=[0, 1, 0, 1, 2, 1],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 2, 4], pads=[0, 0, 1, 1, 1, 2],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[2, 2, 4],
pads=[0, 0, 1, 1, 1, 2],
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5], pads=[1, 1, 1, 1, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5],
pads=[1, 1, 1, 1, 1, 1],
strides=[3, 3, 3]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5], pads=[0, 1, 1, 0, 1, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5],
pads=[0, 1, 1, 0, 1, 1],
strides=[5, 3, 3]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5], pads=[1, 0, 1, 1, 0, 1],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5],
pads=[1, 0, 1, 1, 0, 1],
strides=[3, 5, 3]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5], pads=[1, 1, 0, 1, 1, 0],
dict(shape=[2, 3, 30, 30, 30], auto_pad='SAME_LOWER', kernel_shape=[5, 5, 5],
pads=[1, 1, 0, 1, 1, 0],
strides=[3, 3, 5]),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 2, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[4, 2, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 4, 2], pads=None, strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 2, 4], pads=None, strides=None),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None, strides=[3, 3, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None, strides=[2, 3, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None, strides=[3, 2, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None, strides=[3, 3, 2])]
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 2, 2], pads=None,
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[4, 2, 2], pads=None,
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 4, 2], pads=None,
strides=None),
dict(shape=[2, 3, 30, 30, 30], auto_pad='VALID', kernel_shape=[2, 2, 4], pads=None,
strides=None),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None,
strides=[3, 3, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None,
strides=[2, 3, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None,
strides=[3, 2, 3]),
dict(shape=[2, 3, 21, 21, 21], auto_pad='VALID', kernel_shape=[3, 3, 3], pads=None,
strides=[3, 3, 2])]
global_test_data = [dict(shape=[2, 3, 10]),
dict(shape=[2, 3, 32, 32]),
@@ -342,95 +389,111 @@ class TestPooling(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("incl_pad", [None, 1])
@pytest.mark.nightly
def test_avgpool_opset7(self, params, incl_pad, ie_device, precision, ir_version, temp_dir):
def test_avgpool_opset7(self, params, incl_pad, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(
*self.create_net(**params, op='AveragePool', count_include_pad=incl_pad, ir_version=ir_version, opset=7),
ie_device, precision, ir_version, temp_dir=temp_dir)
*self.create_net(**params, op='AveragePool', count_include_pad=incl_pad,
ir_version=ir_version, opset=7),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_autopad)
@pytest.mark.nightly
def test_avgpool_opset7_autopad(self, params, ie_device, precision, ir_version, temp_dir):
def test_avgpool_opset7_autopad(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='AveragePool', ir_version=ir_version, opset=7),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("incl_pad", [None, 1])
@pytest.mark.parametrize("ceil", [True, False])
@pytest.mark.nightly
def test_avgpool_opset10(self, params, incl_pad, ceil, ie_device, precision, ir_version, temp_dir):
def test_avgpool_opset10(self, params, incl_pad, ceil, ie_device, precision, ir_version,
temp_dir, api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(
*self.create_net(**params, op='AveragePool', count_include_pad=incl_pad, ceil=ceil, ir_version=ir_version,
opset=10), ie_device, precision, ir_version, temp_dir=temp_dir)
*self.create_net(**params, op='AveragePool', count_include_pad=incl_pad, ceil=ceil,
ir_version=ir_version,
opset=10), ie_device, precision, ir_version, temp_dir=temp_dir,
api_2=api_2)
@pytest.mark.parametrize("params", test_data_autopad)
@pytest.mark.nightly
def test_avgpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir):
def test_avgpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='AveragePool', ir_version=ir_version, opset=10),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("st_order", [None, 1])
@pytest.mark.nightly
def test_maxpool_opset8(self, params, st_order, ie_device, precision, ir_version, temp_dir):
def test_maxpool_opset8(self, params, st_order, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='MaxPool', storage_order=st_order, ir_version=ir_version, opset=8),
ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(
*self.create_net(**params, op='MaxPool', storage_order=st_order, ir_version=ir_version,
opset=8),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_autopad)
@pytest.mark.nightly
def test_maxpool_opset8_autopad(self, params, ie_device, precision, ir_version, temp_dir):
def test_maxpool_opset8_autopad(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='MaxPool', ir_version=ir_version, opset=8),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("st_order", [None, 1])
@pytest.mark.parametrize("ceil", [True, False])
@pytest.mark.nightly
def test_maxpool_opset10(self, params, st_order, ceil, ie_device, precision, ir_version, temp_dir):
def test_maxpool_opset10(self, params, st_order, ceil, ie_device, precision, ir_version,
temp_dir, api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='MaxPool', storage_order=st_order, ceil=ceil, ir_version=ir_version,
opset=10), ie_device, precision, ir_version, temp_dir=temp_dir)
self._test(*self.create_net(**params, op='MaxPool', storage_order=st_order, ceil=ceil,
ir_version=ir_version,
opset=10), ie_device, precision, ir_version, temp_dir=temp_dir,
api_2=api_2)
@pytest.mark.parametrize("params", test_data_autopad_precommit)
@pytest.mark.precommit
def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir):
def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='MaxPool', ir_version=ir_version, opset=10),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_autopad)
@pytest.mark.nightly
def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir):
def test_maxpool_opset10_autopad(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_net(**params, op='MaxPool', ir_version=ir_version, opset=10),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", global_test_data)
@pytest.mark.nightly
def test_global_avgpool(self, params, ie_device, precision, ir_version, temp_dir):
def test_global_avgpool(self, params, ie_device, precision, ir_version, temp_dir, api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_global_net(**params, op='GlobalAveragePool', ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", global_test_data)
@pytest.mark.nightly
def test_global_maxpool(self, params, ie_device, precision, ir_version, temp_dir):
def test_global_maxpool(self, params, ie_device, precision, ir_version, temp_dir, api_2):
if not len(params['shape']) in [4, 5]:
pytest.skip("Pooling layer support only 4D and 5D input tensors")
self._test(*self.create_global_net(**params, op='GlobalMaxPool', ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -68,7 +68,6 @@ class TestPRelu(Caffe2OnnxLayerTest):
#
ref_net = None
if check_ir_version(10, None, ir_version):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
@@ -114,48 +113,52 @@ class TestPRelu(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_scalar)
@pytest.mark.nightly
def test_prelu_opset6_scalar(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_opset6_scalar(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_shared_channels)
@pytest.mark.nightly
def test_prelu_opset6_shared_channels(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_opset6_shared_channels(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_scalar)
@pytest.mark.nightly
def test_prelu_opset7_scalar(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_opset7_scalar(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, opset=7, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_shared_channels)
@pytest.mark.nightly
def test_prelu_opset7_shared_channels(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_opset7_shared_channels(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, precision=precision, opset=7, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_prelu_shared_channels_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_shared_channels_precommit(self, params, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_scalar_precommit)
@pytest.mark.precommit
def test_prelu_scalar_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_scalar_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_scalar)
@pytest.mark.nightly
def test_prelu_scalar(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_scalar(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_shared_channels)
@pytest.mark.nightly
def test_prelu_shared_channels(self, params, ie_device, precision, ir_version, temp_dir):
def test_prelu_shared_channels(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -148,24 +148,27 @@ class TestReciprocal(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_reciprocal_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reciprocal_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_reciprocal(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reciprocal(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_reciprocal_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_reciprocal_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_reciprocal_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_reciprocal_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -124,56 +124,69 @@ class TestReduce(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.precommit
def test_reduce_max_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceMax', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_max_precommit(self, params, keep_dims, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_reduce(**params, op='ReduceMax', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reduce_max(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceMax', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_max(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reduce(**params, op='ReduceMax', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reduce_sum(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceSum', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_sum(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reduce(**params, op='ReduceSum', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reduce_prod(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceProd', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_prod(self, params, keep_dims, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_reduce(**params, op='ReduceProd', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.precommit
def test_reduce_mean_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceMean', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_mean_precommit(self, params, keep_dims, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_reduce(**params, op='ReduceMean', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
@pytest.mark.precommit
def test_reduce_mean(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceMean', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_mean(self, params, keep_dims, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_reduce(**params, op='ReduceMean', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.precommit
def test_reduce_min_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceMin', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_min_precommit(self, params, keep_dims, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_reduce(**params, op='ReduceMin', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reduce_min(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce(**params, op='ReduceMin', keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_min(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reduce(**params, op='ReduceMin', keep_dims=keep_dims,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -5,6 +5,7 @@ import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -168,7 +169,9 @@ class TestReduceL1L2(OnnxRuntimeLayerTest):
# Please, specify 'type': 'Input' for input node
# Moreover, do not forget to validate ALL layer attributes!!!
#
constant = np.power(np.sum(a=np.abs(np.power(constant, reduce_p)), axis=tuple(_axes), keepdims=keep_dims), 1 / reduce_p)
constant = np.power(
np.sum(a=np.abs(np.power(constant, reduce_p)), axis=tuple(_axes), keepdims=keep_dims),
1 / reduce_p)
ref_net = None
if check_ir_version(10, None, ir_version):
nodes_attributes = {
@@ -217,31 +220,39 @@ class TestReduceL1L2(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.parametrize("reduce_p", [1, 2])
@pytest.mark.precommit
def test_reduce_lp_precommit(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_lp_precommit(self, params, keep_dims, reduce_p, ie_device, precision,
ir_version, temp_dir, api_2):
self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.parametrize("reduce_p", [1, 2])
@pytest.mark.nightly
def test_reduce_lp(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_lp(self, params, keep_dims, reduce_p, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_reduce_lp(**params, keep_dims=keep_dims, reduce_p=reduce_p,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.parametrize("reduce_p", [1, 2])
@pytest.mark.precommit
def test_reduce_lp_const_precommit(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, temp_dir):
def test_reduce_lp_const_precommit(self, params, keep_dims, reduce_p, ie_device, precision,
ir_version, temp_dir, api_2):
self._test(
*self.create_reduce_lp_const(**params, keep_dims=keep_dims, reduce_p=reduce_p, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
*self.create_reduce_lp_const(**params, keep_dims=keep_dims, reduce_p=reduce_p,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.parametrize("reduce_p", [1, 2])
@pytest.mark.nightly
def test_reduce_lp_const(self, params, keep_dims, reduce_p, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reduce_lp_const(**params, keep_dims=keep_dims, reduce_p=reduce_p,ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_reduce_lp_const(self, params, keep_dims, reduce_p, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_reduce_lp_const(**params, keep_dims=keep_dims, reduce_p=reduce_p,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -172,12 +172,13 @@ class TestRelu(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_relu(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_relu(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_relu_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_relu_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -69,9 +69,11 @@ class TestReshape(Caffe2OnnxLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': input_shape, 'kind': 'data'},
'input_data_1': {'shape': [len(output_shape)], 'value': output_shape, 'kind': 'data'},
'input_data_1': {'shape': [len(output_shape)], 'value': output_shape,
'kind': 'data'},
'const_1': {'kind': 'op', 'type': 'Const'},
'const_data_1': {'shape': [len(output_shape)], 'value': None, 'kind': 'data'}, # 'value': output_shape,
'const_data_1': {'shape': [len(output_shape)], 'value': None, 'kind': 'data'},
# 'value': output_shape,
'reshape': {'kind': 'op', 'type': 'Reshape'},
'reshape_data': {'shape': output_shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
@@ -229,55 +231,64 @@ class TestReshape(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_5D_precommit)
@pytest.mark.precommit
def test_reshape_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D_precommit)
@pytest.mark.precommit
def test_reshape_4D_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_4D_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D_precommit)
@pytest.mark.precommit
def test_reshape_3D_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_3D_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_reshape_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_reshape_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_reshape_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_reshape_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_const_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_reshape_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_const_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_reshape_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_reshape_const_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reshape_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,11 +3,12 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np, np_data_type_to_destination_type
from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np, \
np_data_type_to_destination_type
from unit_tests.utils.graph import build_graph
@@ -35,7 +36,8 @@ class TestResize(OnnxRuntimeLayerTest):
onnx_scales = scales
if scales is None:
onnx_scales = np.array(output_shape).astype(np.float) / np.array(input_shape).astype(np.float)
onnx_scales = np.array(output_shape).astype(np.float) / np.array(input_shape).astype(
np.float)
scales_node = onnx.helper.make_node(
'Constant',
inputs=[],
@@ -143,10 +145,12 @@ class TestResize(OnnxRuntimeLayerTest):
}
if shape_calculation_mode == 'scales':
ref_net = create_ref_net_in_scales_mode(precision, input_shape_as_array, output_shape,
ref_net = create_ref_net_in_scales_mode(precision, input_shape_as_array,
output_shape,
sizes_value, scales_value, interp_attrs)
else:
ref_net = create_ref_net_in_sizes_mode(precision, input_shape_as_array, output_shape,
ref_net = create_ref_net_in_sizes_mode(precision, input_shape_as_array,
output_shape,
sizes_value, scales_value, interp_attrs)
return onnx_net, ref_net
@@ -199,9 +203,10 @@ class TestResize(OnnxRuntimeLayerTest):
]
@pytest.mark.parametrize("params", test_data)
def test_resize(self, params, ie_device, precision, ir_version, temp_dir):
def test_resize(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_resize_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, custom_eps=2.0e-4, temp_dir=temp_dir)
ie_device, precision, ir_version, custom_eps=2.0e-4, temp_dir=temp_dir,
api_2=api_2)
test_data_cubic = [
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150],
@@ -229,13 +234,16 @@ class TestResize(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("cubic_coeff_a", [-0.75])
@pytest.mark.parametrize("mode", ['cubic'])
@pytest.mark.parametrize("nearest_mode", ['round_prefer_floor'])
def test_resize_combined_cubic(self, params, coordinate_transformation_mode, cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir):
def test_resize_combined_cubic(self, params, coordinate_transformation_mode, cubic_coeff_a,
mode,
nearest_mode, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_resize_net(**params,
coordinate_transformation_mode=coordinate_transformation_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode,
nearest_mode=nearest_mode,
precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir)
ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir,
api_2=api_2)
test_data_nearest = [
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150],
@@ -256,13 +264,16 @@ class TestResize(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("mode", ['nearest'])
@pytest.mark.parametrize("nearest_mode", ['round_prefer_floor', 'round_prefer_ceil',
'floor', 'ceil'])
def test_resize_combined_nearest(self, params, coordinate_transformation_mode, cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir):
def test_resize_combined_nearest(self, params, coordinate_transformation_mode, cubic_coeff_a,
mode,
nearest_mode, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_resize_net(**params,
coordinate_transformation_mode=coordinate_transformation_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode,
nearest_mode=nearest_mode,
precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
test_data_linear = [
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150],
@@ -290,13 +301,17 @@ class TestResize(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("cubic_coeff_a", [-0.75])
@pytest.mark.parametrize("mode", ['linear'])
@pytest.mark.parametrize("nearest_mode", ['round_prefer_floor'])
def test_resize_combined_linear(self, params, coordinate_transformation_mode, cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir):
def test_resize_combined_linear(self, params, coordinate_transformation_mode, cubic_coeff_a,
mode,
nearest_mode, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_resize_net(**params,
coordinate_transformation_mode=coordinate_transformation_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode,
nearest_mode=nearest_mode,
precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir)
ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir,
api_2=api_2)
test_data_sizes = [
dict(input_shape=[1, 1, 4, 4], output_shape=[1, 1, 3, 3],
@@ -342,9 +357,9 @@ class TestResize(OnnxRuntimeLayerTest):
]
@pytest.mark.parametrize("params", test_data_sizes)
def test_resize_sizes(self, params, ie_device, precision, ir_version, temp_dir):
def test_resize_sizes(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_resize_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
test_data_sizes_cubic = [
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150],
@@ -372,13 +387,17 @@ class TestResize(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("cubic_coeff_a", [-0.75])
@pytest.mark.parametrize("mode", ['cubic'])
@pytest.mark.parametrize("nearest_mode", ['round_prefer_floor'])
def test_resize_combined_sizes_cubic(self, params, coordinate_transformation_mode, cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir):
def test_resize_combined_sizes_cubic(self, params, coordinate_transformation_mode,
cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_resize_net(**params,
coordinate_transformation_mode=coordinate_transformation_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode,
nearest_mode=nearest_mode,
precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir)
ie_device, precision, ir_version, custom_eps=2.6e-2, temp_dir=temp_dir,
api_2=api_2)
test_data_sizes_nearest = [
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150],
@@ -423,13 +442,16 @@ class TestResize(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("mode", ['nearest'])
@pytest.mark.parametrize("nearest_mode", ['round_prefer_floor', 'round_prefer_ceil',
'floor', 'ceil'])
def test_resize_combined_sizes_nearest(self, params, coordinate_transformation_mode, cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir):
def test_resize_combined_sizes_nearest(self, params, coordinate_transformation_mode,
cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_resize_net(**params,
coordinate_transformation_mode=coordinate_transformation_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode,
nearest_mode=nearest_mode,
precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
test_data_sizes_linear = [
dict(input_shape=[1, 3, 100, 200], output_shape=[1, 3, 350, 150],
@@ -457,16 +479,21 @@ class TestResize(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("cubic_coeff_a", [-0.75])
@pytest.mark.parametrize("mode", ['linear'])
@pytest.mark.parametrize("nearest_mode", ['round_prefer_floor'])
def test_resize_combined_sizes_linear(self, params, coordinate_transformation_mode, cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir):
def test_resize_combined_sizes_linear(self, params, coordinate_transformation_mode,
cubic_coeff_a, mode,
nearest_mode, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_resize_net(**params,
coordinate_transformation_mode=coordinate_transformation_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode, nearest_mode=nearest_mode,
cubic_coeff_a=cubic_coeff_a, mode=mode,
nearest_mode=nearest_mode,
precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir)
ie_device, precision, ir_version, custom_eps=2.0e-2, temp_dir=temp_dir,
api_2=api_2)
def create_ref_net_in_sizes_mode(precision, input_shape, output_shape, sizes_value, scales_value, attrs):
def create_ref_net_in_sizes_mode(precision, input_shape, output_shape, sizes_value, scales_value,
attrs):
input_data_type = np_data_type_to_destination_type(data_type_str_to_np(precision))
input_rank = len(input_shape)
epsilon = np.array([1.0e-5])
@@ -560,7 +587,8 @@ def create_ref_net_in_sizes_mode(precision, input_shape, output_shape, sizes_val
return build_graph(nodes_attrs, edges)
def create_ref_net_in_scales_mode(precision, input_shape, output_shape, sizes_value, scales_value, attrs):
def create_ref_net_in_scales_mode(precision, input_shape, output_shape, sizes_value, scales_value,
attrs):
input_data_type = np_data_type_to_destination_type(data_type_str_to_np(precision))
input_rank = len(input_shape)
epsilon = np.array([1.0e-5])

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -68,8 +68,10 @@ class TestROIAlign(OnnxRuntimeLayerTest):
'3_indices': {'kind': 'op', 'type': 'Parameter'},
'indices_data': {'shape': indices_shape, 'kind': 'data'},
'node': {'kind': 'op', 'type': 'ROIAlign', 'pooled_h': pooled_h, 'pooled_w': pooled_w,
'mode': mode, 'sampling_ratio': sampling_ratio, 'spatial_scale': spatial_scale},
'node': {'kind': 'op', 'type': 'ROIAlign', 'pooled_h': pooled_h,
'pooled_w': pooled_w,
'mode': mode, 'sampling_ratio': sampling_ratio,
'spatial_scale': spatial_scale},
'node_data': {'shape': output_shape, 'kind': 'data'},
'result': {'kind': 'op', 'type': 'Result'}
@@ -104,6 +106,7 @@ class TestROIAlign(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_roi_align(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_roi_align(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -133,12 +133,13 @@ class TestScale(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_scale(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_scale(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_scale_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_scale_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -5,6 +5,7 @@ import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -70,7 +71,8 @@ class TestScatters(Caffe2OnnxLayerTest):
'3_updates': {'kind': 'op', 'type': 'Parameter'},
'updates_data': {'shape': updates_shape, 'kind': 'data'},
'const_indata': {'kind': 'data', 'value': np.int64(axis) if axis is not None else np.int64(0)},
'const_indata': {'kind': 'data',
'value': np.int64(axis) if axis is not None else np.int64(0)},
'const': {'kind': 'op', 'type': 'Const'},
'const_data': {'kind': 'data'},
@@ -101,7 +103,8 @@ class TestScatters(Caffe2OnnxLayerTest):
test_data = [
dict(input_shape=[1, 5], indices_shape=[1, 2], updates_shape=[1, 2],
axis=1, output_shape=[1, 5]),
dict(input_shape=[1, 256, 200, 272], indices_shape=[1, 256, 200, 272], updates_shape=[1, 256, 200, 272],
dict(input_shape=[1, 256, 200, 272], indices_shape=[1, 256, 200, 272],
updates_shape=[1, 256, 200, 272],
axis=None, output_shape=[1, 256, 200, 272])]
@@ -110,9 +113,10 @@ class TestScatter(TestScatters):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_scatter(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_scatter(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
class TestScatterElements(TestScatters):
@@ -120,6 +124,7 @@ class TestScatterElements(TestScatters):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_scatter_elements(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_scatter_elements(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -179,18 +179,20 @@ class TestSigmoid(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_sigmoid_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sigmoid_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sigmoid(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sigmoid(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_sigmoid_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -4,6 +4,7 @@
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -165,12 +166,13 @@ class TestSign(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sign(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sign(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sign_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_sign_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -325,7 +325,8 @@ class TestSlice(OnnxRuntimeLayerTest):
dict(shape=[4, 6, 8, 10, 12], axes=[2, 3, 4], starts=[1, 1, 1], ends=[7, 9, 11]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[1, 1, 1, 1], ends=[3, 5, 7, 9]),
dict(shape=[4, 6, 8, 10, 12], axes=[1, 2, 3, 4], starts=[1, 1, 1, 1], ends=[5, 7, 9, 11]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[1, 1, 1, 1, 1], ends=[3, 5, 7, 9, 11]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[1, 1, 1, 1, 1],
ends=[3, 5, 7, 9, 11]),
]
test_data_with_steps = [
@@ -335,65 +336,77 @@ class TestSlice(OnnxRuntimeLayerTest):
dict(shape=[10, 12], axes=[1], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[10, 12], axes=[0, 1], starts=[9, 11], ends=[1, 1], steps=[-2, -2]),
dict(shape=[8, 10, 12], axes=[0, 1, 2], starts=[1, 1, 1], ends=[7, 9, 11], steps=[2, 2, 2]),
dict(shape=[8, 10, 12], axes=[0, 1, 2], starts=[7, 9, 11], ends=[1, 1, 1], steps=[-1, -1, -1]),
dict(shape=[8, 10, 12], axes=[0, 1, 2], starts=[7, 9, 11], ends=[1, 1, 1],
steps=[-1, -1, -1]),
dict(shape=[8, 10, 12], axes=[0], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[8, 10, 12], axes=[1], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[8, 10, 12], axes=[2], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[8, 10, 12], axes=[0, 1, 2], starts=[7, 9, 11], ends=[1, 1, 1], steps=[-2, -2, -2]),
dict(shape=[6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[1, 1, 1, 1], ends=[5, 7, 9, 11], steps=[2, 2, 2, 2]),
dict(shape=[6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[5, 7, 9, 11], ends=[1, 1, 1, 1], steps=[-1, -1, -1, -1]),
dict(shape=[8, 10, 12], axes=[0, 1, 2], starts=[7, 9, 11], ends=[1, 1, 1],
steps=[-2, -2, -2]),
dict(shape=[6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[1, 1, 1, 1], ends=[5, 7, 9, 11],
steps=[2, 2, 2, 2]),
dict(shape=[6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[5, 7, 9, 11], ends=[1, 1, 1, 1],
steps=[-1, -1, -1, -1]),
dict(shape=[6, 8, 10, 12], axes=[0], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[6, 8, 10, 12], axes=[1], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[6, 8, 10, 12], axes=[2], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[6, 8, 10, 12], axes=[3], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[5, 7, 9, 11], ends=[1, 1, 1, 1], steps=[-2, -2, -2, -2]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[1, 1, 1, 1, 1], ends=[3, 5, 7, 9, 11],
dict(shape=[6, 8, 10, 12], axes=[0, 1, 2, 3], starts=[5, 7, 9, 11], ends=[1, 1, 1, 1],
steps=[-2, -2, -2, -2]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[1, 1, 1, 1, 1],
ends=[3, 5, 7, 9, 11],
steps=[2, 2, 2, 2, 2]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[3, 5, 7, 9, 11], ends=[1, 1, 1, 1, 1],
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[3, 5, 7, 9, 11],
ends=[1, 1, 1, 1, 1],
steps=[-1, -1, -1, -1, -1]),
dict(shape=[4, 6, 8, 10, 12], axes=[0], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[4, 6, 8, 10, 12], axes=[1], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[4, 6, 8, 10, 12], axes=[2], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[4, 6, 8, 10, 12], axes=[3], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[4, 6, 8, 10, 12], axes=[4], starts=[-1], ends=[-9999], steps=[-1]),
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[3, 5, 7, 9, 11], ends=[1, 1, 1, 1, 1],
dict(shape=[4, 6, 8, 10, 12], axes=[0, 1, 2, 3, 4], starts=[3, 5, 7, 9, 11],
ends=[1, 1, 1, 1, 1],
steps=[-2, -2, -2, -2, -2]),
]
@pytest.mark.parametrize("params", test_data_no_steps)
@pytest.mark.nightly
def test_slice_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_slice_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_no_steps)
@pytest.mark.nightly
def test_slice_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_slice_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, opset=6, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps)
@pytest.mark.nightly
def test_slice_opset10(self, params, ie_device, precision, ir_version, temp_dir):
def test_slice_opset10(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net(**params, opset=10, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
*self.create_net(**params, opset=10, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps)
@pytest.mark.nightly
def test_slice_const_opset10(self, params, ie_device, precision, ir_version, temp_dir):
def test_slice_const_opset10(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, opset=10, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps)
@pytest.mark.nightly
def test_slice_opset11(self, params, ie_device, precision, ir_version, temp_dir):
def test_slice_opset11(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net(**params, opset=11, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
*self.create_net(**params, opset=11, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_no_steps + test_data_with_steps)
@pytest.mark.nightly
def test_slice_const_opset11(self, params, ie_device, precision, ir_version, temp_dir):
def test_slice_const_opset11(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, opset=11, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
@@ -17,7 +17,7 @@ def second_input_data_of_reshape(src_shape, axis):
return [0, -1]
if axis > 1:
return [int(np.prod(int64_array(src_shape[: axis]))), -1]
return [-1, int(np.prod(int64_array(src_shape[len(src_shape) + axis: ])))]
return [-1, int(np.prod(int64_array(src_shape[len(src_shape) + axis:])))]
def get_flatten_shape(src_shape, axis):
@@ -27,10 +27,10 @@ def get_flatten_shape(src_shape, axis):
snd_dim = int(np.prod(int64_array(src_shape)))
elif flatten_axis == 1:
fst_dim = src_shape[0]
snd_dim = int(np.prod(int64_array(src_shape[1: ])))
snd_dim = int(np.prod(int64_array(src_shape[1:])))
else:
fst_dim = int(np.prod(int64_array(src_shape[: flatten_axis])))
snd_dim = int(np.prod(int64_array(src_shape[flatten_axis: ])))
snd_dim = int(np.prod(int64_array(src_shape[flatten_axis:])))
return [fst_dim, snd_dim]
@@ -96,7 +96,8 @@ class TestSoftmax(OnnxRuntimeLayerTest):
'kind': 'data',
'value': int64_array(reshape_data_val)},
'flatten_shape': {'type': 'Const', 'kind': 'op', 'shape': 2},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data', 'value': None},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data',
'value': None},
'reshape': {'kind': 'op', 'type': 'Reshape'},
'reshape_data': {'kind': 'data', 'shape': flatten_shape, 'value': None},
'softmax': {'type': 'SoftMax', 'kind': 'op', 'axis': 1},
@@ -123,14 +124,17 @@ class TestSoftmax(OnnxRuntimeLayerTest):
'kind': 'data',
'value': int64_array(reshape_data_val)},
'flatten_shape': {'type': 'Const', 'kind': 'op', 'shape': 2},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data', 'value': None},
'flatten_shape_data': {'shape': int64_array([2]), 'kind': 'data',
'value': None},
'reshape': {'kind': 'op', 'type': 'Reshape'},
'reshape_data': {'kind': 'data', 'shape': flatten_shape, 'value': None},
'softmax': {'type': 'SoftMax', 'kind': 'op', 'axis': 1},
'softmax_data': {'shape': flatten_shape, 'kind': 'data', 'value': None},
'last_shape_val': {'shape': int64_array(shape).shape, 'kind': 'data', 'value': int64_array(shape)},
'last_shape_val': {'shape': int64_array(shape).shape, 'kind': 'data',
'value': int64_array(shape)},
'last_shape': {'type': 'Const', 'kind': 'op', 'shape': len(shape)},
'last_shape_data': {'shape': int64_array([len(shape)]), 'kind': 'data', 'value': None},
'last_shape_data': {'shape': int64_array([len(shape)]), 'kind': 'data',
'value': None},
'last_reshape': {'kind': 'op', 'type': 'Reshape'},
'last_reshape_data': {'kind': 'data', 'shape': shape, 'value': None},
'result': {'kind': 'op', 'type': 'Result'},
@@ -162,6 +166,7 @@ class TestSoftmax(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_softmax(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_softmax(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -2,9 +2,9 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
from unit_tests.utils.graph import build_graph
@@ -171,12 +171,13 @@ class TestSoftplus(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_softplus(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_softplus(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_softplus_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_softplus_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -5,21 +5,28 @@ import pytest
from common.onnx_layer_test_class import Caffe2OnnxLayerTest
test_data_3D = [
dict(input_shape=[1, 50, 50], output_shapes=[[1, 50, 25], [1, 50, 25]], axis=2),
dict(input_shape=[2, 50, 50], output_shapes=[[2, 20, 50], [2, 15, 50], [2, 15, 50]], axis=1),
dict(input_shape=[4, 50, 50], output_shapes=[[1, 50, 50], [1, 50, 50], [1, 50, 50], [1, 50, 50]], axis=0)]
dict(input_shape=[4, 50, 50],
output_shapes=[[1, 50, 50], [1, 50, 50], [1, 50, 50], [1, 50, 50]], axis=0)]
test_data_4D = [
dict(input_shape=[1, 32, 800, 800], output_shapes=[[1, 16, 800, 800], [1, 16, 800, 800]], axis=1),
dict(input_shape=[4, 32, 80, 80], output_shapes=[[4, 8, 80, 80], [4, 8, 80, 80], [4, 8, 80, 80], [4, 8, 80, 80]],
dict(input_shape=[1, 32, 800, 800], output_shapes=[[1, 16, 800, 800], [1, 16, 800, 800]],
axis=1),
dict(input_shape=[2, 21, 80, 80], output_shapes=[[2, 7, 80, 80], [2, 7, 80, 80], [2, 7, 80, 80]], axis=1),
dict(input_shape=[3, 21, 80, 80], output_shapes=[[3, 14, 80, 80], [3, 5, 80, 80], [3, 2, 80, 80]], axis=1),
dict(input_shape=[3, 21, 80, 80], output_shapes=[[1, 21, 80, 80], [1, 21, 80, 80], [1, 21, 80, 80]], axis=0),
dict(input_shape=[3, 21, 80, 80], output_shapes=[[3, 21, 20, 80], [3, 21, 35, 80], [3, 21, 25, 80]], axis=2),
dict(input_shape=[3, 21, 80, 80], output_shapes=[[3, 21, 80, 40], [3, 21, 80, 10], [3, 21, 80, 30]], axis=3)]
dict(input_shape=[4, 32, 80, 80],
output_shapes=[[4, 8, 80, 80], [4, 8, 80, 80], [4, 8, 80, 80], [4, 8, 80, 80]],
axis=1),
dict(input_shape=[2, 21, 80, 80],
output_shapes=[[2, 7, 80, 80], [2, 7, 80, 80], [2, 7, 80, 80]], axis=1),
dict(input_shape=[3, 21, 80, 80],
output_shapes=[[3, 14, 80, 80], [3, 5, 80, 80], [3, 2, 80, 80]], axis=1),
dict(input_shape=[3, 21, 80, 80],
output_shapes=[[1, 21, 80, 80], [1, 21, 80, 80], [1, 21, 80, 80]], axis=0),
dict(input_shape=[3, 21, 80, 80],
output_shapes=[[3, 21, 20, 80], [3, 21, 35, 80], [3, 21, 25, 80]], axis=2),
dict(input_shape=[3, 21, 80, 80],
output_shapes=[[3, 21, 80, 40], [3, 21, 80, 10], [3, 21, 80, 30]], axis=3)]
test_data_5D = [
dict(input_shape=[1, 50, 50, 80, 60],
@@ -28,7 +35,8 @@ test_data_5D = [
[1, 50, 10, 80, 60],
[1, 50, 10, 80, 60],
[1, 50, 10, 80, 60]], axis=2),
dict(input_shape=[1, 50, 50, 80, 60], output_shapes=[[1, 25, 50, 80, 60], [1, 25, 50, 80, 60]], axis=1)]
dict(input_shape=[1, 50, 50, 80, 60], output_shapes=[[1, 25, 50, 80, 60], [1, 25, 50, 80, 60]],
axis=1)]
test_multiple_out = [
dict(input_shape=[3, 10, 10],
@@ -93,6 +101,7 @@ test_multiple_out_with_identity = [
),
]
class TestSplitConcat(Caffe2OnnxLayerTest):
# TODO Add test with default values (axis=0)
def create_split_concat_net(self, input_shape, output_shapes, axis, ir_version):
@@ -119,7 +128,8 @@ class TestSplitConcat(Caffe2OnnxLayerTest):
split.append(output_shape[axis])
# Output for concat
output_concat = helper.make_tensor_value_info('output_concat', TensorProto.FLOAT, input_shape)
output_concat = helper.make_tensor_value_info('output_concat', TensorProto.FLOAT,
input_shape)
node_split_def = onnx.helper.make_node(
'Split',
@@ -191,7 +201,8 @@ class TestSplitConcat(Caffe2OnnxLayerTest):
split.append(output_shape[axis])
# Output for concat
output_concat = helper.make_tensor_value_info('output_dyn_concat', TensorProto.FLOAT, concat_output_shape)
output_concat = helper.make_tensor_value_info('output_dyn_concat', TensorProto.FLOAT,
concat_output_shape)
node_const_def = onnx.helper.make_node(
'Constant',
@@ -250,42 +261,42 @@ class TestSplitConcat(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_concat_net(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_split_3D_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_split_3D_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
*self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_split_4D_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_split_4D_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
*self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_split_5D_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_split_5D_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
*self.create_split_concat_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, api_2=api_2)
class TestSplit(Caffe2OnnxLayerTest):
@@ -309,7 +320,8 @@ class TestSplit(Caffe2OnnxLayerTest):
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, input_shape)
outputs, split = [], []
for id, output_shape in enumerate(output_shapes):
out = helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT, output_shape)
out = helper.make_tensor_value_info('output_{}'.format(id), TensorProto.FLOAT,
output_shape)
outputs.append((out, 'output_{}'.format(id)))
split.append(output_shape[axis])
@@ -350,8 +362,8 @@ class TestSplit(Caffe2OnnxLayerTest):
return onnx_net, ref_net
def create_split_net_ordered_outputs(self, input_shape, output_shapes, axis, output_names, ir_version):
def create_split_net_ordered_outputs(self, input_shape, output_shapes, axis, output_names,
ir_version):
"""
ONNX net IR net
@@ -373,7 +385,8 @@ class TestSplit(Caffe2OnnxLayerTest):
output_list = []
for i, output_name in enumerate(output_names):
output_list.append(helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shapes[i]))
output_list.append(
helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shapes[i]))
node = onnx.helper.make_node('Split', inputs=['input'], outputs=output_names, axis=axis)
@@ -392,7 +405,8 @@ class TestSplit(Caffe2OnnxLayerTest):
return onnx_net, ref_net
def create_split_net_ordered_outputs_with_add(self, input_shape, output_shapes, axis, output_names, ir_version):
def create_split_net_ordered_outputs_with_add(self, input_shape, output_shapes, axis,
output_names, ir_version):
"""
This test checks the case when graph has a node that is connected with Result and some other operation
from single output port.
@@ -421,15 +435,17 @@ class TestSplit(Caffe2OnnxLayerTest):
input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
add_output_name1 = output_names[len(output_names)-2]
add_output_name2 = output_names[len(output_names)-1]
outputs_without_add = output_names[:len(output_names)-2]
add_output_name1 = output_names[len(output_names) - 2]
add_output_name2 = output_names[len(output_names) - 1]
outputs_without_add = output_names[:len(output_names) - 2]
output_list = []
for i, output_name in enumerate(outputs_without_add):
output_list.append(helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shapes[i]))
output_list.append(
helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shapes[i]))
node = onnx.helper.make_node('Split', inputs=['input'], outputs=outputs_without_add, axis=axis)
node = onnx.helper.make_node('Split', inputs=['input'], outputs=outputs_without_add,
axis=axis)
node_add1 = helper.make_node(
'Add',
inputs=[outputs_without_add[1], outputs_without_add[2]],
@@ -441,7 +457,11 @@ class TestSplit(Caffe2OnnxLayerTest):
outputs=[add_output_name2]
)
output_list = output_list + [helper.make_tensor_value_info(add_output_name1, TensorProto.FLOAT, output_shapes[0])] + [helper.make_tensor_value_info(add_output_name2, TensorProto.FLOAT, output_shapes[0])]
output_list = output_list + [
helper.make_tensor_value_info(add_output_name1, TensorProto.FLOAT,
output_shapes[0])] + [
helper.make_tensor_value_info(add_output_name2, TensorProto.FLOAT,
output_shapes[0])]
# Create the graph (GraphProto)
graph_def = helper.make_graph(
@@ -458,7 +478,10 @@ class TestSplit(Caffe2OnnxLayerTest):
return onnx_net, ref_net
def create_split_net_ordered_outputs_multiple_tensor_names(self, input_shape, output_shapes, axis, split_out_names, identity_names, output_names, ir_version):
def create_split_net_ordered_outputs_multiple_tensor_names(self, input_shape, output_shapes,
axis, split_out_names,
identity_names, output_names,
ir_version):
"""
This test checks the case of multiple tensor names on connection incoming to Result. In this case
Result name is equal to one of tensor names from the list.
@@ -490,13 +513,18 @@ class TestSplit(Caffe2OnnxLayerTest):
output_list = []
for i, output_name in enumerate(split_out_names):
output_list.append(helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shapes[i]))
output_list.append(helper.make_tensor_value_info(identity_names[2], TensorProto.FLOAT, output_shapes[i]))
output_list.append(
helper.make_tensor_value_info(output_name, TensorProto.FLOAT, output_shapes[i]))
output_list.append(
helper.make_tensor_value_info(identity_names[2], TensorProto.FLOAT, output_shapes[i]))
node = onnx.helper.make_node('Split', inputs=['input'], outputs=split_out_names, axis=axis)
identity1 = onnx.helper.make_node('Identity', inputs=[split_out_names[0]], outputs=[identity_names[0]])
identity2 = onnx.helper.make_node('Identity', inputs=[identity_names[0]], outputs=[identity_names[1]])
identity3 = onnx.helper.make_node('Identity', inputs=[identity_names[1]], outputs=[identity_names[2]])
identity1 = onnx.helper.make_node('Identity', inputs=[split_out_names[0]],
outputs=[identity_names[0]])
identity2 = onnx.helper.make_node('Identity', inputs=[identity_names[0]],
outputs=[identity_names[1]])
identity3 = onnx.helper.make_node('Identity', inputs=[identity_names[1]],
outputs=[identity_names[2]])
# Create the graph (GraphProto)
graph_def = helper.make_graph(
@@ -515,36 +543,35 @@ class TestSplit(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_split_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_split_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_split_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_net(**params, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_multiple_out)
def test_split_outputs_order(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_split_net_ordered_outputs(**params, ir_version=ir_version), ie_device, precision,
ir_version, temp_dir=temp_dir, output_names=params['output_names'])
def test_split_outputs_order(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_split_net_ordered_outputs(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir,
output_names=params['output_names'], api_2=api_2)
@pytest.mark.parametrize("params", test_multiple_out_with_add)
def test_split_outputs_order_multiple_connection_before_result_case(self,
params,
ie_device,
precision,
ir_version,
temp_dir):
self._test(*self.create_split_net_ordered_outputs_with_add(**params, ir_version=ir_version), ie_device,
precision, ir_version, temp_dir=temp_dir, output_names=params['output_names'])
def test_split_outputs_order_multiple_connection_before_result_case(self, params, ie_device,
precision, ir_version,
temp_dir, api_2):
self._test(*self.create_split_net_ordered_outputs_with_add(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir,
output_names=params['output_names'], api_2=api_2)
@pytest.mark.parametrize("params", test_multiple_out_with_identity)
def test_split_outputs_order_multiple_tensors_before_result_case(self,
@@ -552,6 +579,8 @@ class TestSplit(Caffe2OnnxLayerTest):
ie_device,
precision,
ir_version,
temp_dir):
self._test(*self.create_split_net_ordered_outputs_multiple_tensor_names(**params, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir, output_names=params['output_names'])
temp_dir, api_2):
self._test(*self.create_split_net_ordered_outputs_multiple_tensor_names(**params,
ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir,
output_names=params['output_names'], api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -176,12 +176,13 @@ class TestSqrt(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sqrt(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sqrt(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sqrt_const(self, params, ie_device, precision, ir_version, temp_dir):
def test_sqrt_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -175,36 +175,42 @@ class TestSqueeze(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_squeeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_squeeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_squeeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_squeeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_squeeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_squeeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_squeeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -191,29 +191,38 @@ class TestSum(OnnxRuntimeLayerTest):
dict(dyn_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]], const_shapes=[]),
dict(dyn_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]], const_shapes=[[4, 6, 8]]),
dict(dyn_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]], const_shapes=[[4, 6, 8], [4, 6, 8]]),
dict(dyn_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]], const_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]]),
dict(dyn_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]],
const_shapes=[[4, 6, 8], [4, 6, 8], [4, 6, 8]]),
dict(dyn_shapes=[[4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10]],
const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]],
const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]],
const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]], const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]],
const_shapes=[[4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]],
const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]],
const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12]], const_shapes=[[4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12]], const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12]], const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12]],
const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]], const_shapes=[]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]], const_shapes=[[4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]], const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]],
const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]],
const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]], const_shapes=[]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]], const_shapes=[[4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]],
const_shapes=[[4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]],
const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(dyn_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]],
@@ -221,7 +230,8 @@ class TestSum(OnnxRuntimeLayerTest):
const_test_data_precommit = [
dict(const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]])
dict(const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12],
[4, 6, 8, 10, 12]])
]
const_test_data = [
@@ -236,7 +246,8 @@ class TestSum(OnnxRuntimeLayerTest):
dict(const_shapes=[[4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10], [4, 6, 8, 10]]),
dict(const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]]),
dict(const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12]])
dict(const_shapes=[[4, 6, 8, 10, 12], [4, 6, 8, 10, 12], [4, 6, 8, 10, 12],
[4, 6, 8, 10, 12]])
]
const_test_data_broadcasting_precommit = [
@@ -274,49 +285,57 @@ class TestSum(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sum_opset6(self, params, ie_device, precision, ir_version, temp_dir):
def test_sum_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, opset=6, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_sum_precommit(self, params, ie_device, precision, ir_version, temp_dir):
def test_sum_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, precision=precision, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sum(self, params, ie_device, precision, ir_version, temp_dir):
def test_sum(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net(**params, precision=precision, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
*self.create_net(**params, precision=precision, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", const_test_data)
@pytest.mark.nightly
def test_sum_const_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_const_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sum_const_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_const_net(**params, opset=6, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", const_test_data_precommit)
@pytest.mark.precommit
def test_sum_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sum_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", const_test_data)
@pytest.mark.nightly
def test_sum_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sum_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", const_test_data_broadcasting_precommit)
@pytest.mark.precommit
def test_sum_const_broadcasting_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sum_const_broadcasting_precommit(self, params, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", const_test_data_broadcasting)
@pytest.mark.nightly
def test_sum_const_broadcasting(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sum_const_broadcasting(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_const_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -147,21 +147,25 @@ class TestTopK(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_topk_opset6(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_topk_opset6(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, opset=6, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_topk_opset10(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, opset=10, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_topk_opset10(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, opset=10, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("largest", [1, 0, None])
@pytest.mark.parametrize("sorted", [1, 0, None])
@pytest.mark.nightly
def test_topk_opset11(self, params, ie_device, precision, ir_version, largest, sorted, temp_dir):
def test_topk_opset11(self, params, ie_device, precision, ir_version, largest, sorted, temp_dir,
api_2):
self._test(*self.create_net(**params, largest=largest, sorted=sorted,
opset=11, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
opset=11, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -155,24 +155,29 @@ class TestTranspose(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_transpose_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_transpose_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_transpose(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_transpose(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.nightly
def test_transpose_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_transpose_const_precommit(self, params, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_transpose_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_transpose_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -179,120 +179,140 @@ class TestTrigonomery(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sin(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Sin'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sin(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Sin'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sinh(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Sinh'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_sinh(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Sinh'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_asin(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Asin'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_asin(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Asin'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_cos_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Cos'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_cos_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Cos'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_cos(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Cos'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_cos(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Cos'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_cosh(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Cosh'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_cosh(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Cosh'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_acos(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Acos'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_acos(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Acos'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tan(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Tan'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_tan(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Tan'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tanh(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Tanh'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_tanh(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Tanh'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_atan(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version, op='Atan'), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_atan(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version, op='Atan'), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sin_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Sin'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_sin_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Sin'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_sinh_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Sinh'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_sinh_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Sinh'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_asin_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Asin'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_asin_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Asin'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_cos_const_precommit(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cos'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_cos_const_precommit(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cos'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_cos_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cos'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_cos_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cos'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_cosh_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cosh'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_cosh_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Cosh'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_acos_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Acos'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_acos_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Acos'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tan_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Tan'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_tan_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Tan'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tanh_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Tanh'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_tanh_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Tanh'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_atan_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Atan'),
ie_device, precision, ir_version, temp_dir=temp_dir)
def test_atan_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(
*self.create_net_const(**params, ir_version=ir_version, precision=precision, op='Atan'),
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -175,36 +175,42 @@ class TestUnsqueeze(Caffe2OnnxLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_unsqueeze_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_unsqueeze_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_unsqueeze_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_unsqueeze_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_unsqueeze_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_unsqueeze_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_unsqueeze_net(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_unsqueeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_unsqueeze_const_5D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_unsqueeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_unsqueeze_const_4D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_unsqueeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_unsqueeze_const_3D(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_unsqueeze_net_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -94,17 +94,20 @@ class TestUpsample(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("mode", [None, 'nearest'])
@pytest.mark.parametrize("opset", [7, 9])
@pytest.mark.nightly
def test_upsample_nearest(self, params, mode, opset, ie_device, precision, ir_version, temp_dir):
def test_upsample_nearest(self, params, mode, opset, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, mode=mode, opset=opset, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("opset", [7, 9])
@pytest.mark.nightly
@pytest.mark.xfail(reason='Both onnxruntime and caffe2 calculate linear upsampling differently from IE')
def test_upsample_linear(self, params, opset, ie_device, precision, ir_version, temp_dir):
@pytest.mark.xfail(
reason='Both onnxruntime and caffe2 calculate linear upsampling differently from IE')
def test_upsample_linear(self, params, opset, ie_device, precision, ir_version, temp_dir,
api_2):
self._test(*self.create_net(**params, mode='linear', opset=opset, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
class PytorchLayerTest(CommonLayerTest):
@@ -172,17 +175,21 @@ class TestPytorchUpsample(PytorchLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.parametrize("mode", [None, 'nearest'])
def test_pytorch_upsample_precommit(self, params, mode, ie_device, precision, ir_version, temp_dir):
def test_pytorch_upsample_precommit(self, params, mode, ie_device, precision, ir_version,
temp_dir, api_2):
if ie_device == 'GPU':
pytest.skip('Linear upsampling not supported on GPU')
self._test(*self.create_net(**params, mode=mode, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
self._test(*self.create_net(**params, mode=mode, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("mode", [None, 'nearest', 'bilinear'])
@pytest.mark.nightly
def test_pytorch_upsample(self, params, mode, ie_device, precision, ir_version, temp_dir):
def test_pytorch_upsample(self, params, mode, ie_device, precision, ir_version, temp_dir,
api_2):
if ie_device == 'GPU' and mode == 'bilinear':
pytest.skip('Linear upsampling not supported on GPU')
self._test(*self.create_net(**params, mode=mode, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
self._test(*self.create_net(**params, mode=mode, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -86,12 +86,15 @@ class TestWhere(OnnxRuntimeLayerTest):
dict(condition_shape=[15, 3, 5], shape_than=[15, 1, 5], else_shape=[15, 3, 5]),
dict(condition_shape=[2, 3, 4, 5], shape_than=[], else_shape=[2, 3, 4, 5]),
dict(condition_shape=[2, 3, 4, 5], shape_than=[5], else_shape=[2, 3, 4, 5]),
dict(condition_shape=[2, 3, 4, 5], shape_than=[2, 1, 1, 5], else_shape=[2, 3, 4, 5]),
dict(condition_shape=[2, 3, 4, 5], shape_than=[2, 3, 4, 5], else_shape=[1, 3, 1, 5]),
]
dict(condition_shape=[2, 3, 4, 5], shape_than=[2, 1, 1, 5],
else_shape=[2, 3, 4, 5]),
dict(condition_shape=[2, 3, 4, 5], shape_than=[2, 3, 4, 5],
else_shape=[1, 3, 1, 5]),
]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_where(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_where(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
from unit_tests.utils.graph import build_graph
@@ -251,18 +251,21 @@ class TestXor(OnnxRuntimeLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_xor(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_xor(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_xor_one_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_xor_one_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_one_const(**params, ir_version=ir_version), ie_device,
precision, ir_version,
temp_dir=temp_dir, api_2=api_2)
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_xor_const(self, params, ie_device, precision, ir_version, temp_dir):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision, ir_version,
temp_dir=temp_dir)
def test_xor_const(self, params, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_net_const(**params, ir_version=ir_version), ie_device, precision,
ir_version,
temp_dir=temp_dir, api_2=api_2)

View File

@@ -69,9 +69,12 @@ class TestAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
# Power
@@ -79,34 +82,44 @@ class TestAdd(CommonTFLayerTest):
# ScaleShift
dict(x_shape=[1, 3], y_shape=[1, 3]),
# Eltwise
pytest.param(dict(x_shape=[3, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19180")),
pytest.param(dict(x_shape=[3, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19180")),
# Eltwise
dict(x_shape=[2, 3], y_shape=[2, 3])
]
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
# Power
dict(x_shape=[1, 1, 1], y_shape=[1, 1, 1]),
# ScaleShift
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1, 3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1, 3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 3], y_shape=[1, 1, 3]),
marks=[pytest.mark.xfail(reason="*-19053"), pytest.mark.xfail(reason="*-18830")]),
marks=[pytest.mark.xfail(reason="*-19053"),
pytest.mark.xfail(reason="*-18830")]),
# Eltwise
pytest.param(dict(x_shape=[1, 3, 224], y_shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053"))
pytest.param(dict(x_shape=[1, 3, 224], y_shape=[1, 3, 224]),
marks=pytest.mark.xfail(reason="*-19053"))
]
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
# Power
@@ -114,7 +127,8 @@ class TestAdd(CommonTFLayerTest):
# ScaleShift
dict(x_shape=[1, 3, 1, 1], y_shape=[1, 3, 1, 1]),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 1, 3], y_shape=[1, 1, 1, 3]), marks=pytest.mark.xfail(reason="*-19180")),
pytest.param(dict(x_shape=[1, 1, 1, 3], y_shape=[1, 1, 1, 3]),
marks=pytest.mark.xfail(reason="*-19180")),
# Eltwise
dict(x_shape=[1, 3, 222, 224], y_shape=[1, 3, 222, 224])
]
@@ -122,9 +136,12 @@ class TestAdd(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
# Power
@@ -141,9 +158,12 @@ class TestAdd(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
###############################################################################################
# #
@@ -158,9 +178,12 @@ class TestAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_1D)
@pytest.mark.nightly
def test_add_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_2D = [
# Power
@@ -172,37 +195,50 @@ class TestAdd(CommonTFLayerTest):
# Eltwise
dict(x_shape=[3, 1], y_shape=[3]),
# Eltwise
pytest.param(dict(x_shape=[3, 1], y_shape=[1, 3, 1, 1]), marks=pytest.mark.xfail(reason="*-19051"))
pytest.param(dict(x_shape=[3, 1], y_shape=[1, 3, 1, 1]),
marks=pytest.mark.xfail(reason="*-19051"))
]
@pytest.mark.parametrize("params", test_data_broadcast_2D)
@pytest.mark.nightly
def test_add_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_3D = [
# Power
dict(x_shape=[1, 1, 1], y_shape=[1]),
# Power
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1]),
marks=pytest.mark.xfail(reason="*-19053")),
# ScaleShift
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 1, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[3, 1, 224], y_shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[3, 1, 224], y_shape=[1, 3, 224]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[2, 3, 1], y_shape=[1, 3, 2]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[2, 3, 1], y_shape=[1, 3, 2]),
marks=pytest.mark.xfail(reason="*-19053")),
]
@pytest.mark.parametrize("params", test_data_broadcast_3D)
@pytest.mark.nightly
def test_add_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_4D = [
# Power
@@ -230,9 +266,12 @@ class TestAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_add_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_add_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version=ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_5D = [
# Power
@@ -258,7 +297,11 @@ class TestAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_5D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_add_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_add_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
# we do not perform transpose in the test in case of new frontend
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision,
ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision,
ir_version=ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend,
api_2=api_2)

View File

@@ -7,7 +7,8 @@ from common.tf_layer_test_class import CommonTFLayerTest
class TestBatchToSpace(CommonTFLayerTest):
def create_batch_to_space_net(self, in_shape, crops_value, block_shape_value, out_shape, ir_version, use_new_frontend):
def create_batch_to_space_net(self, in_shape, crops_value, block_shape_value, out_shape,
ir_version, use_new_frontend):
"""
Tensorflow net IR net
@@ -61,12 +62,16 @@ class TestBatchToSpace(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_batch_to_space_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
dict(in_shape=[72, 2, 1, 4, 2], block_shape_value=[3, 4, 2], crops_value=[[1, 2], [0, 0], [3, 0]],
dict(in_shape=[72, 2, 1, 4, 2], block_shape_value=[3, 4, 2],
crops_value=[[1, 2], [0, 0], [3, 0]],
out_shape=[3, 3, 4, 5, 2]),
# todo: enable these tests after supporting the general case on CPU
# dict(in_shape=[144, 2, 1, 4, 1], block_shape_value=[3, 4, 2, 2],
@@ -75,6 +80,9 @@ class TestBatchToSpace(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_batch_to_space_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_batch_to_space_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_batch_to_space_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -93,7 +93,8 @@ class TestBiasAdd(CommonTFLayerTest):
add = tf.nn.bias_add(x, y, name="Operation")
placeholder = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def
placeholder = tf.compat.v1.placeholder(tf.float32, tf_x_shape,
'Input') # Input_1 in graph_def
concat = tf.concat([placeholder, add], axis=tf_concat_axis, name='Operation')
@@ -117,15 +118,21 @@ class TestBiasAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_bias_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_2_consts_2D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
pytest.param(dict(shape=[1, 1, 224]), marks=pytest.mark.xfail(reason="*-19053")),
@@ -134,15 +141,21 @@ class TestBiasAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_bias_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_2_consts_3D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
dict(shape=[1, 1, 100, 224]),
@@ -152,15 +165,21 @@ class TestBiasAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_bias_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_2_consts_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
dict(shape=[1, 1, 50, 100, 224]),
@@ -170,12 +189,18 @@ class TestBiasAdd(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_bias_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_bias_add_2_consts_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bias_add_2_consts_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bias_add_2_consts_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -36,7 +36,8 @@ class TestBinaryOps(CommonTFLayerTest):
inputs_dict[input] = generate_input(self.current_op_type, inputs_dict[input])
return inputs_dict
def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, op_type, use_new_frontend):
def create_add_placeholder_const_net(self, x_shape, y_shape, ir_version, op_type,
use_new_frontend):
"""
Tensorflow net IR net
@@ -115,15 +116,20 @@ class TestBinaryOps(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommits)
@pytest.mark.parametrize("op_type",
['Add', 'Sub', 'Mul', 'Div', 'RealDiv', 'SquaredDifference', 'Pow', 'Maximum', 'Minimum',
'Equal', 'NotEqual', 'Mod', 'Greater', 'GreaterEqual', 'Less', 'LessEqual',
'LogicalAnd', 'LogicalOr', 'LogicalXor', 'FloorMod'])
['Add', 'Sub', 'Mul', 'Div', 'RealDiv', 'SquaredDifference', 'Pow',
'Maximum', 'Minimum',
'Equal', 'NotEqual', 'Mod', 'Greater', 'GreaterEqual', 'Less',
'LessEqual',
'LogicalAnd', 'LogicalOr', 'LogicalXor', 'FloorMod'])
@pytest.mark.nightly
@pytest.mark.precommit
def test_binary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend):
def test_binary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type,
use_new_frontend, api_2):
if ie_device == 'GPU' and precision == "FP16":
pytest.skip("BinaryOps tests temporary skipped on GPU with FP16 precision."
"Several tests don't pass accuracy checks.")
self._test(*self.create_add_placeholder_const_net(**params, ir_version=ir_version, op_type=op_type,
use_new_frontend=use_new_frontend), ie_device, precision,
ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
self._test(
*self.create_add_placeholder_const_net(**params, ir_version=ir_version, op_type=op_type,
use_new_frontend=use_new_frontend), ie_device,
precision,
ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -6,11 +6,13 @@ import pytest
import tensorflow as tf
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
class TestBucketize(CommonTFLayerTest):
def create_bucketize_net(self, input_shape, input_type, boundaries_size, ir_version, use_new_frontend):
def create_bucketize_net(self, input_shape, input_type, boundaries_size, ir_version,
use_new_frontend):
"""
Tensorflow net: IR net:
Input => Input Boundaries
@@ -23,7 +25,8 @@ class TestBucketize(CommonTFLayerTest):
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
x = tf.compat.v1.placeholder(input_type, input_shape, 'Input')
constant_value = np.arange(-boundaries_size * 5, boundaries_size * 5, 10, dtype=np.float32)
constant_value = np.arange(-boundaries_size * 5, boundaries_size * 5, 10,
dtype=np.float32)
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@@ -64,9 +67,12 @@ class TestBucketize(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_float32)
@pytest.mark.nightly
def test_bucketize_float32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bucketize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bucketize_float32(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bucketize_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_int32 = [
dict(input_shape=[5], input_type=tf.int32, boundaries_size=1),
@@ -78,6 +84,9 @@ class TestBucketize(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_int32)
@pytest.mark.nightly
def test_bucketize_int32(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_bucketize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_bucketize_int32(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_bucketize_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -6,6 +6,7 @@ import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
class TestConcat(CommonTFLayerTest):
def create_concat_net(self, shape, axis, ir_version, use_new_frontend):
"""
@@ -25,7 +26,6 @@ class TestConcat(CommonTFLayerTest):
# Create the graph and model
with tf.compat.v1.Session() as sess:
ax = axis
tf_x_shape = shape.copy()
@@ -60,28 +60,38 @@ class TestConcat(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_concat_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_concat_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [dict(shape=[1, 224], axis=0),
dict(shape=[1, 224], axis=-1)]
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_concat_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_concat_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [pytest.param(dict(shape=[1, 3, 224], axis=0), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(shape=[1, 3, 224], axis=-1), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(shape=[1, 3, 224], axis=2), marks=pytest.mark.xfail(reason="*-19053"))]
test_data_3D = [
pytest.param(dict(shape=[1, 3, 224], axis=0), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(shape=[1, 3, 224], axis=-1), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(shape=[1, 3, 224], axis=2), marks=pytest.mark.xfail(reason="*-19053"))]
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_concat_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_concat_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [dict(shape=[1, 3, 100, 224], axis=0),
dict(shape=[1, 3, 100, 224], axis=-1),
@@ -90,9 +100,12 @@ class TestConcat(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_concat_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_concat_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [dict(shape=[1, 3, 50, 100, 224], axis=0),
dict(shape=[1, 3, 50, 100, 224], axis=-1),
@@ -100,6 +113,9 @@ class TestConcat(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_concat_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_concat_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_concat_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -32,7 +32,7 @@ class TestEltwise(CommonTFLayerTest):
tf_x_shape = permute_nchw_to_nhwc(tf_x_shape, use_new_frontend)
x = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input')
y = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def
y = tf.compat.v1.placeholder(tf.float32, tf_x_shape, 'Input') # Input_1 in graph_def
if operation == 'sum':
tf.add(x, y, name='Operation')
@@ -62,9 +62,12 @@ class TestEltwise(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_eltwise_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_eltwise(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_eltwise_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = []
for operation in ['sum', 'max', 'mul']:
@@ -72,8 +75,11 @@ class TestEltwise(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.precommit
def test_eltwise_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_eltwise_5D_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_eltwise_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
self._test(*self.create_eltwise_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -3,9 +3,9 @@
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, \
shaped_data, connect_front, regular_op
@@ -35,7 +35,8 @@ class TestFakeQuantize(CommonTFLayerTest):
expected_nudged_input_max + expected_step
])}
def create_fake_quantize_net(self, il, ih, num_bits, narrow_range, nudged_il, nudged_ih, expected_step, ir_version, use_new_frontend):
def create_fake_quantize_net(self, il, ih, num_bits, narrow_range, nudged_il, nudged_ih,
expected_step, ir_version, use_new_frontend):
# original tf model
import tensorflow as tf
tf.compat.v1.reset_default_graph()
@@ -43,7 +44,8 @@ class TestFakeQuantize(CommonTFLayerTest):
data = tf.compat.v1.placeholder(tf.float32, [11], 'parameter')
input_min = tf.constant(il, name='input_min')
input_max = tf.constant(ih, name='input_max')
tf.quantization.fake_quant_with_min_max_vars(data, input_min, input_max, num_bits, narrow_range, 'fq')
tf.quantization.fake_quant_with_min_max_vars(data, input_min, input_max, num_bits,
narrow_range, 'fq')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@@ -69,7 +71,8 @@ class TestFakeQuantize(CommonTFLayerTest):
**const_for_layer_tests('ih', np.array([nudged_ih], dtype=np.float32)),
**const_for_layer_tests('ol', np.array([nudged_il], dtype=np.float32)),
**const_for_layer_tests('oh', np.array([nudged_ih], dtype=np.float32)),
**regular_op_with_shaped_data('fq', [11], {'type': 'FakeQuantize', 'levels': levels}),
**regular_op_with_shaped_data('fq', [11],
{'type': 'FakeQuantize', 'levels': levels}),
**regular_op('result', {'type': 'Result'}),
}
edges = [
@@ -86,40 +89,60 @@ class TestFakeQuantize(CommonTFLayerTest):
test_data = [
# with8BitsNoScalingNoNudging
dict(il=0.0, ih=255.0, num_bits=8, narrow_range=False, nudged_il=0.0, nudged_ih=255.0, expected_step=1.0),
dict(il=0.0, ih=255.0, num_bits=8, narrow_range=False, nudged_il=0.0, nudged_ih=255.0,
expected_step=1.0),
# with8BitsScalingAndNudgingDown
dict(il=0.5, ih=128.0, num_bits=8, narrow_range=False, nudged_il=0.0, nudged_ih=127.5, expected_step=0.5),
dict(il=0.5, ih=128.0, num_bits=8, narrow_range=False, nudged_il=0.0, nudged_ih=127.5,
expected_step=0.5),
# with8BitsScalingAndNudgingUp
dict(il=-128.0, ih=-0.5, num_bits=8, narrow_range=False, nudged_il=-127.5, nudged_ih=0.0, expected_step=0.5),
dict(il=-128.0, ih=-0.5, num_bits=8, narrow_range=False, nudged_il=-127.5, nudged_ih=0.0,
expected_step=0.5),
# with8BitsScalingAndNudgingBetween
dict(il=-0.1, ih=127.4, num_bits=8, narrow_range=False, nudged_il=0.0, nudged_ih=127.5, expected_step=0.5),
dict(il=-0.1, ih=127.4, num_bits=8, narrow_range=False, nudged_il=0.0, nudged_ih=127.5,
expected_step=0.5),
# with8BitsNarrowRangeNoScalingNoNudging
dict(il=0.0, ih=254.0, num_bits=8, narrow_range=True, nudged_il=0.0, nudged_ih=254.0, expected_step=1.0),
dict(il=0.0, ih=254.0, num_bits=8, narrow_range=True, nudged_il=0.0, nudged_ih=254.0,
expected_step=1.0),
# with8BitsNarrowRangeScalingAndNudgingDown
dict(il=0.1, ih=127.1, num_bits=8, narrow_range=True, nudged_il=0.0, nudged_ih=127.0, expected_step=0.5),
dict(il=0.1, ih=127.1, num_bits=8, narrow_range=True, nudged_il=0.0, nudged_ih=127.0,
expected_step=0.5),
# with8BitsNarrowRangeScalingAndNudgingUp
dict(il=-127.1, ih=-0.1, num_bits=8, narrow_range=True, nudged_il=-127.0, nudged_ih=0.0, expected_step=0.5),
dict(il=-127.1, ih=-0.1, num_bits=8, narrow_range=True, nudged_il=-127.0, nudged_ih=0.0,
expected_step=0.5),
# with8BitsNarrowRangeScalingAndNudgingBetween
dict(il=-0.1, ih=126.9, num_bits=8, narrow_range=True, nudged_il=0.0, nudged_ih=127.0, expected_step=0.5),
dict(il=-0.1, ih=126.9, num_bits=8, narrow_range=True, nudged_il=0.0, nudged_ih=127.0,
expected_step=0.5),
# with7BitsNoScalingNoNudging
dict(il=0.0, ih=127.0, num_bits=7, narrow_range=False, nudged_il=0.0, nudged_ih=127.0, expected_step=1.0),
dict(il=0.0, ih=127.0, num_bits=7, narrow_range=False, nudged_il=0.0, nudged_ih=127.0,
expected_step=1.0),
# with7BitsScalingAndNudgingDown
dict(il=0.5, ih=64.0, num_bits=7, narrow_range=False, nudged_il=0.0, nudged_ih=63.5, expected_step=0.5),
dict(il=0.5, ih=64.0, num_bits=7, narrow_range=False, nudged_il=0.0, nudged_ih=63.5,
expected_step=0.5),
# with7BitsScalingAndNudgingUp
dict(il=-64.0, ih=-0.5, num_bits=7, narrow_range=False, nudged_il=-63.5, nudged_ih=0.0, expected_step=0.5),
dict(il=-64.0, ih=-0.5, num_bits=7, narrow_range=False, nudged_il=-63.5, nudged_ih=0.0,
expected_step=0.5),
# with7BitsScalingAndNudgingBetween
dict(il=-0.1, ih=63.4, num_bits=7, narrow_range=False, nudged_il=0.0, nudged_ih=63.5, expected_step=0.5),
dict(il=-0.1, ih=63.4, num_bits=7, narrow_range=False, nudged_il=0.0, nudged_ih=63.5,
expected_step=0.5),
# with7BitsNarrowRangeNoScalingNoNudging
dict(il=0.0, ih=126.0, num_bits=7, narrow_range=True, nudged_il=0.0, nudged_ih=126.0, expected_step=1.0),
dict(il=0.0, ih=126.0, num_bits=7, narrow_range=True, nudged_il=0.0, nudged_ih=126.0,
expected_step=1.0),
# with7BitsNarrowRangeScalingAndNudgingDown
dict(il=0.1, ih=63.1, num_bits=7, narrow_range=True, nudged_il=0.0, nudged_ih=63.0, expected_step=0.5),
dict(il=0.1, ih=63.1, num_bits=7, narrow_range=True, nudged_il=0.0, nudged_ih=63.0,
expected_step=0.5),
# with7BitsNarrowRangeScalingAndNudgingUp
dict(il=-63.1, ih=-0.1, num_bits=7, narrow_range=True, nudged_il=-63.0, nudged_ih=0.0, expected_step=0.5),
dict(il=-63.1, ih=-0.1, num_bits=7, narrow_range=True, nudged_il=-63.0, nudged_ih=0.0,
expected_step=0.5),
# with7BitsNarrowRangeScalingAndNudgingBetween
dict(il=-0.1, ih=62.9, num_bits=7, narrow_range=True, nudged_il=0.0, nudged_ih=63.0, expected_step=0.5)]
dict(il=-0.1, ih=62.9, num_bits=7, narrow_range=True, nudged_il=0.0, nudged_ih=63.0,
expected_step=0.5)]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_fake_quantize(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_fake_quantize_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision, ir_version,
kwargs_to_prepare_input=params, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_fake_quantize(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_fake_quantize_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend), ie_device,
precision, ir_version,
kwargs_to_prepare_input=params, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -19,7 +19,8 @@ class TestGather(CommonTFLayerTest):
with tf.compat.v1.Session() as sess:
data = tf.compat.v1.placeholder(tf.float32, data_shape, 'data')
indices = tf.constant(indices, dtype=tf.int32)
gather = tf.gather(data, indices, axis=axis, batch_dims=batch_dims, name='gather_output')
gather = tf.gather(data, indices, axis=axis, batch_dims=batch_dims,
name='gather_output')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@@ -36,26 +37,33 @@ class TestGather(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_gather(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_gather_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_gather(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_gather_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_nightly = [
dict(data_shape=[2, 3], axis=1, indices=[0, 2], batch_dims=0),
dict(data_shape=[10, 12], axis=0, indices=[3, 6], batch_dims=0),
dict(data_shape=[10, 12], axis=1, indices=[[0, 1, 3, 4, 5], [6, 7, 9, 10, 11]], batch_dims=0),
dict(data_shape=[10, 12], axis=1, indices=[[0, 1, 3, 4, 5], [6, 7, 9, 10, 11]],
batch_dims=0),
dict(data_shape=[8, 10, 12], axis=0, indices=[3, 6], batch_dims=0),
dict(data_shape=[8, 10, 12], axis=-1, indices=[5, 8], batch_dims=0),
dict(data_shape=[6, 8, 10, 12], axis=0, indices=[2, 5], batch_dims=0),
dict(data_shape=[6, 8, 10, 12], axis=-1, indices=[5, 8], batch_dims=0),
dict(data_shape=[6, 8, 10, 12], axis=2, indices=[[0, 2, 4], [5, 7, 9]], batch_dims=0),
dict(data_shape=[2, 14, 10, 12], axis=1, indices=[[0, 1, 3, 4, 5], [6, 7, 9, 10, 11]], batch_dims=1),
dict(data_shape=[2, 14, 10, 12], axis=1, indices=[[0, 1, 3, 4, 5], [6, 7, 9, 10, 11]],
batch_dims=1),
dict(data_shape=[4, 6, 8, 10, 12], axis=0, indices=[1, 3], batch_dims=0),
dict(data_shape=[4, 6, 8, 10, 12], axis=-1, indices=[5, 8], batch_dims=0),
]
@pytest.mark.parametrize("params", test_data_nightly)
@pytest.mark.nightly
def test_gather_nightly(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_gather_nightly(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_gather_net(**params, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -2,10 +2,10 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from unit_tests.utils.graph import build_graph
@@ -68,9 +68,12 @@ class TestIdentity(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_identity_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_identity_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_identity_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_identity_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@@ -80,6 +83,9 @@ class TestIdentity(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_identity(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_identity_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_identity(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_identity_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -8,6 +8,7 @@ import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
@@ -122,9 +123,12 @@ class TestLogSoftmax(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_log_softmax_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_log_softmax_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[1], reduction_axis=-1),
dict(shape=[2, 5], reduction_axis=-1),
@@ -133,6 +137,9 @@ class TestLogSoftmax(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_log_softmax(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_log_softmax(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_log_softmax_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -68,9 +68,12 @@ class TestMul(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_mul_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
# Power
@@ -78,34 +81,44 @@ class TestMul(CommonTFLayerTest):
# ScaleShift
dict(x_shape=[1, 3], y_shape=[1, 3]),
# Eltwise
pytest.param(dict(x_shape=[3, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19180")),
pytest.param(dict(x_shape=[3, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19180")),
# Eltwise
dict(x_shape=[2, 3], y_shape=[2, 3])
]
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_mul_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
# Power
dict(x_shape=[1, 1, 1], y_shape=[1, 1, 1]),
# ScaleShift
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1, 3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1, 3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 3], y_shape=[1, 1, 3]),
marks=[pytest.mark.xfail(reason="*-19053"), pytest.mark.xfail(reason="*-18830")]),
marks=[pytest.mark.xfail(reason="*-19053"),
pytest.mark.xfail(reason="*-18830")]),
# Eltwise
pytest.param(dict(x_shape=[1, 3, 224], y_shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053"))
pytest.param(dict(x_shape=[1, 3, 224], y_shape=[1, 3, 224]),
marks=pytest.mark.xfail(reason="*-19053"))
]
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_mul_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
# Power
@@ -113,16 +126,20 @@ class TestMul(CommonTFLayerTest):
# ScaleShift
dict(x_shape=[1, 3, 1, 1], y_shape=[1, 3, 1, 1]),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 1, 3], y_shape=[1, 1, 1, 3]), marks=pytest.mark.xfail(reason="*-19180")),
pytest.param(dict(x_shape=[1, 1, 1, 3], y_shape=[1, 1, 1, 3]),
marks=pytest.mark.xfail(reason="*-19180")),
# Eltwise
dict(x_shape=[1, 3, 222, 224], y_shape=[1, 3, 222, 224])
]
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_mul_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
# Power
@@ -139,9 +156,12 @@ class TestMul(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_mul_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
###############################################################################################
# #
@@ -155,9 +175,12 @@ class TestMul(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_1D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_2D = [
# Power
@@ -169,38 +192,51 @@ class TestMul(CommonTFLayerTest):
# Eltwise
dict(x_shape=[3, 1], y_shape=[3]),
# Eltwise
pytest.param(dict(x_shape=[3, 1], y_shape=[1, 3, 1, 1]), marks=pytest.mark.xfail(reason="*-19051"))
pytest.param(dict(x_shape=[3, 1], y_shape=[1, 3, 1, 1]),
marks=pytest.mark.xfail(reason="*-19051"))
]
@pytest.mark.parametrize("params", test_data_broadcast_2D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_3D = [
# Power
dict(x_shape=[1, 1, 1], y_shape=[1]),
# Power
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1]),
marks=pytest.mark.xfail(reason="*-19053")),
# ScaleShift
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 1, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[3, 1, 224], y_shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[3, 1, 224], y_shape=[1, 3, 224]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[2, 3, 1], y_shape=[1, 3, 2]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[2, 3, 1], y_shape=[1, 3, 2]),
marks=pytest.mark.xfail(reason="*-19053")),
]
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_3D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_4D = [
# Power
@@ -228,9 +264,12 @@ class TestMul(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_mul_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_5D = [
# Power
@@ -255,6 +294,9 @@ class TestMul(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_5D)
@pytest.mark.nightly
def test_mul_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_mul_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_mul_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -3,11 +3,11 @@
import numpy as np
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph
from common.utils.tf_utils import permute_nchw_to_nhwc, permute_nchw_to_nhwc
class TestNormalizeL2(CommonTFLayerTest):
@@ -39,7 +39,8 @@ class TestNormalizeL2(CommonTFLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'axes_input_data': {'shape': int64_array([len(axes)]), 'kind': 'data', 'value': int64_array(output_axes)},
'axes_input_data': {'shape': int64_array([len(axes)]), 'kind': 'data',
'value': int64_array(output_axes)},
'axes': {'kind': 'op', 'type': 'Const'},
'axes_data': {'shape': int64_array([len(axes)]), 'kind': 'data'},
'normalize_l2': {'kind': 'op', 'type': 'NormalizeL2'},
@@ -75,7 +76,8 @@ class TestNormalizeL2(CommonTFLayerTest):
'input': {'kind': 'op', 'type': 'Parameter'},
'input_data': {'shape': shape, 'kind': 'data'},
'power_const_input_data': {'shape': int64_array([1]), 'kind': 'data', 'value': np.array([2.0])},
'power_const_input_data': {'shape': int64_array([1]), 'kind': 'data',
'value': np.array([2.0])},
'power_const': {'kind': 'op', 'type': 'Const'},
'power_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
'power': {'kind': 'op', 'type': 'Power'},
@@ -88,13 +90,15 @@ class TestNormalizeL2(CommonTFLayerTest):
'reduce_axes': {'kind': 'op', 'type': 'Const'},
'reduce_axes_data': {'shape': int64_array([len(axes)]), 'kind': 'data'},
'maximum_const_input_data': {'shape': int64_array([1]), 'kind': 'data', 'value': np.array([1e-12])},
'maximum_const_input_data': {'shape': int64_array([1]), 'kind': 'data',
'value': np.array([1e-12])},
'maximum_const': {'kind': 'op', 'type': 'Const'},
'maximum_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
'maximum': {'kind': 'op', 'type': 'Maximum'},
'maximum_data': {'shape': reduced_shape, 'kind': 'data'},
'power2_const_input_data': {'shape': int64_array([1]), 'kind': 'data', 'value': np.array([-0.5])},
'power2_const_input_data': {'shape': int64_array([1]), 'kind': 'data',
'value': np.array([-0.5])},
'power2_const': {'kind': 'op', 'type': 'Const'},
'power2_const_data': {'shape': eltwise_shapes, 'kind': 'data'},
'power2': {'kind': 'op', 'type': 'Power'},
@@ -151,9 +155,12 @@ class TestNormalizeL2(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_fusable_precommit)
@pytest.mark.precommit
def test_NormalizeL2_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_NormalizeL2_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_non_fusable_precommit = [
pytest.param(dict(shape=[2, 3, 5], axes=[0, 1, 2], output_axes=[0, 1, 2]),
@@ -166,10 +173,12 @@ class TestNormalizeL2(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_non_fusable_precommit)
@pytest.mark.precommit
def test_NormalizeL2_non_fusable_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_NormalizeL2_non_fusable_precommit(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_fusable = [
dict(shape=[5, 6], axes=[1], output_axes=[1]),
@@ -182,9 +191,12 @@ class TestNormalizeL2(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_fusable)
@pytest.mark.nightly
def test_NormalizeL2_fusable(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_NormalizeL2_fusable(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_normalize_l2_net_fusable(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_non_fusable = [
dict(shape=[5], axes=[0], output_axes=[0]),
@@ -205,7 +217,9 @@ class TestNormalizeL2(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_non_fusable)
@pytest.mark.nightly
def test_NormalizeL2_non_fusable(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_NormalizeL2_non_fusable(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_normalize_l2_net_non_fusable(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -1,7 +1,6 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
@@ -40,11 +39,11 @@ class TestOneHot(CommonTFLayerTest):
indices = tf.compat.v1.placeholder(tf.int32, shape=net_shape, name='input_indices')
result = tf.one_hot(indices,
depth,
on_value,
off_value,
axis,
name='Operation')
depth,
on_value,
off_value,
axis,
name='Operation')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@@ -64,9 +63,12 @@ class TestOneHot(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_OneHot_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_OneHot_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
dict(shape=[5, 6], depth=7, on_value=None, off_value=None, axis=None),
@@ -83,9 +85,12 @@ class TestOneHot(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_OneHot_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_OneHot_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
dict(shape=[5, 6, 7], depth=8, on_value=None, off_value=None, axis=None),
@@ -103,9 +108,12 @@ class TestOneHot(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_OneHot_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_OneHot_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
dict(shape=[5, 6, 7, 8], depth=9, on_value=None, off_value=None, axis=None),
@@ -125,9 +133,12 @@ class TestOneHot(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_OneHot_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_OneHot_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
dict(shape=[4, 5, 6, 7, 8], depth=9, on_value=None, off_value=None, axis=None),
@@ -147,6 +158,9 @@ class TestOneHot(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_OneHot_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_OneHot_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_one_hot_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -4,11 +4,13 @@
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
class TestPooling(CommonTFLayerTest):
def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, method, ir_version, use_new_frontend):
def create_pooling_net(self, kernel_size, strides, pads, in_shape, out_shape, method,
ir_version, use_new_frontend):
"""
Tensorflow net IR net
@@ -38,9 +40,11 @@ class TestPooling(CommonTFLayerTest):
kernel = [1, kernel_size[0], kernel_size[1], 1]
if method == 'max':
tf.nn.max_pool2d(input=input, ksize=kernel, strides=stride, padding=padding, name='Operation')
tf.nn.max_pool2d(input=input, ksize=kernel, strides=stride, padding=padding,
name='Operation')
elif method == 'avg':
tf.nn.avg_pool2d(input=input, ksize=kernel, strides=stride, padding=padding, name='Operation')
tf.nn.avg_pool2d(input=input, ksize=kernel, strides=stride, padding=padding,
name='Operation')
# 5D tensors
elif len(in_shape) == 5:
@@ -51,9 +55,11 @@ class TestPooling(CommonTFLayerTest):
kernel = [1, kernel_size[0], kernel_size[1], kernel_size[2], 1]
if method == 'max':
tf.nn.max_pool3d(input, kernel, stride, padding, name='Operation') # , data_format='NCHW')
tf.nn.max_pool3d(input, kernel, stride, padding,
name='Operation') # , data_format='NCHW')
elif method == 'avg':
tf.nn.avg_pool3d(input, kernel, stride, padding, name='Operation') # , data_format='NCHW')
tf.nn.avg_pool3d(input, kernel, stride, padding,
name='Operation') # , data_format='NCHW')
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@@ -99,138 +105,161 @@ class TestPooling(CommonTFLayerTest):
test_data_4D = []
for method in ['max', 'avg']:
test_data_4D.extend([dict(kernel_size=[1, 1], strides=[1, 1], pads=[[0, 0], [0, 0], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 224, 224], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 224, 224],
method=method),
dict(kernel_size=[2, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112],
method=method),
dict(kernel_size=[2, 4], strides=[2, 4], pads=[[0, 0], [0, 0], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56],
method=method),
dict(kernel_size=[4, 2], strides=[4, 2], pads=[[0, 0], [0, 0], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 56, 112], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 56, 112],
method=method),
dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 1], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 75], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 75],
method=method),
dict(kernel_size=[3, 2], strides=[3, 2], pads=[[0, 0], [1, 0], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 75, 112], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 75, 112],
method=method),
dict(kernel_size=[3, 3], strides=[2, 2], pads=[[0, 0], [1, 1], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112],
method=method),
dict(kernel_size=[3, 2], strides=[2, 2], pads=[[0, 0], [1, 0], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], method=method),
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112],
method=method),
dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 1], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 75], method=method),
dict(kernel_size=[111, 111], strides=[111, 111], pads=[[54, 54], [55, 55], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 75],
method=method),
dict(kernel_size=[111, 111], strides=[111, 111],
pads=[[54, 54], [55, 55], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 3], method=method),
dict(kernel_size=[111, 113], strides=[111, 113], pads=[[54, 1], [55, 1], 'SAME'],
dict(kernel_size=[111, 113], strides=[111, 113],
pads=[[54, 1], [55, 1], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 2], method=method),
dict(kernel_size=[113, 113], strides=[113, 113], pads=[[1, 1], [1, 1], 'SAME'],
dict(kernel_size=[113, 113], strides=[113, 113],
pads=[[1, 1], [1, 1], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 2], method=method),
dict(kernel_size=[113, 113], strides=[111, 111], pads=[[55, 55], [56, 56], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 3], method=method)])
dict(kernel_size=[113, 113], strides=[111, 111],
pads=[[55, 55], [56, 56], 'SAME'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 3, 3],
method=method)])
test_data_4D.extend([dict(kernel_size=[1, 1], strides=[1, 1], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 224, 224], method=method),
dict(kernel_size=[2, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], method=method),
dict(kernel_size=[2, 4], strides=[2, 4], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56], method=method),
dict(kernel_size=[4, 2], strides=[4, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 56, 112], method=method),
dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 74], method=method),
dict(kernel_size=[3, 2], strides=[3, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 74, 112], method=method),
dict(kernel_size=[3, 3], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 111, 111], method=method),
dict(kernel_size=[3, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 111, 112], method=method),
dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 74], method=method),
dict(kernel_size=[111, 111], strides=[111, 111], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 2], method=method),
dict(kernel_size=[111, 113], strides=[111, 113], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 1], method=method),
dict(kernel_size=[113, 113], strides=[113, 113], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 1, 1], method=method),
dict(kernel_size=[113, 113], strides=[111, 111], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 2], method=method),
dict(kernel_size=[224, 224], strides=[1, 1], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 1, 1], method=method)])
test_data_4D.extend(
[dict(kernel_size=[1, 1], strides=[1, 1], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 224, 224], method=method),
dict(kernel_size=[2, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 112], method=method),
dict(kernel_size=[2, 4], strides=[2, 4], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 56], method=method),
dict(kernel_size=[4, 2], strides=[4, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 56, 112], method=method),
dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 74], method=method),
dict(kernel_size=[3, 2], strides=[3, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 74, 112], method=method),
dict(kernel_size=[3, 3], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 111, 111], method=method),
dict(kernel_size=[3, 2], strides=[2, 2], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 111, 112], method=method),
dict(kernel_size=[2, 3], strides=[2, 3], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 112, 74], method=method),
dict(kernel_size=[111, 111], strides=[111, 111], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 2], method=method),
dict(kernel_size=[111, 113], strides=[111, 113], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 1], method=method),
dict(kernel_size=[113, 113], strides=[113, 113], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 1, 1], method=method),
dict(kernel_size=[113, 113], strides=[111, 111], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 2, 2], method=method),
dict(kernel_size=[224, 224], strides=[1, 1], pads=[[0, 0], [0, 0], 'VALID'],
in_shape=[1, 3, 224, 224], out_shape=[1, 3, 1, 1], method=method)])
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_pooling_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_pool_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_pooling_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = []
for method in ['max', 'avg']:
test_data_5D.extend([dict(kernel_size=[1, 1, 1], strides=[1, 1, 1], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 224, 224, 224], method=method),
dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[2, 2, 4], strides=[2, 2, 4], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 56], method=method),
dict(kernel_size=[4, 2, 2], strides=[4, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 56, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 75], method=method),
dict(kernel_size=[3, 2, 2], strides=[3, 2, 2], pads=[[0, 0, 0], [1, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 75, 112, 112], method=method),
dict(kernel_size=[3, 3, 3], strides=[2, 2, 2], pads=[[0, 0, 0], [1, 1, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[3, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [1, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 75], method=method),
dict(kernel_size=[111, 111, 111], strides=[111, 111, 111],
pads=[[54, 54, 54], [55, 55, 55], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 3, 3, 3], method=method),
dict(kernel_size=[111, 111, 113], strides=[111, 111, 113],
pads=[[54, 54, 1], [55, 55, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 3, 3, 2], method=method),
dict(kernel_size=[113, 113, 113], strides=[113, 113, 113],
pads=[[1, 1, 1], [1, 1, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 2], method=method),
dict(kernel_size=[113, 113, 113], strides=[111, 111, 111],
pads=[[55, 55, 55], [56, 56, 56], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 3, 3, 3], method=method)])
test_data_5D.extend(
[dict(kernel_size=[1, 1, 1], strides=[1, 1, 1], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 224, 224, 224], method=method),
dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[2, 2, 4], strides=[2, 2, 4], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 56], method=method),
dict(kernel_size=[4, 2, 2], strides=[4, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 56, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 75], method=method),
dict(kernel_size=[3, 2, 2], strides=[3, 2, 2], pads=[[0, 0, 0], [1, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 75, 112, 112], method=method),
dict(kernel_size=[3, 3, 3], strides=[2, 2, 2], pads=[[0, 0, 0], [1, 1, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[3, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [1, 0, 0], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 75], method=method),
dict(kernel_size=[111, 111, 111], strides=[111, 111, 111],
pads=[[54, 54, 54], [55, 55, 55], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 3, 3, 3], method=method),
dict(kernel_size=[111, 111, 113], strides=[111, 111, 113],
pads=[[54, 54, 1], [55, 55, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 3, 3, 2], method=method),
dict(kernel_size=[113, 113, 113], strides=[113, 113, 113],
pads=[[1, 1, 1], [1, 1, 1], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 2], method=method),
dict(kernel_size=[113, 113, 113], strides=[111, 111, 111],
pads=[[55, 55, 55], [56, 56, 56], 'SAME'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 3, 3, 3], method=method)])
test_data_5D.extend([dict(kernel_size=[1, 1, 1], strides=[1, 1, 1], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 224, 224, 224], method=method),
dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[2, 2, 4], strides=[2, 2, 4], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 56], method=method),
dict(kernel_size=[4, 2, 2], strides=[4, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 56, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 74], method=method),
dict(kernel_size=[3, 2, 2], strides=[3, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 74, 112, 112], method=method),
dict(kernel_size=[3, 3, 3], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 111, 111, 111], method=method),
dict(kernel_size=[3, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 111, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 74], method=method),
dict(kernel_size=[111, 111, 111], strides=[111, 111, 111],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 2], method=method),
dict(kernel_size=[111, 111, 113], strides=[111, 111, 113],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 1], method=method),
dict(kernel_size=[113, 113, 113], strides=[113, 113, 113],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 1, 1, 1], method=method),
dict(kernel_size=[113, 113, 113], strides=[111, 111, 111],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 2], method=method),
dict(kernel_size=[224, 224, 224], strides=[1, 1, 1],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 1, 1, 1], method=method)])
test_data_5D.extend(
[dict(kernel_size=[1, 1, 1], strides=[1, 1, 1], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 224, 224, 224], method=method),
dict(kernel_size=[2, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 112], method=method),
dict(kernel_size=[2, 2, 4], strides=[2, 2, 4], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 56], method=method),
dict(kernel_size=[4, 2, 2], strides=[4, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 56, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 74], method=method),
dict(kernel_size=[3, 2, 2], strides=[3, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 74, 112, 112], method=method),
dict(kernel_size=[3, 3, 3], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 111, 111, 111], method=method),
dict(kernel_size=[3, 2, 2], strides=[2, 2, 2], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 111, 112, 112], method=method),
dict(kernel_size=[2, 2, 3], strides=[2, 2, 3], pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 112, 112, 74], method=method),
dict(kernel_size=[111, 111, 111], strides=[111, 111, 111],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 2], method=method),
dict(kernel_size=[111, 111, 113], strides=[111, 111, 113],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 1], method=method),
dict(kernel_size=[113, 113, 113], strides=[113, 113, 113],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 1, 1, 1], method=method),
dict(kernel_size=[113, 113, 113], strides=[111, 111, 111],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 2, 2, 2], method=method),
dict(kernel_size=[224, 224, 224], strides=[1, 1, 1],
pads=[[0, 0, 0], [0, 0, 0], 'VALID'],
in_shape=[1, 3, 224, 224, 224], out_shape=[1, 3, 1, 1, 1], method=method)])
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_pool_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_pooling_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
self._test(*self.create_pooling_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -3,17 +3,18 @@
import pytest
import tensorflow as tf
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, \
shaped_data, connect_front
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, \
shaped_data, connect_front
class TestTFRandomUniform(CommonTFLayerTest):
def create_tf_random_uniform_net(self, global_seed, op_seed, x_shape, min_val, max_val, input_type, precision,
def create_tf_random_uniform_net(self, global_seed, op_seed, x_shape, min_val, max_val,
input_type, precision,
ir_version, use_new_frontend):
tf.compat.v1.reset_default_graph()
@@ -26,7 +27,8 @@ class TestTFRandomUniform(CommonTFLayerTest):
x = tf.compat.v1.placeholder(input_type, tf_x_shape, 'Input')
if global_seed is not None:
tf.compat.v1.random.set_random_seed(global_seed)
random_uniform = tf.random.uniform(tf_x_shape, seed=op_seed, dtype=input_type, minval=min_val,
random_uniform = tf.random.uniform(tf_x_shape, seed=op_seed, dtype=input_type,
minval=min_val,
maxval=max_val) + x
tf.compat.v1.global_variables_initializer()
@@ -46,7 +48,8 @@ class TestTFRandomUniform(CommonTFLayerTest):
nodes_attributes = {
**regular_op_with_shaped_data('input', x_shape, {'type': 'Parameter'}),
**const_for_layer_tests('shape', x_shape, int64_array([len(x_shape)]), int64_array([len(x_shape)])),
**const_for_layer_tests('shape', x_shape, int64_array([len(x_shape)]),
int64_array([len(x_shape)])),
**const_for_layer_tests('min_val', min_val, int64_array([]), int64_array([1])),
**const_for_layer_tests('max_val', max_val, int64_array([]), int64_array([1])),
**regular_op_with_shaped_data('random_uniform', x_shape, {'type': 'RandomUniform'}),
@@ -59,8 +62,10 @@ class TestTFRandomUniform(CommonTFLayerTest):
if precision == 'FP16' and input_type == tf.float32:
ref_net = build_graph(nodes_attributes,
[*connect_const_for_layer_tests('shape', '0:random_uniform'),
*connect_const_for_layer_tests('min_val', '1:random_uniform'),
*connect_const_for_layer_tests('max_val', '2:random_uniform'),
*connect_const_for_layer_tests('min_val',
'1:random_uniform'),
*connect_const_for_layer_tests('max_val',
'2:random_uniform'),
*connect('random_uniform', 'convert'),
*connect('convert', '0:add'),
*connect('input', '1:add'),
@@ -68,8 +73,10 @@ class TestTFRandomUniform(CommonTFLayerTest):
else:
ref_net = build_graph(nodes_attributes,
[*connect_const_for_layer_tests('shape', '0:random_uniform'),
*connect_const_for_layer_tests('min_val', '1:random_uniform'),
*connect_const_for_layer_tests('max_val', '2:random_uniform'),
*connect_const_for_layer_tests('min_val',
'1:random_uniform'),
*connect_const_for_layer_tests('max_val',
'2:random_uniform'),
*connect('random_uniform', '0:add'),
*connect('input', '1:add'),
*connect('add', 'result')])
@@ -77,19 +84,28 @@ class TestTFRandomUniform(CommonTFLayerTest):
return tf_net, ref_net
test_data = [pytest.param(
dict(global_seed=32465, op_seed=48971, min_val=0.0, max_val=1.0, x_shape=[3, 7], input_type=tf.float32),
dict(global_seed=32465, op_seed=48971, min_val=0.0, max_val=1.0, x_shape=[3, 7],
input_type=tf.float32),
marks=pytest.mark.precommit),
dict(global_seed=None, op_seed=56197, min_val=-100, max_val=100, x_shape=[6], input_type=tf.float32),
dict(global_seed=None, op_seed=56197, min_val=-100, max_val=100, x_shape=[1, 2, 1, 1], input_type=tf.float32),
dict(global_seed=78132, op_seed=None, min_val=-200, max_val=-50, x_shape=[5, 8], input_type=tf.int32),
dict(global_seed=4571, op_seed=48971, min_val=1.5, max_val=2.3, x_shape=[7], input_type=tf.float32),
dict(global_seed=32465, op_seed=12335, min_val=-150, max_val=-100, x_shape=[18], input_type=tf.int32)]
dict(global_seed=None, op_seed=56197, min_val=-100, max_val=100, x_shape=[6],
input_type=tf.float32),
dict(global_seed=None, op_seed=56197, min_val=-100, max_val=100, x_shape=[1, 2, 1, 1],
input_type=tf.float32),
dict(global_seed=78132, op_seed=None, min_val=-200, max_val=-50, x_shape=[5, 8],
input_type=tf.int32),
dict(global_seed=4571, op_seed=48971, min_val=1.5, max_val=2.3, x_shape=[7],
input_type=tf.float32),
dict(global_seed=32465, op_seed=12335, min_val=-150, max_val=-100, x_shape=[18],
input_type=tf.int32)]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tf_random_uniform(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_tf_random_uniform(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
if ie_device == 'GPU':
pytest.skip("RandomUniform is not supported on GPU")
self._test(*self.create_tf_random_uniform_net(**params, precision=precision, ir_version=ir_version,
use_new_frontend=use_new_frontend), ie_device,
precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend, **params)
self._test(
*self.create_tf_random_uniform_net(**params, precision=precision, ir_version=ir_version,
use_new_frontend=use_new_frontend), ie_device,
precision, temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend,
api_2=api_2, **params)

View File

@@ -2,10 +2,10 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from unit_tests.utils.graph import build_graph
@@ -68,9 +68,12 @@ class TestReLU6(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_relu6_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_relu6_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_relu6_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_relu6_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@@ -80,6 +83,9 @@ class TestReLU6(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_relu6(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_relu6_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_relu6(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_relu6_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -1,9 +1,8 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
import numpy as np
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
@@ -37,27 +36,33 @@ class TestReduceOps(CommonTFLayerTest):
test_data = []
for operation in ['sum', 'max', 'prod', 'min', 'mean']:
test_data.extend([
dict(shape=[2, 3], operation=operation, axis=1),
dict(shape=[2, 3, 5], operation=operation, axis=-2),
dict(shape=[2, 3, 5, 7], operation=operation, axis=2),
dict(shape=[2, 3, 5, 7, 9], operation=operation, axis=[2, -1]),
])
dict(shape=[2, 3], operation=operation, axis=1),
dict(shape=[2, 3, 5], operation=operation, axis=-2),
dict(shape=[2, 3, 5, 7], operation=operation, axis=2),
dict(shape=[2, 3, 5, 7, 9], operation=operation, axis=[2, -1]),
])
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reduce(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_reduce(self, params, keep_dims, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_pre_commit = []
for operation in ['sum', 'max', 'prod', 'min', 'mean']:
test_data_pre_commit.extend([dict(shape=[2, 3, 5, 7], operation=operation, axis=-2),
])
])
@pytest.mark.parametrize("params", test_data_pre_commit)
@pytest.mark.parametrize("keep_dims", [False])
@pytest.mark.precommit
def test_reduce_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_reduce_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_reduce_net(**params, keep_dims=keep_dims, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -3,8 +3,8 @@
import numpy as np
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
@@ -58,7 +58,8 @@ class TestResamplePattern(CommonTFLayerTest):
nodes_attributes = {
'input': {'kind': 'op', 'type': 'Input'},
'input_data': {'shape': shape, 'kind': 'data'},
'resample': {'kind': 'op', 'type': 'caffe.ResampleParameter.NEAREST', "factor": factor,
'resample': {'kind': 'op', 'type': 'caffe.ResampleParameter.NEAREST',
"factor": factor,
"height": 0, "width": 0, "antialias": 0},
'resample_data': {'shape': new_shape, 'kind': 'data'},
}
@@ -78,6 +79,8 @@ class TestResamplePattern(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
@pytest.mark.xfail(reason="*-22273")
def test_resample(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_resample(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_resample_net(params['shape'], params['factor'], use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -1,9 +1,8 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
import numpy as np
import pytest
from common.tf_layer_test_class import CommonTFLayerTest
@@ -30,27 +29,28 @@ class TestReverseV2Ops(CommonTFLayerTest):
test_data = []
test_data.extend([
dict(shape=[5], axis=[0]),
dict(shape=[2, 3], axis=[1]),
dict(shape=[2, 3, 5], axis=[-2]),
dict(shape=[2, 3, 5, 7], axis=[0]),
])
dict(shape=[5], axis=[0]),
dict(shape=[2, 3], axis=[1]),
dict(shape=[2, 3, 5], axis=[-2]),
dict(shape=[2, 3, 5, 7], axis=[0]),
])
@pytest.mark.parametrize("params", test_data)
@pytest.mark.parametrize("keep_dims", [True, False])
@pytest.mark.nightly
def test_reversev2(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
def test_reversev2(self, params, keep_dims, ie_device, precision, ir_version, temp_dir, api_2):
self._test(*self.create_reversev2_net(**params, keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)
test_data_pre_commit = []
test_data_pre_commit.extend([dict(shape=[5], axis=[0]),
dict(shape=[2, 3, 5], axis=[-2])
])
])
@pytest.mark.parametrize("params", test_data_pre_commit)
@pytest.mark.parametrize("keep_dims", [True])
@pytest.mark.precommit
def test_reversev2_precommit(self, params, keep_dims, ie_device, precision, ir_version, temp_dir):
def test_reversev2_precommit(self, params, keep_dims, ie_device, precision, ir_version,
temp_dir, api_2):
self._test(*self.create_reversev2_net(**params, keep_dims=keep_dims, ir_version=ir_version),
ie_device, precision, ir_version, temp_dir=temp_dir)
ie_device, precision, ir_version, temp_dir=temp_dir, api_2=api_2)

View File

@@ -31,15 +31,21 @@ class TestTFRoll(CommonTFLayerTest):
test_data = [dict(shift=[1], axis=[-1], x_shape=[4, 3], input_type=tf.float32),
dict(shift=[1, 5, -7], axis=[0, 1, 1], x_shape=[2, 3, 5], input_type=tf.float16),
dict(shift=[11, -8], axis=[-1, -2], x_shape=[3, 4, 3, 1], input_type=tf.int32),
dict(shift=[7, -2, 5], axis=[0, -1, -1], x_shape=[5, 2, 3, 7], input_type=tf.int64),
dict(shift=[7, -2, 5], axis=[0, -1, -1], x_shape=[5, 2, 3, 7],
input_type=tf.int64),
dict(shift=[3, 7], axis=[0, 1], x_shape=[2, 4, 3, 5, 4], input_type=tf.half),
pytest.param(dict(shift=[1, -2], axis=[0, 1], x_shape=[2, 4, 3, 5], input_type=tf.float32),
marks=pytest.mark.precommit)]
pytest.param(
dict(shift=[1, -2], axis=[0, 1], x_shape=[2, 4, 3, 5], input_type=tf.float32),
marks=pytest.mark.precommit)]
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tf_roll(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_tf_roll(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
if ie_device == 'GPU':
pytest.skip("Roll is not supported on GPU")
self._test(*self.create_tf_roll_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend), ie_device, precision,
temp_dir=temp_dir, ir_version=ir_version, **params, use_new_frontend=use_new_frontend)
self._test(*self.create_tf_roll_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend), ie_device,
precision,
temp_dir=temp_dir, ir_version=ir_version, use_new_frontend=use_new_frontend,
api_2=api_2, **params)

View File

@@ -56,9 +56,12 @@ class TestRsqrt(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_rsqrt_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_rsqrt_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@@ -68,6 +71,9 @@ class TestRsqrt(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_rsqrt(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_rsqrt(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_rsqrt_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -1,19 +1,15 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array
from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, \
shaped_data, connect_front
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
class TestTFScatterND(CommonTFLayerTest):
def create_tf_scatternd_placeholder_const_net(self, x_shape, indices, updates, ir_version, use_new_frontend):
def create_tf_scatternd_placeholder_const_net(self, x_shape, indices, updates, ir_version,
use_new_frontend):
#
# Create Tensorflow model
#
@@ -43,20 +39,27 @@ class TestTFScatterND(CommonTFLayerTest):
return tf_net, ref_net
test_data = [
pytest.param(dict(x_shape=[8], indices=[[4], [3], [1], [7]], updates=[9.0, 10.0, 11.0, 12.0]),
marks=pytest.mark.precommit),
pytest.param(
dict(x_shape=[8], indices=[[4], [3], [1], [7]], updates=[9.0, 10.0, 11.0, 12.0]),
marks=pytest.mark.precommit),
pytest.param(dict(x_shape=[4, 4, 4], indices=[[0], [2]], updates= \
[[[5.0, 5.0, 5.0, 5.0], [6.0, 6.0, 6.0, 6.0], [7.0, 7.0, 7.0, 7.0], [8.0, 8.0, 8.0, 8.0]],
[[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0], [3.0, 3.0, 3.0, 3.0], [4.0, 4.0, 4.0, 4.0]]])),
[[[5.0, 5.0, 5.0, 5.0], [6.0, 6.0, 6.0, 6.0], [7.0, 7.0, 7.0, 7.0],
[8.0, 8.0, 8.0, 8.0]],
[[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0], [3.0, 3.0, 3.0, 3.0],
[4.0, 4.0, 4.0, 4.0]]])),
pytest.param(dict(x_shape=[2, 2], indices=[[0]], updates=[[5.0, 3.0]])),
pytest.param(dict(x_shape=[2, 2], indices=[[1, 1]], updates=[5.0])),
dict(x_shape=[1], indices=[[0]], updates=[3.0]),
dict(x_shape=[20], indices=[[0], [6], [9], [19], [13]], updates=[3.0, 7.0, -12.0, 4.0, -99.0]),
dict(x_shape=[20], indices=[[0], [6], [9], [19], [13]],
updates=[3.0, 7.0, -12.0, 4.0, -99.0]),
dict(x_shape=[4, 2], indices=[[1], [2]], updates=[[9.0, 14.0], [-76.0, 0.0]]),
dict(x_shape=[4, 4, 4], indices=[[0], [1], [3]], updates=[
[[5.0, 1.0, 5.0, 13.0], [8.0, 6.0, 6.0, 8.0], [7.0, 0.0, 0.0, 7.0], [8.0, 8.0, 8.0, 8.0]],
[[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [9.0, 10.0, 11.0, 12.0]],
[[5.0, 5.0, 5.0, 5.0], [6.0, 6.0, 6.0, 6.0], [7.0, 7.0, 7.0, 7.0], [8.0, 8.0, 8.0, 8.0]]]),
[[5.0, 1.0, 5.0, 13.0], [8.0, 6.0, 6.0, 8.0], [7.0, 0.0, 0.0, 7.0],
[8.0, 8.0, 8.0, 8.0]],
[[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]],
[[5.0, 5.0, 5.0, 5.0], [6.0, 6.0, 6.0, 6.0], [7.0, 7.0, 7.0, 7.0],
[8.0, 8.0, 8.0, 8.0]]]),
dict(x_shape=[2, 2, 2], indices=[[1, 1, 1], [0, 1, 0]], updates=[9.0, 6.3]),
dict(x_shape=[2, 2, 2], indices=[[0, 0], [0, 1]], updates=[[6.7, 9.0], [45.0, 8.3]]),
dict(x_shape=[2, 2, 2], indices=[[1]], updates=[[[6.7, 9.0], [45.0, 8.3]]]),
@@ -65,8 +68,9 @@ class TestTFScatterND(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_tf_scatter_nd(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
def test_tf_scatter_nd(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_tf_scatternd_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, temp_dir=temp_dir, ir_version=ir_version,
use_new_frontend=use_new_frontend, **params)
use_new_frontend=use_new_frontend, api_2=api_2, **params)

View File

@@ -56,9 +56,12 @@ class TestSelect(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_select_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_select_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_select_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
dict(shape_condition=[2], shape_input=[2, 3]),
@@ -67,9 +70,12 @@ class TestSelect(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_select_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_select_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_select_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
dict(shape_condition=[3], shape_input=[3, 4, 5]),
@@ -78,9 +84,12 @@ class TestSelect(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_select_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_select_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_select_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
dict(shape_condition=[3], shape_input=[3, 4, 5, 6]),
@@ -90,9 +99,12 @@ class TestSelect(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
@pytest.mark.precommit
def test_select_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_select_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_select_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
dict(shape_condition=[3], shape_input=[3, 4, 5, 6, 7]),
@@ -102,6 +114,9 @@ class TestSelect(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_select_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_select_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_select_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_select_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -7,7 +7,8 @@ from common.tf_layer_test_class import CommonTFLayerTest
class TestSpaceToBatch(CommonTFLayerTest):
def create_space_to_batch_net(self, in_shape, pads_value, block_shape_value, out_shape, ir_version, use_new_frontend):
def create_space_to_batch_net(self, in_shape, pads_value, block_shape_value, out_shape,
ir_version, use_new_frontend):
"""
Tensorflow net IR net
@@ -61,12 +62,16 @@ class TestSpaceToBatch(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_space_to_batch_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_space_to_batch_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
dict(in_shape=[3, 3, 4, 5, 2], block_shape_value=[3, 4, 2], pads_value=[[1, 2], [0, 0], [3, 0]],
dict(in_shape=[3, 3, 4, 5, 2], block_shape_value=[3, 4, 2],
pads_value=[[1, 2], [0, 0], [3, 0]],
out_shape=[72, 2, 1, 4, 2]),
# todo: enable these tests after supporting the general case on CPU
# dict(in_shape=[3, 3, 4, 5, 2], block_shape_value=[3, 4, 2, 2],
@@ -75,6 +80,9 @@ class TestSpaceToBatch(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_space_to_batch_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_space_to_batch_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_space_to_batch_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -54,9 +54,12 @@ class TestSqueeze(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_squeeze_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
pytest.param(dict(shape=[1, 1], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
@@ -66,13 +69,17 @@ class TestSqueeze(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_squeeze_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
pytest.param(dict(shape=[1, 1, 3], axis=[]),
marks=[pytest.mark.xfail(reason="*-18807"), pytest.mark.xfail(reason="*-19053")]),
marks=[pytest.mark.xfail(reason="*-18807"),
pytest.mark.xfail(reason="*-19053")]),
pytest.param(dict(shape=[1, 1, 3], axis=[0]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(shape=[1, 1, 3], axis=[-1]), marks=pytest.mark.xfail(reason="*-19053"))
]
@@ -80,12 +87,16 @@ class TestSqueeze(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_squeeze_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
pytest.param(dict(shape=[1, 1, 50, 100], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
pytest.param(dict(shape=[1, 1, 50, 100], axis=[]),
marks=pytest.mark.xfail(reason="*-18807")),
dict(shape=[1, 1, 50, 100], axis=[0]),
dict(shape=[1, 1, 50, 100], axis=[-1]),
dict(shape=[1, 100, 50, 1], axis=[0, 2])
@@ -94,14 +105,20 @@ class TestSqueeze(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_squeeze_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[]), marks=pytest.mark.xfail(reason="*-18807")),
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[0]), marks=pytest.mark.xfail(reason="*-18879")),
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[-1]), marks=pytest.mark.xfail(reason="*-18879")),
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[]),
marks=pytest.mark.xfail(reason="*-18807")),
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[0]),
marks=pytest.mark.xfail(reason="*-18879")),
pytest.param(dict(shape=[1, 1, 50, 100, 224], axis=[-1]),
marks=pytest.mark.xfail(reason="*-18879")),
dict(shape=[1, 224, 1, 100, 1], axis=[0, 3]),
dict(shape=[1, 224, 1, 100, 1], axis=[0, 1, 3]),
dict(shape=[1, 224, 1, 1, 100], axis=[0, 1, 2]),
@@ -109,12 +126,17 @@ class TestSqueeze(CommonTFLayerTest):
]
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.special_xfail(args={'ie_device': 'GPU', 'precision': 'FP16', 'params': {'axis': [0, 3]}},
reason="*-19394")
@pytest.mark.special_xfail(args={'ie_device': 'GPU', 'precision': 'FP16', 'params': {'axis': [0, 1, 3]}},
reason="*-19394")
@pytest.mark.special_xfail(
args={'ie_device': 'GPU', 'precision': 'FP16', 'params': {'axis': [0, 3]}},
reason="*-19394")
@pytest.mark.special_xfail(
args={'ie_device': 'GPU', 'precision': 'FP16', 'params': {'axis': [0, 1, 3]}},
reason="*-19394")
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_squeeze_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_squeeze_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -9,9 +9,9 @@ from common.tf_layer_test_class import CommonTFLayerTest
class TestStridedSlice(CommonTFLayerTest):
@staticmethod
def create_strided_slice_net(input_shape, begin, end, strides, begin_mask, end_mask, ellipsis_mask,
def create_strided_slice_net(input_shape, begin, end, strides, begin_mask, end_mask,
ellipsis_mask,
new_axis_mask, shrink_axis_mask, ir_version, use_new_frontend):
#
# Create Tensorflow model
#
@@ -21,9 +21,11 @@ class TestStridedSlice(CommonTFLayerTest):
with tf.compat.v1.Session() as sess:
input_node = tf.compat.v1.placeholder(tf.float32, input_shape, 'Input')
strided_slice = tf.compat.v1.strided_slice(input_node, begin=begin, end=end, strides=strides,
strided_slice = tf.compat.v1.strided_slice(input_node, begin=begin, end=end,
strides=strides,
begin_mask=begin_mask, end_mask=end_mask,
ellipsis_mask=ellipsis_mask, new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
tf.compat.v1.global_variables_initializer()
tf_net = sess.graph_def
@@ -42,27 +44,37 @@ class TestStridedSlice(CommonTFLayerTest):
end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=2),
dict(input_shape=[1, 5, 1], begin=[0, 0, 0], end=[1, 5, 1], strides=[1, 1, 1], begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=4),
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=1),
dict(input_shape=[1, 1, 5, 3], begin=[0, 0, 0, 0], end=[1, 1, 5, 3], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 1, 5, 3], begin=[0, 0, 0, 0], end=[1, 1, 5, 3], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=2),
dict(input_shape=[1, 5, 1, 3], begin=[0, 0, 0, 0], end=[1, 5, 1, 3], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 5, 1, 3], begin=[0, 0, 0, 0], end=[1, 5, 1, 3], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=4),
dict(input_shape=[1, 5, 5, 1], begin=[0, 0, 0, 0], end=[1, 5, 1, 1], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 5, 5, 1], begin=[0, 0, 0, 0], end=[1, 5, 1, 1], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=8),
dict(input_shape=[1, 1, 5, 5, 3], begin=[0, 0, 0, 0, 0], end=[1, 1, 5, 5, 3], strides=[1, 1, 1, 1, 1],
dict(input_shape=[1, 1, 5, 5, 3], begin=[0, 0, 0, 0, 0], end=[1, 1, 5, 5, 3],
strides=[1, 1, 1, 1, 1],
begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=3),
dict(input_shape=[1, 5, 1, 5, 3], begin=[0, 0, 0, 0, 0], end=[1, 5, 1, 5, 3], strides=[1, 1, 1, 1, 1],
dict(input_shape=[1, 5, 1, 5, 3], begin=[0, 0, 0, 0, 0], end=[1, 5, 1, 5, 3],
strides=[1, 1, 1, 1, 1],
begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=5),
dict(input_shape=[1, 5, 1, 5, 1], begin=[0, 0, 0, 0, 0], end=[1, 5, 1, 5, 1], strides=[1, 1, 1, 1, 1],
dict(input_shape=[1, 5, 1, 5, 1], begin=[0, 0, 0, 0, 0], end=[1, 5, 1, 5, 1],
strides=[1, 1, 1, 1, 1],
begin_mask=0, end_mask=0, ellipsis_mask=0, new_axis_mask=0, shrink_axis_mask=21),
]
@pytest.mark.parametrize('params', test_squeeze_data)
@pytest.mark.nightly
def test_strided_slice_replace_with_squeeze(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_strided_slice_replace_with_squeeze(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_unsqueeze_data = [
dict(input_shape=[1, 5], begin=[0, 0], end=[1, 5], strides=[1, 1], begin_mask=0,
@@ -75,16 +87,22 @@ class TestStridedSlice(CommonTFLayerTest):
end_mask=0, ellipsis_mask=0, new_axis_mask=4, shrink_axis_mask=0),
dict(input_shape=[1, 5, 3], begin=[0, 0, 0], end=[1, 5, 3], strides=[1, 1, 1], begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=5, shrink_axis_mask=0),
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=8, shrink_axis_mask=0),
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=4, shrink_axis_mask=0),
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1], begin_mask=0,
dict(input_shape=[1, 5, 5, 3], begin=[0, 0, 0, 0], end=[1, 5, 5, 3], strides=[1, 1, 1, 1],
begin_mask=0,
end_mask=0, ellipsis_mask=0, new_axis_mask=2, shrink_axis_mask=0),
]
@pytest.mark.parametrize('params', test_unsqueeze_data)
@pytest.mark.nightly
def test_strided_slice_replace_with_unsqueeze(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_strided_slice_replace_with_unsqueeze(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_strided_slice_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -69,10 +69,12 @@ class TestSub(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_sub_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_1D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
# Power
@@ -80,37 +82,45 @@ class TestSub(CommonTFLayerTest):
# ScaleShift
dict(x_shape=[1, 3], y_shape=[1, 3]),
# Eltwise
pytest.param(dict(x_shape=[3, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19180")),
pytest.param(dict(x_shape=[3, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19180")),
# Eltwise
dict(x_shape=[2, 3], y_shape=[2, 3])
]
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_sub_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_2D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
# Power
dict(x_shape=[1, 1, 1], y_shape=[1, 1, 1]),
# ScaleShift
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1, 3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1, 3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 3], y_shape=[1, 1, 3]),
marks=[pytest.mark.xfail(reason="*-19053"), pytest.mark.xfail(reason="*-18830")]),
marks=[pytest.mark.xfail(reason="*-19053"),
pytest.mark.xfail(reason="*-18830")]),
# Eltwise
pytest.param(dict(x_shape=[1, 3, 224], y_shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053"))
pytest.param(dict(x_shape=[1, 3, 224], y_shape=[1, 3, 224]),
marks=pytest.mark.xfail(reason="*-19053"))
]
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_sub_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_3D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
# Power
@@ -118,7 +128,8 @@ class TestSub(CommonTFLayerTest):
# ScaleShift
dict(x_shape=[1, 3, 1, 1], y_shape=[1, 3, 1, 1]),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 1, 3], y_shape=[1, 1, 1, 3]), marks=pytest.mark.xfail(reason="*-19180")),
pytest.param(dict(x_shape=[1, 1, 1, 3], y_shape=[1, 1, 1, 3]),
marks=pytest.mark.xfail(reason="*-19180")),
# Eltwise
dict(x_shape=[1, 3, 222, 224], y_shape=[1, 3, 222, 224])
]
@@ -126,10 +137,12 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_sub_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_4D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
# Power
@@ -146,10 +159,12 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_sub_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_5D(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
###############################################################################################
# #
@@ -163,10 +178,12 @@ class TestSub(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_broadcast_1D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_broadcast_1D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_2D = [
# Power
@@ -178,40 +195,51 @@ class TestSub(CommonTFLayerTest):
# Eltwise
dict(x_shape=[3, 1], y_shape=[3]),
# Eltwise
pytest.param(dict(x_shape=[3, 1], y_shape=[1, 3, 1, 1]), marks=pytest.mark.xfail(reason="*-19051"))
pytest.param(dict(x_shape=[3, 1], y_shape=[1, 3, 1, 1]),
marks=pytest.mark.xfail(reason="*-19051"))
]
@pytest.mark.parametrize("params", test_data_broadcast_2D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_broadcast_2D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_3D = [
# Power
dict(x_shape=[1, 1, 1], y_shape=[1]),
# Power
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[1]),
marks=pytest.mark.xfail(reason="*-19053")),
# ScaleShift
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 3, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[1, 1, 1], y_shape=[3, 1]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[1, 1, 1], y_shape=[3, 1]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[3, 1, 224], y_shape=[1, 3, 224]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[3, 1, 224], y_shape=[1, 3, 224]),
marks=pytest.mark.xfail(reason="*-19053")),
# Eltwise
pytest.param(dict(x_shape=[2, 3, 1], y_shape=[1, 3, 2]), marks=pytest.mark.xfail(reason="*-19053")),
pytest.param(dict(x_shape=[2, 3, 1], y_shape=[1, 3, 2]),
marks=pytest.mark.xfail(reason="*-19053")),
]
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_3D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_broadcast_3D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_4D = [
# Power
@@ -240,10 +268,12 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_4D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_broadcast_4D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)
test_data_broadcast_5D = [
# Power
@@ -270,7 +300,9 @@ class TestSub(CommonTFLayerTest):
# TODO mark as precommit (after successfully passing in nightly)
@pytest.mark.parametrize("params", test_data_broadcast_5D)
@pytest.mark.nightly
def test_sub_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
def test_sub_placeholder_const_broadcast_5D(self, params, ie_device, precision, ir_version,
temp_dir, use_new_frontend, api_2):
self._test(*self.create_sub_placeholder_const_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version,
temp_dir=temp_dir, use_new_frontend=use_new_frontend)
temp_dir=temp_dir, use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -2,10 +2,10 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc
from unit_tests.utils.graph import build_graph
@@ -71,9 +71,12 @@ class TestSwish(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_precommit)
@pytest.mark.precommit
def test_swish_precommit(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_swish_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_swish_precommit(self, params, ie_device, precision, ir_version, temp_dir,
use_new_frontend, api_2):
self._test(*self.create_swish_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[1]),
dict(shape=[1, 224]),
@@ -83,6 +86,9 @@ class TestSwish(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data)
@pytest.mark.nightly
def test_swish(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_swish_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_swish(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_swish_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -2,12 +2,12 @@
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from common.utils.tf_utils import permute_nchw_to_nhwc, permute_axis
from openvino.tools.mo.ops.op import PermuteAttrs
from unit_tests.utils.graph import build_graph
from common.utils.tf_utils import permute_nchw_to_nhwc, permute_axis
class Test_TopK(CommonTFLayerTest):
@@ -52,7 +52,8 @@ class Test_TopK(CommonTFLayerTest):
#
topk_output_shape = shape.copy()
inverse_nhwc_nchw = PermuteAttrs.get_nhwc_to_nchw_permutation(len(topk_output_shape)).inv
topk_axis = permute_axis(len(topk_output_shape) - 1, inverse_nhwc_nchw) # we need to permute axis attribute
topk_axis = permute_axis(len(topk_output_shape) - 1,
inverse_nhwc_nchw) # we need to permute axis attribute
topk_output_shape[topk_axis] = k
ref_net = None
@@ -64,7 +65,8 @@ class Test_TopK(CommonTFLayerTest):
'Const_k_input_data': {'shape': [], 'kind': 'data'},
'Const_k': {'kind': 'op', 'type': 'Const'},
'Const_k_data': {'shape': [], 'kind': 'data'},
'TopK': {'kind': 'op', 'type': 'TopK', 'axis': topk_axis, 'mode': 'max', 'sort': 'value'},
'TopK': {'kind': 'op', 'type': 'TopK', 'axis': topk_axis, 'mode': 'max',
'sort': 'value'},
'TopK_data_1': {'shape': topk_output_shape, 'kind': 'data'},
'TopK_data_2': {'shape': topk_output_shape, 'kind': 'data'},
'result_1': {'kind': 'op', 'type': 'Result'},
@@ -94,9 +96,12 @@ class Test_TopK(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_1D)
@pytest.mark.nightly
def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_TopK_1D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_topK_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_2D = [
dict(shape=[14, 15], k=10),
@@ -105,9 +110,12 @@ class Test_TopK(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_2D)
@pytest.mark.nightly
def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_TopK_2D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_topK_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_3D = [
dict(shape=[13, 14, 15], k=10),
@@ -116,9 +124,12 @@ class Test_TopK(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_3D)
@pytest.mark.nightly
def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_TopK_3D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_topK_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_4D = [
dict(shape=[12, 13, 14, 15], k=10),
@@ -127,9 +138,12 @@ class Test_TopK(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_4D)
@pytest.mark.nightly
def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_TopK_4D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_topK_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data_5D = [
dict(shape=[11, 12, 13, 14, 15], k=10),
@@ -138,6 +152,9 @@ class Test_TopK(CommonTFLayerTest):
@pytest.mark.parametrize("params", test_data_5D)
@pytest.mark.nightly
def test_TopK_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend):
self._test(*self.create_topK_net(**params, ir_version=ir_version, use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
def test_TopK_5D(self, params, ie_device, precision, ir_version, temp_dir, use_new_frontend,
api_2):
self._test(*self.create_topK_net(**params, ir_version=ir_version,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)

View File

@@ -1,13 +1,13 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph
from common.utils.tf_utils import permute_nchw_to_nhwc
import numpy as np
from unit_tests.utils.graph import build_graph
class TestUnaryOps(CommonTFLayerTest):
@@ -41,7 +41,8 @@ class TestUnaryOps(CommonTFLayerTest):
if self.current_op_type in logical_type:
inputs_dict[input] = np.random.randint(0, 1, inputs_dict[input]).astype(np.bool)
else:
inputs_dict[input] = np.random.uniform(lower, upper, inputs_dict[input]).astype(np.float32)
inputs_dict[input] = np.random.uniform(lower, upper, inputs_dict[input]).astype(
np.float32)
return inputs_dict
@@ -156,12 +157,14 @@ class TestUnaryOps(CommonTFLayerTest):
'LogicalNot',
])
@pytest.mark.precommit
def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend):
def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type,
use_new_frontend, api_2):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_net_with_unary_op(**params, ir_version=ir_version, op_type=op_type,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)
test_data = [dict(shape=[10, 12]),
dict(shape=[8, 10, 12]),
@@ -194,9 +197,11 @@ class TestUnaryOps(CommonTFLayerTest):
'Acosh',
'Asinh'])
@pytest.mark.nightly
def test_unary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_new_frontend):
def test_unary_op(self, params, ie_device, precision, ir_version, temp_dir, op_type,
use_new_frontend, api_2):
if ie_device == 'GPU':
pytest.skip("5D tensors is not supported on GPU")
self._test(*self.create_net_with_unary_op(**params, ir_version=ir_version, op_type=op_type,
use_new_frontend=use_new_frontend),
ie_device, precision, ir_version, temp_dir=temp_dir, use_new_frontend=use_new_frontend)
ie_device, precision, ir_version, temp_dir=temp_dir,
use_new_frontend=use_new_frontend, api_2=api_2)