Files
openvino/tests/layer_tests/onnx_tests/test_scale.py
Daria Ilina e4f44b19fd Mark all failed ONNX layer tests as skip (#16188)
* Mark all failed ONNX layer tests as XFail

* Add additional xfailed marks

* Add one more failed test into XFail

* Add conditions for CPU/GPU failures

* Revert "Add conditions for CPU/GPU failures"

This reverts commit 790524c59c.

* Add failures separation for CPU/GPU

* Replace all xfail with skip
2023-03-15 12:22:32 +06:00

148 lines
4.2 KiB
Python

# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from common.onnx_layer_test_class import OnnxRuntimeLayerTest
class TestScale(OnnxRuntimeLayerTest):
    """Layer tests for the (deprecated) ONNX ``Scale`` operator, run via ONNX Runtime.

    Each ``create_net*`` method builds an in-memory ONNX ModelProto; the
    reference IR net is ``None``, so the framework compares OpenVINO's output
    against ONNX Runtime rather than a hand-built IR.
    """

    def create_net(self, shape, scale, ir_version):
        """Build an ONNX model: Input -> Scale -> Output.

        ONNX net                  IR net
        Input->Scale->Output  =>  Input->Power

        :param shape: input/output tensor shape (list of ints)
        :param scale: scalar multiplier for the Scale node attribute
        :param ir_version: IR version (unused while ref_net is None; kept
                           for the common test-class call convention)
        :return: (onnx ModelProto, None)
        """
        import onnx
        from onnx import helper
        from onnx import TensorProto

        # 'input_info' / 'output_info' avoid shadowing the builtin 'input';
        # the tensor names inside the graph stay 'input' / 'output'.
        input_info = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output_info = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape)

        node_def = onnx.helper.make_node(
            'Scale',
            inputs=['input'],
            outputs=['output'],
            scale=scale
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_def],
            'test_model',
            [input_info],
            [output_info],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        # No hand-built reference IR: comparison is done against ONNX Runtime.
        ref_net = None
        return onnx_net, ref_net

    def create_net_const(self, shape, scale, precision, ir_version):
        """Build an ONNX model where a constant is scaled, then concatenated
        with the input (constant folding should pre-scale the const).

        ONNX net                               IR net
        Input->Concat(+scaled const)->Output => Input->Concat(+const)

        :param shape: input tensor shape; output doubles along axis 0
        :param scale: scalar multiplier for the Scale node attribute
        :param precision: target precision (unused while ref_net is None;
                          kept for the common test-class call convention)
        :param ir_version: IR version (unused while ref_net is None)
        :return: (onnx ModelProto, None)
        """
        import onnx
        from onnx import helper
        from onnx import TensorProto
        import numpy as np

        concat_axis = 0
        output_shape = shape.copy()
        output_shape[concat_axis] *= 2

        input_info = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape)
        output_info = helper.make_tensor_value_info('output', TensorProto.FLOAT, output_shape)

        # NOTE: unseeded random constant — values differ per run, which is
        # fine because the comparison is runtime-vs-runtime on the same model.
        constant = np.random.randint(-127, 127, shape).astype(float)

        node_const_def = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['const1'],
            value=helper.make_tensor(
                name='const_tensor',
                data_type=TensorProto.FLOAT,
                dims=constant.shape,
                vals=constant.flatten(),
            ),
        )
        node_def = onnx.helper.make_node(
            'Scale',
            inputs=['const1'],
            outputs=['scale'],
            scale=scale
        )
        node_concat_def = onnx.helper.make_node(
            'Concat',
            inputs=['input', 'scale'],
            outputs=['output'],
            axis=concat_axis
        )

        # Create the graph (GraphProto)
        graph_def = helper.make_graph(
            [node_const_def, node_def, node_concat_def],
            'test_model',
            [input_info],
            [output_info],
        )

        # Create the model (ModelProto)
        onnx_net = helper.make_model(graph_def, producer_name='test_model')

        # No hand-built reference IR. (Removed the dead 'ir_const' computation
        # that scaled/cast the constant but was never used once ref_net
        # became None.)
        ref_net = None
        return onnx_net, ref_net

    test_data = [dict(shape=[10, 12], scale=0.1),
                 dict(shape=[8, 10, 12], scale=0.9),
                 dict(shape=[6, 8, 10, 12], scale=1.5),
                 dict(shape=[4, 6, 8, 10, 12], scale=4.5)]

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.nightly
    @pytest.mark.skip(reason='GREEN_SUITE')
    def test_scale(self, params, ie_device, precision, ir_version, temp_dir, use_old_api):
        self._test(*self.create_net(**params, ir_version=ir_version), ie_device, precision,
                   ir_version,
                   temp_dir=temp_dir, use_old_api=use_old_api)

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.nightly
    @pytest.mark.skip(reason='GREEN_SUITE')
    def test_scale_const(self, params, ie_device, precision, ir_version, temp_dir, use_old_api):
        self._test(*self.create_net_const(**params, precision=precision, ir_version=ir_version),
                   ie_device, precision, ir_version, temp_dir=temp_dir, use_old_api=use_old_api)