[PyOV] Make graph tests hardware agnostic - part 5 (#14743)
parent 5d2d2ec623
commit fa61aed443
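The recurring change in this diff is to collapse near-duplicate, value-specific tests into a single pytest.mark.parametrize case and to assert only graph-level properties (node type name, output count, output shape, element type), so nothing is compiled or run on a particular device. Below is a minimal sketch of that pattern, loosely mirroring the broadcast change further down; the test name is illustrative only, and the opset8 import and get_element_type helper are assumptions taken from other files in this diff, not from the broadcast file itself.

import numpy as np
import pytest

import openvino.runtime.opset8 as ops
from openvino.runtime.utils.types import get_element_type


@pytest.mark.parametrize(("input_shape", "dtype", "new_shape"), [
    ((3,), np.int32, [3, 3]),
    ((4,), np.float32, [3, 4, 2, 4]),
])
def test_broadcast_sketch(input_shape, dtype, new_shape):
    # Build only the graph node; no device compilation or inference happens here.
    input_data = ops.parameter(input_shape, name="input_data", dtype=dtype)
    node = ops.broadcast(input_data, new_shape)
    # Hardware-agnostic checks on the node itself.
    assert node.get_type_name() == "Broadcast"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == new_shape
    assert node.get_output_element_type(0) == get_element_type(dtype)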
@@ -3,68 +3,50 @@
# SPDX-License-Identifier: Apache-2.0

from openvino.runtime import OVAny
import pytest


def test_any_str():
    string = OVAny("test_string")
    assert isinstance(string.value, str)
    assert string == "test_string"

@pytest.mark.parametrize(("value", "data_type"), [
    ("test_string", str),
    (2137, int),
    (21.37, float),
    (False, bool),
])
def test_any(value, data_type):
    ovany = OVAny(value)
    assert isinstance(ovany.value, data_type)
    assert ovany == value
    assert ovany.get() == value


def test_any_int():
    value = OVAny(2137)
    assert isinstance(value.value, int)
    assert value == 2137

@pytest.mark.parametrize(("values", "data_type"), [
    (["test", "string"], str),
    ([21, 37], int),
    ([21.0, 37.0], float),
])
def test_any_list(values, data_type):
    ovany = OVAny(values)
    assert isinstance(ovany.value, list)
    assert isinstance(ovany[0], data_type)
    assert isinstance(ovany[1], data_type)
    assert len(values) == 2
    assert ovany.get() == values


def test_any_float():
    value = OVAny(21.37)
    assert isinstance(value.value, float)


def test_any_string_list():
    str_list = OVAny(["test", "string"])
    assert isinstance(str_list.value, list)
    assert isinstance(str_list[0], str)
    assert str_list[0] == "test"


def test_any_int_list():
    value = OVAny([21, 37])
    assert isinstance(value.value, list)
    assert len(value) == 2
    assert isinstance(value[0], int)


def test_any_float_list():
    value = OVAny([21.0, 37.0])
    assert isinstance(value.value, list)
    assert len(value) == 2
    assert isinstance(value[0], float)


def test_any_bool():
    value = OVAny(False)
    assert isinstance(value.value, bool)
    assert value is not True


def test_any_dict_str():
    value = OVAny({"key": "value"})
    assert isinstance(value.value, dict)
    assert value["key"] == "value"


def test_any_dict_str_int():
    value = OVAny({"key": 2})
    assert isinstance(value.value, dict)
    assert value["key"] == 2


def test_any_int_dict():
    value = OVAny({1: 2})
    assert isinstance(value.value, dict)
    assert value[1] == 2

@pytest.mark.parametrize(("value_dict", "data_type"), [
    ({"key": "value"}, str),
    ({21: 37}, int),
    ({21.0: 37.0}, float),
])
def test_any_dict(value_dict, data_type):
    ovany = OVAny(value_dict)
    key = list(value_dict.keys())[0]
    assert isinstance(ovany.value, dict)
    assert ovany[key] == list(value_dict.values())[0]
    assert len(ovany.value) == 1
    assert type(ovany.value[key]) == data_type
    assert type(list(value_dict.values())[0]) == data_type
    assert ovany.get() == value_dict


def test_any_set_new_value():
@@ -98,36 +98,21 @@ def test_simple_model_on_parameters(dtype):
    assert list(model.get_output_shape(0)) == [2, 2]


def test_broadcast_1():
    input_data = ops.parameter((3,), name="input_data", dtype=np.int32)
    new_shape = [3, 3]
    node = ops.broadcast(input_data, new_shape)
@pytest.mark.parametrize(
    ("input_shape", "dtype", "new_shape", "axis_mapping", "mode"),
    [
        ((3,), np.int32, [3, 3], [], []),
        ((4,), np.float32, [3, 4, 2, 4], [], []),
        ((3,), np.int8, [3, 3], [[0]], ["EXPLICIT"]),
    ],
)
def test_broadcast(input_shape, dtype, new_shape, axis_mapping, mode):
    input_data = ops.parameter(input_shape, name="input_data", dtype=dtype)
    node = ops.broadcast(input_data, new_shape, *axis_mapping, *mode)
    assert node.get_type_name() == "Broadcast"
    assert node.get_output_size() == 1
    assert node.get_output_element_type(0) == Type.i32
    assert list(node.get_output_shape(0)) == [3, 3]


def test_broadcast_2():
    input_data = ops.parameter((4,), name="input_data", dtype=np.int32)
    new_shape = [3, 4, 2, 4]
    expected_shape = np.broadcast_to(input_data, new_shape).shape
    node = ops.broadcast(input_data, new_shape)
    assert node.get_type_name() == "Broadcast"
    assert node.get_output_size() == 1
    assert node.get_output_element_type(0) == Type.i32
    assert list(node.get_output_shape(0)) == list(expected_shape)


def test_broadcast_3():
    input_data = ops.parameter((3,), name="input_data", dtype=np.int32)
    new_shape = [3, 3]
    axis_mapping = [0]
    node = ops.broadcast(input_data, new_shape, axis_mapping, "EXPLICIT")
    assert node.get_type_name() == "Broadcast"
    assert node.get_output_size() == 1
    assert node.get_output_element_type(0) == Type.i32
    assert list(node.get_output_shape(0)) == [3, 3]
    assert node.get_output_element_type(0) == get_element_type(dtype)
    assert list(node.get_output_shape(0)) == new_shape
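The parametrized test_broadcast above keeps its element-type assertion dtype-agnostic through get_element_type, which translates a numpy dtype into the matching openvino element type. A small illustrative check, assuming the get_element_type helper imported from openvino.runtime.utils.types elsewhere in this diff behaves as these tests expect:

import numpy as np
from openvino.runtime import Type
from openvino.runtime.utils.types import get_element_type

# get_element_type maps numpy dtypes to openvino element types.
assert get_element_type(np.int32) == Type.i32
assert get_element_type(np.float32) == Type.f32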
@pytest.mark.parametrize(
@@ -493,20 +478,6 @@ def test_node_target_inputs_soruce_output():
    assert np.equal([in_model1.get_shape()], [model.get_output_shape(0)]).all()


def test_any():
    any_int = OVAny(32)
    any_str = OVAny("test_text")

    assert any_int.get() == 32
    assert any_str.get() == "test_text"

    any_int.set(777)
    any_str.set("another_text")

    assert any_int.get() == 777
    assert any_str.get() == "another_text"

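The standalone test_any removed here appears to duplicate coverage that the parametrized test_any and test_any_set_new_value cases in the dedicated OVAny test file (first hunk of this diff) already provide. For reference, the get()/set() round trip it exercised:

from openvino.runtime import OVAny

# OVAny wraps a plain Python value; get() returns it and set() replaces it.
any_int = OVAny(32)
assert any_int.get() == 32
any_int.set(777)
assert any_int.get() == 777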
def test_runtime_info():
    test_shape = PartialShape([1, 1, 1, 1])
    test_type = Type.f32
@@ -40,26 +40,9 @@ def test_convolution_backprop_data():
    data_node = ov.parameter(shape=data_shape)
    filter_node = ov.parameter(shape=filter_shape)
    output_shape_node = ov.constant(np.array(output_spatial_shape, dtype=np.int64))
    expected_shape = [1, 1, 9, 9]

    deconvolution = ov.convolution_backprop_data(data_node, filter_node, strides, output_shape_node)
    assert deconvolution.get_type_name() == "ConvolutionBackpropData"
    assert deconvolution.get_output_size() == 1
    assert list(deconvolution.get_output_shape(0)) == expected_shape
    assert list(deconvolution.get_output_shape(0)) == [1, 1, 9, 9]
    assert deconvolution.get_output_element_type(0) == Type.f32


def test_convolution_v1():
    input_tensor = ov.parameter((1, 1, 16, 16), name="input_tensor", dtype=np.float32)
    filters = ov.parameter((1, 1, 3, 3), name="filters", dtype=np.float32)
    strides = np.array([1, 1])
    pads_begin = np.array([0, 0])
    pads_end = np.array([0, 0])
    dilations = np.array([1, 1])
    expected_shape = [1, 1, 14, 14]

    node = ov.convolution(input_tensor, filters, strides, pads_begin, pads_end, dilations)
    assert node.get_type_name() == "Convolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert node.get_output_element_type(0) == Type.f32
@@ -64,12 +64,8 @@ def test_binary_convolution(dtype):
    mode = "xnor-popcount"
    pad_value = 0.0

    input0_shape = [1, 1, 9, 9]
    input1_shape = [1, 1, 3, 3]
    expected_shape = [1, 1, 7, 7]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input0 = ov.parameter([1, 1, 9, 9], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([1, 1, 3, 3], name="Input1", dtype=dtype)

    node = ov.binary_convolution(
        parameter_input0, parameter_input1, strides, pads_begin, pads_end, dilations, mode, pad_value,
@@ -77,14 +73,13 @@ def test_binary_convolution(dtype):

    assert node.get_type_name() == "BinaryConvolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]


@pytest.mark.parametrize("dtype", np_types)
def test_ctc_greedy_decoder(dtype):
    input0_shape = [20, 8, 128]
    input1_shape = [20, 8]
    expected_shape = [8, 20, 1, 1]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
@@ -93,7 +88,7 @@ def test_ctc_greedy_decoder(dtype):

    assert node.get_type_name() == "CTCGreedyDecoder"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [8, 20, 1, 1]


@pytest.mark.parametrize(
@@ -118,16 +113,12 @@ def test_ctc_greedy_decoder(dtype):
    ],
)
def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_repeated, blank_index):
    input0_shape = [8, 20, 128]
    input1_shape = [8]
    input2_shape = [1]
    expected_shape = [8, 20]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=fp_dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=int_dtype)
    parameter_input0 = ov.parameter([8, 20, 128], name="Input0", dtype=fp_dtype)
    parameter_input1 = ov.parameter([8], name="Input1", dtype=int_dtype)
    parameter_input2 = None
    if blank_index:
        parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=int_dtype)
        parameter_input2 = ov.parameter([1], name="Input2", dtype=int_dtype)

    node = ov.ctc_greedy_decoder_seq_len(
        parameter_input0, parameter_input1, parameter_input2, merge_repeated, int_ci, int_sl,
@@ -135,7 +126,7 @@ def test_ctc_greedy_decoder_seq_len(fp_dtype, int_dtype, int_ci, int_sl, merge_r

    assert node.get_type_name() == "CTCGreedyDecoderSeqLen"
    assert node.get_output_size() == 2
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [8, 20]


@pytest.mark.parametrize("dtype", np_types)
@@ -145,14 +136,9 @@ def test_deformable_convolution_opset1(dtype):
    pads_end = np.array([0, 0])
    dilations = np.array([1, 1])

    input0_shape = [1, 1, 9, 9]
    input1_shape = [1, 18, 7, 7]
    input2_shape = [1, 1, 3, 3]
    expected_shape = [1, 1, 7, 7]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype)
    parameter_input0 = ov.parameter([1, 1, 9, 9], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([1, 18, 7, 7], name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter([1, 1, 3, 3], name="Input2", dtype=dtype)

    node = ov_opset1.deformable_convolution(
        parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations,
@@ -160,7 +146,7 @@ def test_deformable_convolution_opset1(dtype):

    assert node.get_type_name() == "DeformableConvolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]


@pytest.mark.parametrize("dtype", np_types)
@@ -170,14 +156,9 @@ def test_deformable_convolution(dtype):
    pads_end = np.array([0, 0])
    dilations = np.array([1, 1])

    input0_shape = [1, 1, 9, 9]
    input1_shape = [1, 18, 7, 7]
    input2_shape = [1, 1, 3, 3]
    expected_shape = [1, 1, 7, 7]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype)
    parameter_input0 = ov.parameter([1, 1, 9, 9], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([1, 18, 7, 7], name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter([1, 1, 3, 3], name="Input2", dtype=dtype)

    node = ov.deformable_convolution(
        parameter_input0, parameter_input1, parameter_input2, strides, pads_begin, pads_end, dilations,
@@ -185,7 +166,7 @@ def test_deformable_convolution(dtype):

    assert node.get_type_name() == "DeformableConvolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]


@pytest.mark.parametrize("dtype", np_types)
@@ -195,16 +176,10 @@ def test_deformable_convolution_mask(dtype):
    pads_end = np.array([0, 0])
    dilations = np.array([1, 1])

    input0_shape = [1, 1, 9, 9]
    input1_shape = [1, 18, 7, 7]
    input2_shape = [1, 1, 3, 3]
    input3_shape = [1, 9, 7, 7]
    expected_shape = [1, 1, 7, 7]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype)
    parameter_input3 = ov.parameter(input3_shape, name="Input3", dtype=dtype)
    parameter_input0 = ov.parameter([1, 1, 9, 9], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([1, 18, 7, 7], name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter([1, 1, 3, 3], name="Input2", dtype=dtype)
    parameter_input3 = ov.parameter([1, 9, 7, 7], name="Input3", dtype=dtype)

    node = ov.deformable_convolution(
        parameter_input0, parameter_input1, parameter_input2, strides,
@@ -213,7 +188,7 @@ def test_deformable_convolution_mask(dtype):

    assert node.get_type_name() == "DeformableConvolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]


@pytest.mark.parametrize("dtype", np_types)
@@ -227,14 +202,9 @@ def test_deformable_psroi_pooling(dtype):
    trans_std = 0.1
    part_size = 7

    input0_shape = [1, 392, 38, 63]
    input1_shape = [300, 5]
    input2_shape = [300, 2, 7, 7]
    expected_shape = [300, 8, 7, 7]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype)
    parameter_input0 = ov.parameter([1, 392, 38, 63], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([300, 5], name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter([300, 2, 7, 7], name="Input2", dtype=dtype)

    node = ov.deformable_psroi_pooling(
        parameter_input0,
@@ -252,43 +222,33 @@ def test_deformable_psroi_pooling(dtype):

    assert node.get_type_name() == "DeformablePSROIPooling"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [300, 8, 7, 7]


@pytest.mark.parametrize("dtype", np_types)
def test_floor_mod(dtype):
    input0_shape = [8, 1, 6, 1]
    input1_shape = [7, 1, 5]
    expected_shape = [8, 7, 6, 5]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input0 = ov.parameter([8, 1, 6, 1], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([7, 1, 5], name="Input1", dtype=dtype)

    node = ov.floor_mod(parameter_input0, parameter_input1)

    assert node.get_type_name() == "FloorMod"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [8, 7, 6, 5]


@pytest.mark.parametrize("dtype", np_types)
def test_gather_tree(dtype):
    input0_shape = [100, 1, 10]
    input1_shape = [100, 1, 10]
    input2_shape = [1]
    input3_shape = []
    expected_shape = [100, 1, 10]

    parameter_input0 = ov.parameter(input0_shape, name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter(input1_shape, name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter(input2_shape, name="Input2", dtype=dtype)
    parameter_input3 = ov.parameter(input3_shape, name="Input3", dtype=dtype)
    parameter_input0 = ov.parameter([100, 1, 10], name="Input0", dtype=dtype)
    parameter_input1 = ov.parameter([100, 1, 10], name="Input1", dtype=dtype)
    parameter_input2 = ov.parameter([1], name="Input2", dtype=dtype)
    parameter_input3 = ov.parameter([], name="Input3", dtype=dtype)

    node = ov.gather_tree(parameter_input0, parameter_input1, parameter_input2, parameter_input3)

    assert node.get_type_name() == "GatherTree"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [100, 1, 10]


@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@@ -311,16 +271,14 @@ def test_lstm_cell_operator(dtype):
    parameter_r = ov.parameter(r_shape, name="R", dtype=dtype)
    parameter_b = ov.parameter(b_shape, name="B", dtype=dtype)

    expected_shape = [1, 128]

    node_default = ov.lstm_cell(
        parameter_x, parameter_h_t, parameter_c_t, parameter_w, parameter_r, parameter_b, hidden_size,
    )

    assert node_default.get_type_name() == "LSTMCell"
    assert node_default.get_output_size() == 2
    assert list(node_default.get_output_shape(0)) == expected_shape
    assert list(node_default.get_output_shape(1)) == expected_shape
    assert list(node_default.get_output_shape(0)) == [1, 128]
    assert list(node_default.get_output_shape(1)) == [1, 128]

    activations = ["tanh", "Sigmoid", "RELU"]
    activation_alpha = [1.0, 2.0, 3.0]
@@ -343,8 +301,8 @@ def test_lstm_cell_operator(dtype):

    assert node_param.get_type_name() == "LSTMCell"
    assert node_param.get_output_size() == 2
    assert list(node_param.get_output_shape(0)) == expected_shape
    assert list(node_param.get_output_shape(1)) == expected_shape
    assert list(node_param.get_output_shape(0)) == [1, 128]
    assert list(node_param.get_output_shape(1)) == [1, 128]


@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@@ -367,16 +325,14 @@ def test_lstm_cell_operator_opset1(dtype):
    parameter_r = ov.parameter(r_shape, name="R", dtype=dtype)
    parameter_b = ov.parameter(b_shape, name="B", dtype=dtype)

    expected_shape = [1, 128]

    node_default = ov_opset1.lstm_cell(
        parameter_x, parameter_h_t, parameter_c_t, parameter_w, parameter_r, parameter_b, hidden_size,
    )

    assert node_default.get_type_name() == "LSTMCell"
    assert node_default.get_output_size() == 2
    assert list(node_default.get_output_shape(0)) == expected_shape
    assert list(node_default.get_output_shape(1)) == expected_shape
    assert list(node_default.get_output_shape(0)) == [1, 128]
    assert list(node_default.get_output_shape(1)) == [1, 128]

    activations = ["tanh", "Sigmoid", "RELU"]
    activation_alpha = [1.0, 2.0, 3.0]
@@ -399,8 +355,8 @@ def test_lstm_cell_operator_opset1(dtype):

    assert node_param.get_type_name() == "LSTMCell"
    assert node_param.get_output_size() == 2
    assert list(node_param.get_output_shape(0)) == expected_shape
    assert list(node_param.get_output_shape(1)) == expected_shape
    assert list(node_param.get_output_shape(0)) == [1, 128]
    assert list(node_param.get_output_shape(1)) == [1, 128]


@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@@ -617,13 +573,11 @@ def test_gru_cell_operator():
    parameter_r = ov.parameter(r_shape, name="R", dtype=np.float32)
    parameter_b = ov.parameter(b_shape, name="B", dtype=np.float32)

    expected_shape = [1, 128]

    node_default = ov.gru_cell(parameter_x, parameter_h_t, parameter_w, parameter_r, parameter_b, hidden_size)

    assert node_default.get_type_name() == "GRUCell"
    assert node_default.get_output_size() == 1
    assert list(node_default.get_output_shape(0)) == expected_shape
    assert list(node_default.get_output_shape(0)) == [1, 128]

    activations = ["tanh", "relu"]
    activations_alpha = [1.0, 2.0]
@@ -651,7 +605,7 @@ def test_gru_cell_operator():

    assert node_param.get_type_name() == "GRUCell"
    assert node_param.get_output_size() == 1
    assert list(node_param.get_output_shape(0)) == expected_shape
    assert list(node_param.get_output_shape(0)) == [1, 128]


def test_gru_sequence():
@@ -1027,11 +981,10 @@ def test_interpolate_opset1(dtype):
    image_node = ov.parameter(image_shape, dtype, name="Image")

    node = ov_opset1.interpolate(image_node, output_shape, attributes)
    expected_shape = [1, 3, 64, 64]

    assert node.get_type_name() == "Interpolate"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [1, 3, 64, 64]


@pytest.mark.parametrize(
@@ -13,7 +13,6 @@ def test_reverse_sequence():
    seq_lengths = np.array([1, 2, 1, 2], dtype=np.int32)
    batch_axis = 2
    sequence_axis = 1
    expected_shape = [2, 3, 4, 2]

    input_param = ov.parameter(input_data.shape, name="input", dtype=np.int32)
    seq_lengths_param = ov.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32)
@@ -21,35 +20,20 @@ def test_reverse_sequence():

    assert model.get_type_name() == "ReverseSequence"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == expected_shape
    assert list(model.get_output_shape(0)) == [2, 3, 4, 2]
    assert model.get_output_element_type(0) == Type.i32


def test_pad_edge():
    pads_begin = np.array([0, 1], dtype=np.int32)
    pads_end = np.array([2, 3], dtype=np.int32)
    expected_shape = [5, 8]

    input_param = ov.parameter((3, 4), name="input", dtype=np.int32)
    model = ov.pad(input_param, pads_begin, pads_end, "edge")

    assert model.get_type_name() == "Pad"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == expected_shape
    assert model.get_output_element_type(0) == Type.i32


def test_pad_constant():
    pads_begin = np.array([0, 1], dtype=np.int32)
    pads_end = np.array([2, 3], dtype=np.int32)
    expected_shape = [5, 8]

    input_param = ov.parameter((3, 4), name="input", dtype=np.int32)
    model = ov.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32))

    assert model.get_type_name() == "Pad"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == expected_shape
    assert list(model.get_output_shape(0)) == [5, 8]
    assert model.get_output_element_type(0) == Type.i32


@@ -57,12 +41,11 @@ def test_select():
    cond = np.array([[False, False], [True, False], [True, True]])
    then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32)
    else_node = np.array([[11, 10], [9, 8], [7, 6]], dtype=np.int32)
    expected_shape = [3, 2]

    node = ov.select(cond, then_node, else_node)
    assert node.get_type_name() == "Select"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [3, 2]
    assert node.get_output_element_type(0) == Type.i32


@@ -70,12 +53,11 @@ def test_gather_v8_nd():
    data = ov.parameter([2, 10, 80, 30, 50], dtype=np.float32, name="data")
    indices = ov.parameter([2, 10, 30, 40, 2], dtype=np.int32, name="indices")
    batch_dims = 2
    expected_shape = [2, 10, 30, 40, 50]

    node = ov.gather_nd(data, indices, batch_dims)
    assert node.get_type_name() == "GatherND"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [2, 10, 30, 40, 50]
    assert node.get_output_element_type(0) == Type.f32


@@ -85,10 +67,9 @@ def test_gather_elements():
    data = ov.parameter(Shape([2, 5]), dtype=data_dtype, name="data")
    indices = ov.parameter(Shape([2, 100]), dtype=indices_type, name="indices")
    axis = 1
    expected_shape = [2, 100]

    node = ov.gather_elements(data, indices, axis)
    assert node.get_type_name() == "GatherElements"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
    assert list(node.get_output_shape(0)) == [2, 100]
    assert node.get_output_element_type(0) == Type.f32
@@ -5,6 +5,7 @@
from openvino.runtime import Type
import openvino.runtime.opset9 as ov
import numpy as np
import pytest


def build_fft_input_data():
@@ -12,109 +13,32 @@ def build_fft_input_data():
    return np.random.uniform(0, 1, (2, 10, 10, 2)).astype(np.float32)


def test_dft_1d():
@pytest.mark.parametrize("dims", [[2], [1, 2], [0, 1, 2]])
def test_dft_dims(dims):
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([2], dtype=np.int64))
    input_axes = ov.constant(np.array(dims, dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes)
    np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
                            axis=2).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
    assert list(dft_node.get_output_shape(0)) == [2, 10, 10, 2]
    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_2d():
@pytest.mark.parametrize(("dims", "signal_size", "expected_shape"), [
    ([-2], [20], [2, 20, 10, 2]),
    ([0, 2], [4, 5], [4, 10, 5, 2]),
    ([1, 2], [4, 5], [2, 4, 5, 2]),
    ([0, 1, 2], [4, 5, 16], [4, 5, 16, 2]),
])
def test_dft_signal_size(dims, signal_size, expected_shape):
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([1, 2], dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes)
    np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
                             axes=[1, 2]).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_3d():
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes)
    np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
                             axes=[0, 1, 2]).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_1d_signal_size():
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([-2], dtype=np.int64))
    input_signal_size = ov.constant(np.array([20], dtype=np.int64))
    input_axes = ov.constant(np.array(dims, dtype=np.int64))
    input_signal_size = ov.constant(np.array(signal_size, dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
    np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20,
                            axis=-2).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_2d_signal_size_1():
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([0, 2], dtype=np.int64))
    input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
    np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5],
                             axes=[0, 2]).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_2d_signal_size_2():
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([1, 2], dtype=np.int64))
    input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
    np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5],
                             axes=[1, 2]).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_3d_signal_size():
    input_data = build_fft_input_data()
    input_tensor = ov.constant(input_data)
    input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64))
    input_signal_size = ov.constant(np.array([4, 5, 16], dtype=np.int64))

    dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
    np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
                             s=[4, 5, 16], axes=[0, 1, 2]).astype(np.complex64)
    expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
@ -7,11 +7,10 @@ import numpy as np
|
||||
import pytest
|
||||
|
||||
from openvino.runtime.utils.types import get_element_type
|
||||
from tests import xfail_issue_58033
|
||||
|
||||
|
||||
def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,
|
||||
seed=202104):
|
||||
def einsum_op_check(input_shapes: list, equation: str, data_type: np.dtype,
|
||||
seed=202104):
|
||||
"""Test Einsum operation for given input shapes, equation, and data type.
|
||||
|
||||
It generates input data of given shapes and type, receives reference results using numpy,
|
||||
@ -45,46 +44,44 @@ def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_dot_product(data_type):
|
||||
einsum_op_exec([5, 5], "i,i->", data_type)
|
||||
einsum_op_check([5, 5], "i,i->", data_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_matrix_multiplication(data_type):
|
||||
einsum_op_exec([(2, 3), (3, 4)], "ab,bc->ac", data_type)
|
||||
einsum_op_check([(2, 3), (3, 4)], "ab,bc->ac", data_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_batch_trace(data_type):
|
||||
einsum_op_exec([(2, 3, 3)], "kii->k", data_type)
|
||||
einsum_op_check([(2, 3, 3)], "kii->k", data_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_diagonal_extraction(data_type):
|
||||
einsum_op_exec([(6, 5, 5)], "kii->ki", data_type)
|
||||
einsum_op_check([(6, 5, 5)], "kii->ki", data_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_transpose(data_type):
|
||||
einsum_op_exec([(1, 2, 3)], "ijk->kij", data_type)
|
||||
einsum_op_check([(1, 2, 3)], "ijk->kij", data_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_multiple_multiplication(data_type):
|
||||
einsum_op_exec([(2, 5), (5, 3, 6), (5, 3)], "ab,bcd,bc->ca", data_type)
|
||||
einsum_op_check([(2, 5), (5, 3, 6), (5, 3)], "ab,bcd,bc->ca", data_type)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_simple_ellipsis(data_type):
|
||||
einsum_op_exec([(5, 3, 4)], "a...->...", data_type)
|
||||
einsum_op_check([(5, 3, 4)], "a...->...", data_type)
|
||||
|
||||
|
||||
@xfail_issue_58033
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_multiple_ellipsis(data_type):
|
||||
einsum_op_exec([(3, 5), 1], "a...,...->a...", data_type, with_value=True)
|
||||
einsum_op_check([(3, 5), 1], "a...,...->a...", data_type)
|
||||
|
||||
|
||||
@xfail_issue_58033
|
||||
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
|
||||
def test_broadcasting_ellipsis(data_type):
|
||||
einsum_op_exec([(9, 1, 4, 3), (3, 11, 7, 1)], "a...b,b...->a...", data_type, with_value=True)
|
||||
einsum_op_check([(9, 1, 4, 3), (3, 11, 7, 1)], "a...b,b...->a...", data_type)
|
||||
|
@ -5,69 +5,21 @@
|
||||
from openvino.runtime import Type
|
||||
import openvino.runtime.opset8 as ov
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
|
||||
def test_gather():
|
||||
input_data = ov.parameter((3, 3), name="input_data", dtype=np.float32)
|
||||
input_indices = ov.parameter((1, 2), name="input_indices", dtype=np.int32)
|
||||
input_axis = np.array([1], np.int32)
|
||||
expected_shape = [3, 1, 2]
|
||||
@pytest.mark.parametrize(("input_shape", "indices", "axis", "expected_shape", "batch_dims"), [
|
||||
((3, 3), (1, 2), [1], [3, 1, 2], []),
|
||||
((3, 3), (1, 2), 1, [3, 1, 2], []),
|
||||
((2, 5), (2, 3), [1], [2, 3], [1]),
|
||||
((2, 5), (2, 3), [1], [2, 2, 3], []),
|
||||
])
|
||||
def test_gather(input_shape, indices, axis, expected_shape, batch_dims):
|
||||
input_data = ov.parameter(input_shape, name="input_data", dtype=np.float32)
|
||||
input_indices = ov.parameter(indices, name="input_indices", dtype=np.int32)
|
||||
input_axis = np.array(axis, np.int32)
|
||||
|
||||
node = ov.gather(input_data, input_indices, input_axis)
|
||||
assert node.get_type_name() == "Gather"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_gather_with_scalar_axis():
|
||||
input_data = ov.parameter((3, 3), name="input_data", dtype=np.float32)
|
||||
input_indices = ov.parameter((1, 2), name="input_indices", dtype=np.int32)
|
||||
input_axis = np.array(1, np.int32)
|
||||
expected_shape = [3, 1, 2]
|
||||
|
||||
node = ov.gather(input_data, input_indices, input_axis)
|
||||
assert node.get_type_name() == "Gather"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_gather_batch_dims_1():
|
||||
input_data = ov.parameter((2, 5), name="input_data", dtype=np.float32)
|
||||
input_indices = ov.parameter((2, 3), name="input_indices", dtype=np.int32)
|
||||
input_axis = np.array([1], np.int32)
|
||||
batch_dims = 1
|
||||
expected_shape = [2, 3]
|
||||
|
||||
node = ov.gather(input_data, input_indices, input_axis, batch_dims)
|
||||
assert node.get_type_name() == "Gather"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_gather_negative_indices():
|
||||
input_data = ov.parameter((3, 3), name="input_data", dtype=np.float32)
|
||||
input_indices = ov.parameter((1, 2), name="input_indices", dtype=np.int32)
|
||||
input_axis = np.array([1], np.int32)
|
||||
expected_shape = [3, 1, 2]
|
||||
|
||||
node = ov.gather(input_data, input_indices, input_axis)
|
||||
assert node.get_type_name() == "Gather"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_gather_batch_dims_1_negative_indices():
|
||||
input_data = ov.parameter((2, 5), name="input_data", dtype=np.float32)
|
||||
input_indices = ov.parameter((2, 3), name="input_indices", dtype=np.int32)
|
||||
input_axis = np.array([1], np.int32)
|
||||
batch_dims = 1
|
||||
expected_shape = [2, 3]
|
||||
|
||||
node = ov.gather(input_data, input_indices, input_axis, batch_dims)
|
||||
node = ov.gather(input_data, input_indices, input_axis, *batch_dims)
|
||||
assert node.get_type_name() == "Gather"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
|
@ -5,6 +5,7 @@
|
||||
from openvino.runtime import Type
|
||||
import openvino.runtime.opset8 as ov
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
|
||||
def get_data():
|
||||
@ -27,13 +28,17 @@ def test_idft_1d():
|
||||
assert dft_node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_idft_2d():
|
||||
@pytest.mark.parametrize(("axes"), [
|
||||
([1, 2]),
|
||||
([0, 1, 2]),
|
||||
])
|
||||
def test_idft_2d_3d(axes):
|
||||
expected_results = get_data()
|
||||
complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1),
|
||||
axes=[1, 2]).astype(np.complex64)
|
||||
axes=axes).astype(np.complex64)
|
||||
input_data = np.stack((complex_input_data.real, complex_input_data.imag), axis=-1)
|
||||
input_tensor = ov.constant(input_data)
|
||||
input_axes = ov.constant(np.array([1, 2], dtype=np.int64))
|
||||
input_axes = ov.constant(np.array(axes, dtype=np.int64))
|
||||
|
||||
dft_node = ov.idft(input_tensor, input_axes)
|
||||
assert dft_node.get_type_name() == "IDFT"
|
||||
|
@ -33,12 +33,11 @@ def test_lrn_factory():
|
||||
nsize = 3
|
||||
axis = np.array([1], dtype=np.int32)
|
||||
inputs = ov.parameter((1, 2, 3, 4), name="inputs", dtype=np.float32)
|
||||
expected_shape = [1, 2, 3, 4]
|
||||
|
||||
node = ov.lrn(inputs, axis, alpha, beta, bias, nsize)
|
||||
assert node.get_type_name() == "LRN"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert list(node.get_output_shape(0)) == [1, 2, 3, 4]
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
@ -49,12 +48,11 @@ def test_batch_norm():
|
||||
mean = ov.parameter((3,), name="mean", dtype=np.float32)
|
||||
variance = ov.parameter((3,), name="variance", dtype=np.float32)
|
||||
epsilon = 9.99e-06
|
||||
expected_shape = [2, 3]
|
||||
|
||||
node = ov.batch_norm_inference(data, gamma, beta, mean, variance, epsilon)
|
||||
assert node.get_type_name() == "BatchNormInference"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert list(node.get_output_shape(0)) == [2, 3]
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
@ -64,13 +62,12 @@ def test_mvn_no_variance():
|
||||
epsilon = 1e-9
|
||||
normalize_variance = False
|
||||
eps_mode = "outside_sqrt"
|
||||
expected_shape = [1, 3, 3, 3]
|
||||
|
||||
node = ov.mvn(data, axes, normalize_variance, epsilon, eps_mode)
|
||||
|
||||
assert node.get_type_name() == "MVN"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert list(node.get_output_shape(0)) == [1, 3, 3, 3]
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
@ -80,11 +77,10 @@ def test_mvn():
|
||||
epsilon = 1e-9
|
||||
normalize_variance = True
|
||||
eps_mode = "outside_sqrt"
|
||||
expected_shape = [1, 3, 3, 3]
|
||||
|
||||
node = ov.mvn(data, axes, normalize_variance, epsilon, eps_mode)
|
||||
|
||||
assert node.get_type_name() == "MVN"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == expected_shape
|
||||
assert list(node.get_output_shape(0)) == [1, 3, 3, 3]
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
@ -4,176 +4,42 @@
|
||||
# flake8: noqa
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
import openvino.runtime.opset8 as ov
|
||||
from openvino.runtime import AxisSet, Shape, Type
|
||||
from openvino.runtime.op import Constant, Parameter
|
||||
|
||||
|
||||
def binary_op(op_str, a, b):
|
||||
|
||||
if op_str == "+":
|
||||
return a + b
|
||||
elif op_str == "Add":
|
||||
return ov.add(a, b)
|
||||
elif op_str == "-":
|
||||
return a - b
|
||||
elif op_str == "Sub":
|
||||
return ov.subtract(a, b)
|
||||
elif op_str == "*":
|
||||
return a * b
|
||||
elif op_str == "Mul":
|
||||
return ov.multiply(a, b)
|
||||
elif op_str == "/":
|
||||
return a / b
|
||||
elif op_str == "Div":
|
||||
return ov.divide(a, b)
|
||||
elif op_str == "Equal":
|
||||
return ov.equal(a, b)
|
||||
elif op_str == "Greater":
|
||||
return ov.greater(a, b)
|
||||
elif op_str == "GreaterEq":
|
||||
return ov.greater_equal(a, b)
|
||||
elif op_str == "Less":
|
||||
return ov.less(a, b)
|
||||
elif op_str == "LessEq":
|
||||
return ov.less_equal(a, b)
|
||||
elif op_str == "Maximum":
|
||||
return ov.maximum(a, b)
|
||||
elif op_str == "Minimum":
|
||||
return ov.minimum(a, b)
|
||||
elif op_str == "NotEqual":
|
||||
return ov.not_equal(a, b)
|
||||
elif op_str == "Power":
|
||||
return ov.power(a, b)
|
||||
|
||||
|
||||
def binary_op_ref(op_str, a, b):
|
||||
|
||||
if op_str == "+" or op_str == "Add":
|
||||
return a + b
|
||||
elif op_str == "-" or op_str == "Sub":
|
||||
return a - b
|
||||
elif op_str == "*" or op_str == "Mul":
|
||||
return a * b
|
||||
elif op_str == "/" or op_str == "Div":
|
||||
return a / b
|
||||
elif op_str == "Dot":
|
||||
return np.dot(a, b)
|
||||
elif op_str == "Equal":
|
||||
return np.equal(a, b)
|
||||
elif op_str == "Greater":
|
||||
return np.greater(a, b)
|
||||
elif op_str == "GreaterEq":
|
||||
return np.greater_equal(a, b)
|
||||
elif op_str == "Less":
|
||||
return np.less(a, b)
|
||||
elif op_str == "LessEq":
|
||||
return np.less_equal(a, b)
|
||||
elif op_str == "Maximum":
|
||||
return np.maximum(a, b)
|
||||
elif op_str == "Minimum":
|
||||
return np.minimum(a, b)
|
||||
elif op_str == "NotEqual":
|
||||
return np.not_equal(a, b)
|
||||
elif op_str == "Power":
|
||||
return np.power(a, b)
|
||||
|
||||
|
||||
def binary_op_exec(op_str, expected_ov_str=None):
|
||||
if not expected_ov_str:
|
||||
expected_ov_str = op_str
|
||||
|
||||
@pytest.mark.parametrize(("ov_op", "expected_ov_str", "expected_type"), [
|
||||
(lambda a, b: a + b, "Add", Type.f32),
|
||||
(ov.add, "Add", Type.f32),
|
||||
(lambda a, b: a - b, "Subtract", Type.f32),
|
||||
(ov.subtract, "Subtract", Type.f32),
|
||||
(lambda a, b: a * b, "Multiply", Type.f32),
|
||||
(ov.multiply, "Multiply", Type.f32),
|
||||
(lambda a, b: a / b, "Divide", Type.f32),
|
||||
(ov.divide, "Divide", Type.f32),
|
||||
(ov.maximum, "Maximum", Type.f32),
|
||||
(ov.minimum, "Minimum", Type.f32),
|
||||
(ov.power, "Power", Type.f32),
|
||||
(ov.equal, "Equal", Type.boolean),
|
||||
(ov.greater, "Greater", Type.boolean),
|
||||
(ov.greater_equal, "GreaterEqual", Type.boolean),
|
||||
(ov.less, "Less", Type.boolean),
|
||||
(ov.less_equal, "LessEqual", Type.boolean),
|
||||
(ov.not_equal, "NotEqual", Type.boolean),
|
||||
])
|
||||
def test_binary_op(ov_op, expected_ov_str, expected_type):
|
||||
element_type = Type.f32
|
||||
shape = Shape([2, 2])
|
||||
A = Parameter(element_type, shape)
|
||||
B = Parameter(element_type, shape)
|
||||
node = binary_op(op_str, A, B)
|
||||
node = ov_op(A, B)
|
||||
|
||||
assert node.get_type_name() == expected_ov_str
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == [2, 2]
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def binary_op_comparison(op_str, expected_ov_str=None):
|
||||
if not expected_ov_str:
|
||||
expected_ov_str = op_str
|
||||
|
||||
element_type = Type.f32
|
||||
shape = Shape([2, 2])
|
||||
A = Parameter(element_type, shape)
|
||||
B = Parameter(element_type, shape)
|
||||
node = binary_op(op_str, A, B)
|
||||
|
||||
assert node.get_type_name() == expected_ov_str
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == [2, 2]
|
||||
assert node.get_output_element_type(0) == Type.boolean
|
||||
|
||||
|
||||
def test_add():
|
||||
binary_op_exec("+", "Add")
|
||||
|
||||
|
||||
def test_add_op():
|
||||
binary_op_exec("Add")
|
||||
|
||||
|
||||
def test_sub():
|
||||
binary_op_exec("-", "Subtract")
|
||||
|
||||
|
||||
def test_sub_op():
|
||||
binary_op_exec("Sub", "Subtract")
|
||||
|
||||
|
||||
def test_mul():
|
||||
binary_op_exec("*", "Multiply")
|
||||
|
||||
|
||||
def test_mul_op():
|
||||
binary_op_exec("Mul", "Multiply")
|
||||
|
||||
|
||||
def test_div():
|
||||
binary_op_exec("/", "Divide")
|
||||
|
||||
|
||||
def test_div_op():
|
||||
binary_op_exec("Div", "Divide")
|
||||
|
||||
|
||||
def test_maximum():
|
||||
binary_op_exec("Maximum")
|
||||
|
||||
|
||||
def test_minimum():
|
||||
binary_op_exec("Minimum")
|
||||
|
||||
|
||||
def test_power():
|
||||
binary_op_exec("Power")
|
||||
|
||||
|
||||
def test_greater():
|
||||
binary_op_comparison("Greater")
|
||||
|
||||
|
||||
def test_greater_eq():
|
||||
binary_op_comparison("GreaterEq", "GreaterEqual")
|
||||
|
||||
|
||||
def test_less():
|
||||
binary_op_comparison("Less")
|
||||
|
||||
|
||||
def test_less_eq():
|
||||
binary_op_comparison("LessEq", "LessEqual")
|
||||
|
||||
|
||||
def test_not_equal():
|
||||
binary_op_comparison("NotEqual")
|
||||
assert node.get_output_element_type(0) == expected_type
|
||||
|
||||
|
||||
def test_add_with_mul():
|
||||
@ -191,105 +57,34 @@ def test_add_with_mul():
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def unary_op(op_str, a):
|
||||
if op_str == "Abs":
|
||||
return ov.abs(a)
|
||||
elif op_str == "Acos":
|
||||
return ov.acos(a)
|
||||
elif op_str == "Acosh":
|
||||
return ov.acosh(a)
|
||||
elif op_str == "Asin":
|
||||
return ov.asin(a)
|
||||
elif op_str == "Asinh":
|
||||
return ov.asinh(a)
|
||||
elif op_str == "Atan":
|
||||
return ov.atan(a)
|
||||
elif op_str == "Atanh":
|
||||
return ov.atanh(a)
|
||||
elif op_str == "Ceiling":
|
||||
return ov.ceiling(a)
|
||||
elif op_str == "Cos":
|
||||
return ov.cos(a)
|
||||
elif op_str == "Cosh":
|
||||
return ov.cosh(a)
|
||||
elif op_str == "Floor":
|
||||
return ov.floor(a)
|
||||
elif op_str == "log":
|
||||
return ov.log(a)
|
||||
elif op_str == "exp":
|
||||
return ov.exp(a)
|
||||
elif op_str == "negative":
|
||||
return ov.negative(a)
|
||||
elif op_str == "Sign":
|
||||
return ov.sign(a)
|
||||
elif op_str == "Sin":
|
||||
return ov.sin(a)
|
||||
elif op_str == "Sinh":
|
||||
return ov.sinh(a)
|
||||
elif op_str == "Sqrt":
|
||||
return ov.sqrt(a)
|
||||
elif op_str == "Tan":
|
||||
return ov.tan(a)
|
||||
elif op_str == "Tanh":
|
||||
return ov.tanh(a)
|
||||
|
||||
|
||||
def unary_op_ref(op_str, a):
|
||||
if op_str == "Abs":
|
||||
return np.abs(a)
|
||||
elif op_str == "Acos":
|
||||
return np.arccos(a)
|
||||
elif op_str == "Acosh":
|
||||
return np.arccosh(a)
|
||||
elif op_str == "Asin":
|
||||
return np.arcsin(a)
|
||||
elif op_str == "Asinh":
|
||||
return np.arcsinh(a)
|
||||
elif op_str == "Atan":
|
||||
return np.arctan(a)
|
||||
elif op_str == "Atanh":
|
||||
return np.arctanh(a)
|
||||
elif op_str == "Ceiling":
|
||||
return np.ceil(a)
|
||||
elif op_str == "Cos":
|
||||
return np.cos(a)
|
||||
elif op_str == "Cosh":
|
||||
return np.cosh(a)
|
||||
elif op_str == "Floor":
|
||||
return np.floor(a)
|
||||
elif op_str == "log":
|
||||
return np.log(a)
|
||||
elif op_str == "exp":
|
||||
return np.exp(a)
|
||||
elif op_str == "negative":
|
||||
return np.negative(a)
|
||||
elif op_str == "Reverse":
|
||||
return np.fliplr(a)
|
||||
elif op_str == "Sign":
|
||||
return np.sign(a)
|
||||
elif op_str == "Sin":
|
||||
return np.sin(a)
|
||||
elif op_str == "Sinh":
|
||||
return np.sinh(a)
|
||||
elif op_str == "Sqrt":
|
||||
return np.sqrt(a)
|
||||
elif op_str == "Tan":
|
||||
return np.tan(a)
|
||||
elif op_str == "Tanh":
|
||||
return np.tanh(a)
|
||||
|
||||
|
||||
def unary_op_exec(op_str, input_list, expected_ov_str=None):
|
||||
"""
|
||||
input_list needs to have deep length of 4
|
||||
"""
|
||||
if not expected_ov_str:
|
||||
expected_ov_str = op_str
|
||||
@pytest.mark.parametrize(("ov_op", "expected_ov_str"), [
|
||||
(ov.abs, "Abs"),
|
||||
(ov.acos, "Acos"),
|
||||
(ov.acosh, "Acosh"),
|
||||
(ov.asin, "Asin"),
|
||||
(ov.asinh, "Asinh"),
|
||||
(ov.atan, "Atan"),
|
||||
(ov.atanh, "Atanh"),
|
||||
(ov.ceiling, "Ceiling"),
|
||||
(ov.cos, "Cos"),
|
||||
(ov.cosh, "Cosh"),
|
||||
(ov.floor, "Floor"),
|
||||
(ov.log, "Log"),
|
||||
(ov.exp, "Exp"),
|
||||
(ov.negative, "Negative"),
|
||||
(ov.sign, "Sign"),
|
||||
(ov.sin, "Sin"),
|
||||
(ov.sinh, "Sinh"),
|
||||
(ov.sqrt, "Sqrt"),
|
||||
(ov.tan, "Tan"),
|
||||
(ov.tanh, "Tanh"),
|
||||
])
|
||||
def test_unary_op(ov_op, expected_ov_str):
|
||||
|
||||
element_type = Type.f32
|
||||
shape = Shape(np.array(input_list).shape)
|
||||
shape = Shape([4])
|
||||
A = Parameter(element_type, shape)
|
||||
node = unary_op(op_str, A)
|
||||
node = ov_op(A)
|
||||
|
||||
assert node.get_type_name() == expected_ov_str
|
||||
assert node.get_output_size() == 1
|
||||
@ -297,126 +92,6 @@ def unary_op_exec(op_str, input_list, expected_ov_str=None):
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_abs():
|
||||
input_list = [-1, 0, 1, 2]
|
||||
op_str = "Abs"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_acos():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Acos"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_acosh():
|
||||
input_list = [2., 3., 1.5, 1.0]
|
||||
op_str = "Acosh"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_asin():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Asin"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_asinh():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Asinh"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_atan():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Atan"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_atanh():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Atanh"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_ceiling():
|
||||
input_list = [0.5, 0, 0.4, 0.5]
|
||||
op_str = "Ceiling"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_cos():
|
||||
input_list = [0, 0.7, 1.7, 3.4]
|
||||
op_str = "Cos"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_cosh():
|
||||
input_list = [-1, 0.0, 0.5, 1]
|
||||
op_str = "Cosh"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_floor():
|
||||
input_list = [-0.5, 0, 0.4, 0.5]
|
||||
op_str = "Floor"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_log():
|
||||
input_list = [1, 2, 3, 4]
|
||||
op_str = "log"
|
||||
unary_op_exec(op_str, input_list, "Log")
|
||||
|
||||
|
||||
def test_exp():
|
||||
input_list = [-1, 0, 1, 2]
|
||||
op_str = "exp"
|
||||
unary_op_exec(op_str, input_list, "Exp")
|
||||
|
||||
|
||||
def test_negative():
|
||||
input_list = [-1, 0, 1, 2]
|
||||
op_str = "negative"
|
||||
unary_op_exec(op_str, input_list, "Negative")
|
||||
|
||||
|
||||
def test_sign():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Sign"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_sin():
|
||||
input_list = [0, 0.7, 1.7, 3.4]
|
||||
op_str = "Sin"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_sinh():
|
||||
input_list = [-1, 0.0, 0.5, 1]
|
||||
op_str = "Sinh"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_sqrt():
|
||||
input_list = [0.0, 0.5, 1, 2]
|
||||
op_str = "Sqrt"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_tan():
|
||||
input_list = [-np.pi / 4, 0, np.pi / 8, np.pi / 8]
|
||||
op_str = "Tan"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_tanh():
|
||||
input_list = [-1, 0, 0.5, 1]
|
||||
op_str = "Tanh"
|
||||
unary_op_exec(op_str, input_list)
|
||||
|
||||
|
||||
def test_reshape():
|
||||
element_type = Type.f32
|
||||
shape = Shape([2, 3])
|
||||
@ -439,25 +114,12 @@ def test_broadcast():
|
||||
assert node.get_output_element_type(0) == element_type
|
||||
|
||||
|
||||
def test_constant():
|
||||
element_type = Type.f32
|
||||
node = Constant(element_type, Shape([3, 3]), list(range(9)))
|
||||
assert node.get_type_name() == "Constant"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == [3, 3]
|
||||
assert node.get_output_element_type(0) == element_type
|
||||
|
||||
|
||||
def test_constant_opset_ov_type():
|
||||
node = ov.constant(np.arange(9).reshape(3, 3), Type.f32)
|
||||
assert node.get_type_name() == "Constant"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == [3, 3]
|
||||
assert node.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_constant_opset_numpy_type():
|
||||
node = ov.constant(np.arange(9).reshape(3, 3), np.float32)
|
||||
@pytest.mark.parametrize("node", [
|
||||
Constant(Type.f32, Shape([3, 3]), list(range(9))),
|
||||
ov.constant(np.arange(9).reshape(3, 3), Type.f32),
|
||||
ov.constant(np.arange(9).reshape(3, 3), np.float32)
|
||||
])
|
||||
def test_constant(node):
|
||||
assert node.get_type_name() == "Constant"
|
||||
assert node.get_output_size() == 1
|
||||
assert list(node.get_output_shape(0)) == [3, 3]
|
||||
|
@ -12,59 +12,54 @@ import openvino.runtime.opset8 as ov


@pytest.mark.parametrize(
("graph_api_helper", "numpy_function", "expected_type"),
("graph_api_helper", "expected_type"),
[
(ov.add, np.add, Type.f32),
(ov.divide, np.divide, Type.f32),
(ov.multiply, np.multiply, Type.f32),
(ov.subtract, np.subtract, Type.f32),
(ov.minimum, np.minimum, Type.f32),
(ov.maximum, np.maximum, Type.f32),
(ov.mod, np.mod, Type.f32),
(ov.equal, np.equal, Type.boolean),
(ov.not_equal, np.not_equal, Type.boolean),
(ov.greater, np.greater, Type.boolean),
(ov.greater_equal, np.greater_equal, Type.boolean),
(ov.less, np.less, Type.boolean),
(ov.less_equal, np.less_equal, Type.boolean),
(ov.add, Type.f32),
(ov.divide, Type.f32),
(ov.multiply, Type.f32),
(ov.subtract, Type.f32),
(ov.minimum, Type.f32),
(ov.maximum, Type.f32),
(ov.mod, Type.f32),
(ov.equal, Type.boolean),
(ov.not_equal, Type.boolean),
(ov.greater, Type.boolean),
(ov.greater_equal, Type.boolean),
(ov.less, Type.boolean),
(ov.less_equal, Type.boolean),
],
)
def test_binary_op(graph_api_helper, numpy_function, expected_type):
def test_binary_op(graph_api_helper, expected_type):
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
parameter_b = ov.parameter(shape, name="B", dtype=np.float32)

model = graph_api_helper(parameter_a, parameter_b)

value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)

expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
assert list(model.get_output_shape(0)) == shape
assert model.get_output_element_type(0) == expected_type


@pytest.mark.parametrize(
("graph_api_helper", "numpy_function", "expected_type"),
("graph_api_helper", "expected_type"),
[
(ov.add, np.add, Type.f32),
(ov.divide, np.divide, Type.f32),
(ov.multiply, np.multiply, Type.f32),
(ov.subtract, np.subtract, Type.f32),
(ov.minimum, np.minimum, Type.f32),
(ov.maximum, np.maximum, Type.f32),
(ov.mod, np.mod, Type.f32),
(ov.equal, np.equal, Type.boolean),
(ov.not_equal, np.not_equal, Type.boolean),
(ov.greater, np.greater, Type.boolean),
(ov.greater_equal, np.greater_equal, Type.boolean),
(ov.less, np.less, Type.boolean),
(ov.less_equal, np.less_equal, Type.boolean),
(ov.add, Type.f32),
(ov.divide, Type.f32),
(ov.multiply, Type.f32),
(ov.subtract, Type.f32),
(ov.minimum, Type.f32),
(ov.maximum, Type.f32),
(ov.mod, Type.f32),
(ov.equal, Type.boolean),
(ov.not_equal, Type.boolean),
(ov.greater, Type.boolean),
(ov.greater_equal, Type.boolean),
(ov.less, Type.boolean),
(ov.less_equal, Type.boolean),
],
)
def test_binary_op_with_scalar(graph_api_helper, numpy_function, expected_type):
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
def test_binary_op_with_scalar(graph_api_helper, expected_type):
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)

shape = [2, 2]
@ -72,38 +67,32 @@ def test_binary_op_with_scalar(graph_api_helper, numpy_function, expected_type):

model = graph_api_helper(parameter_a, value_b)

expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
assert list(model.get_output_shape(0)) == shape
assert model.get_output_element_type(0) == expected_type


@pytest.mark.parametrize(
("graph_api_helper", "numpy_function"),
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
"graph_api_helper",
[ov.logical_and, ov.logical_or, ov.logical_xor],
)
def test_binary_logical_op(graph_api_helper, numpy_function):
def test_binary_logical_op(graph_api_helper):
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=bool)
parameter_b = ov.parameter(shape, name="B", dtype=bool)

model = graph_api_helper(parameter_a, parameter_b)

value_a = np.array([[True, False], [False, True]], dtype=bool)
value_b = np.array([[False, True], [False, True]], dtype=bool)

expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
assert list(model.get_output_shape(0)) == shape
assert model.get_output_element_type(0) == Type.boolean


@pytest.mark.parametrize(
("graph_api_helper", "numpy_function"),
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
"graph_api_helper",
[ov.logical_and, ov.logical_or, ov.logical_xor],
)
def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):
value_a = np.array([[True, False], [False, True]], dtype=bool)
def test_binary_logical_op_with_scalar(graph_api_helper):
value_b = np.array([[False, True], [False, True]], dtype=bool)

shape = [2, 2]
@ -111,29 +100,27 @@ def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):

model = graph_api_helper(parameter_a, value_b)

expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
assert list(model.get_output_shape(0)) == shape
assert model.get_output_element_type(0) == Type.boolean


@pytest.mark.parametrize(
("operator", "numpy_function", "expected_type"),
("operator", "expected_type"),
[
(operator.add, np.add, Type.f32),
(operator.sub, np.subtract, Type.f32),
(operator.mul, np.multiply, Type.f32),
(operator.truediv, np.divide, Type.f32),
(operator.eq, np.equal, Type.boolean),
(operator.ne, np.not_equal, Type.boolean),
(operator.gt, np.greater, Type.boolean),
(operator.ge, np.greater_equal, Type.boolean),
(operator.lt, np.less, Type.boolean),
(operator.le, np.less_equal, Type.boolean),
(operator.add, Type.f32),
(operator.sub, Type.f32),
(operator.mul, Type.f32),
(operator.truediv, Type.f32),
(operator.eq, Type.boolean),
(operator.ne, Type.boolean),
(operator.gt, Type.boolean),
(operator.ge, Type.boolean),
(operator.lt, Type.boolean),
(operator.le, Type.boolean),
],
)
def test_binary_operators(operator, numpy_function, expected_type):
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
def test_binary_operators(operator, expected_type):
value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)

shape = [2, 2]
@ -141,29 +128,27 @@ def test_binary_operators(operator, numpy_function, expected_type):

model = operator(parameter_a, value_b)

expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
assert list(model.get_output_shape(0)) == shape
assert model.get_output_element_type(0) == expected_type


@pytest.mark.parametrize(
("operator", "numpy_function", "expected_type"),
("operator", "expected_type"),
[
(operator.add, np.add, Type.f32),
(operator.sub, np.subtract, Type.f32),
(operator.mul, np.multiply, Type.f32),
(operator.truediv, np.divide, Type.f32),
(operator.eq, np.equal, Type.boolean),
(operator.ne, np.not_equal, Type.boolean),
(operator.gt, np.greater, Type.boolean),
(operator.ge, np.greater_equal, Type.boolean),
(operator.lt, np.less, Type.boolean),
(operator.le, np.less_equal, Type.boolean),
(operator.add, Type.f32),
(operator.sub, Type.f32),
(operator.mul, Type.f32),
(operator.truediv, Type.f32),
(operator.eq, Type.boolean),
(operator.ne, Type.boolean),
(operator.gt, Type.boolean),
(operator.ge, Type.boolean),
(operator.lt, Type.boolean),
(operator.le, Type.boolean),
],
)
def test_binary_operators_with_scalar(operator, numpy_function, expected_type):
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
def test_binary_operators_with_scalar(operator, expected_type):
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)

shape = [2, 2]
@ -171,9 +156,8 @@ def test_binary_operators_with_scalar(operator, numpy_function, expected_type):

model = operator(parameter_a, value_b)

expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
assert list(model.get_output_shape(0)) == shape
assert model.get_output_element_type(0) == expected_type


@ -181,12 +165,11 @@ def test_multiply():
param_a = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1))
param_b = np.arange(35, dtype=np.int32).reshape((7, 1, 5))

expected_shape = np.multiply(param_a, param_b).shape
node = ov.multiply(param_a, param_b)

assert node.get_type_name() == "Multiply"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape)
assert list(node.get_output_shape(0)) == [8, 7, 6, 5]
assert node.get_output_element_type(0) == Type.i32


@ -194,10 +177,9 @@ def test_power_v1():
param_a = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))
param_b = np.arange(20, dtype=np.float32).reshape((4, 1, 5))

expected_shape = np.power(param_a, param_b).shape
node = ov.power(param_a, param_b)

assert node.get_type_name() == "Power"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape)
assert list(node.get_output_shape(0)) == [8, 4, 6, 5]
assert node.get_output_element_type(0) == Type.f32
@ -15,10 +15,9 @@ def test_elu_operator_with_scalar_and_array():

model = ov.elu(data_value, alpha_value)

expected_shape = [2, 2]
assert model.get_type_name() == "Elu"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [2, 2]


def test_elu_operator_with_scalar():
@ -27,10 +26,9 @@ def test_elu_operator_with_scalar():

model = ov.elu(parameter_data, alpha_value)

expected_shape = [2, 2]
assert model.get_type_name() == "Elu"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [2, 2]


def test_fake_quantize():
@ -53,10 +51,9 @@ def test_fake_quantize():
levels,
)

expected_shape = [1, 2, 3, 4]
assert model.get_type_name() == "FakeQuantize"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [1, 2, 3, 4]


def test_depth_to_space():
@ -67,10 +64,9 @@ def test_depth_to_space():

model = ov.depth_to_space(parameter_data, mode, block_size)

expected_shape = [1, 1, 4, 6]
assert model.get_type_name() == "DepthToSpace"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [1, 1, 4, 6]


def test_space_to_batch():
@ -82,10 +78,9 @@ def test_space_to_batch():

model = ov.space_to_batch(parameter_data, block_shape, pads_begin, pads_end)

expected_shape = [12, 1, 1, 2]
assert model.get_type_name() == "SpaceToBatch"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [12, 1, 1, 2]


def test_batch_to_space():
@ -97,10 +92,9 @@ def test_batch_to_space():

model = ov.batch_to_space(parameter_data, block_shape, crops_begin, crops_end)

expected_shape = [1, 2, 2, 3]
assert model.get_type_name() == "BatchToSpace"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [1, 2, 2, 3]


def test_clamp_operator():
@ -111,10 +105,9 @@ def test_clamp_operator():

model = ov.clamp(parameter_data, min_value, max_value)

expected_shape = [2, 2]
assert model.get_type_name() == "Clamp"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [2, 2]


def test_squeeze_operator():
@ -123,10 +116,9 @@ def test_squeeze_operator():
axes = [2, 4]
model = ov.squeeze(parameter_data, axes)

expected_shape = [1, 2, 3, 1]
assert model.get_type_name() == "Squeeze"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
assert list(model.get_output_shape(0)) == [1, 2, 3, 1]


def test_squared_difference_operator():
@ -14,14 +14,13 @@ def test_concat():
input_a = np.array([[1, 2], [3, 4]]).astype(np.float32)
input_b = np.array([[5, 6]]).astype(np.float32)
axis = 0
expected_shape = np.concatenate((input_a, input_b), axis=0).shape

parameter_a = ov.parameter(list(input_a.shape), name="A", dtype=np.float32)
parameter_b = ov.parameter(list(input_b.shape), name="B", dtype=np.float32)
node = ov.concat([parameter_a, parameter_b], axis)
assert node.get_type_name() == "Concat"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape)
assert list(node.get_output_shape(0)) == [3, 2]


@pytest.mark.parametrize(
@ -50,37 +50,33 @@ def test_unary_op_array(graph_api_fn, type_name):
assert list(node.get_output_shape(0)) == [2, 3, 4]


@pytest.mark.parametrize(
("graph_api_fn", "numpy_fn", "input_data"),
[
pytest.param(ov.absolute, np.abs, np.float32(-3)),
pytest.param(ov.abs, np.abs, np.float32(-3)),
pytest.param(ov.acos, np.arccos, np.float32(-0.5)),
pytest.param(ov.asin, np.arcsin, np.float32(-0.5)),
pytest.param(ov.atan, np.arctan, np.float32(-0.5)),
pytest.param(ov.ceiling, np.ceil, np.float32(1.5)),
pytest.param(ov.ceil, np.ceil, np.float32(1.5)),
pytest.param(ov.cos, np.cos, np.float32(np.pi / 4.0)),
pytest.param(ov.cosh, np.cosh, np.float32(np.pi / 4.0)),
pytest.param(ov.exp, np.exp, np.float32(1.5)),
pytest.param(ov.floor, np.floor, np.float32(1.5)),
pytest.param(ov.log, np.log, np.float32(1.5)),
pytest.param(ov.relu, lambda x: np.maximum(0, x), np.float32(-0.125)),
pytest.param(ov.sign, np.sign, np.float32(0.0)),
pytest.param(ov.sin, np.sin, np.float32(np.pi / 4.0)),
pytest.param(ov.sinh, np.sinh, np.float32(0.0)),
pytest.param(ov.sqrt, np.sqrt, np.float32(3.5)),
pytest.param(ov.tan, np.tan, np.float32(np.pi / 4.0)),
pytest.param(ov.tanh, np.tanh, np.float32(0.1234)),
],
)
def test_unary_op_scalar(graph_api_fn, numpy_fn, input_data):
expected_shape = numpy_fn(input_data).shape
node = graph_api_fn(input_data)
@pytest.mark.parametrize("graph_api_fn", [
ov.absolute,
ov.abs,
ov.acos,
ov.asin,
ov.atan,
ov.ceiling,
ov.ceil,
ov.cos,
ov.cosh,
ov.exp,
ov.floor,
ov.log,
ov.relu,
ov.sign,
ov.sin,
ov.sinh,
ov.sqrt,
ov.tan,
ov.tanh,
])
def test_unary_op_scalar(graph_api_fn):
node = graph_api_fn(np.float32(-0.5))

assert node.get_output_size() == 1
assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert list(node.get_output_shape(0)) == list(expected_shape)
assert list(node.get_output_shape(0)) == []


@pytest.mark.parametrize(
@ -5,135 +5,68 @@
import openvino.runtime.opset9 as ov
from openvino.runtime import Shape, Type
import numpy as np
import pytest


np.random.seed(0)


def test_rdft_1d():
input_size = 50
shape = [input_size]
@pytest.mark.parametrize(("shape", "axes", "expected_shape"), [
([50], [0], [26, 2]),
([100, 128], [0, 1], [100, 65, 2]),
([1, 192, 36, 64], [-2, -1], [1, 192, 36, 33, 2]),
])
def test_rdft(shape, axes, expected_shape):
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array([0], dtype=np.int64))
input_axes = ov.constant(np.array(axes, dtype=np.int64))

node = ov.rdft(param, input_axes)
assert node.get_type_name() == "RDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [26, 2]
assert list(node.get_output_shape(0)) == expected_shape
assert node.get_output_element_type(0) == Type.f32


def test_irdft_1d():
signal_size = 50
shape = [signal_size // 2 + 1, 2]
@pytest.mark.parametrize(("shape", "axes", "expected_shape"), [
([100, 65, 2], [0, 1], [100, 128]),
([1, 192, 36, 33, 2], [-2, -1], [1, 192, 36, 64]),
])
def test_irdft(shape, axes, expected_shape):
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array([0], dtype=np.int64))
node = ov.irdft(param, input_axes, ov.constant(np.array([signal_size], dtype=np.int64)))
input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.irdft(param, input_axes)
assert node.get_type_name() == "IRDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [50]
assert list(node.get_output_shape(0)) == expected_shape
assert node.get_output_element_type(0) == Type.f32


def test_rdft_2d():
shape = [100, 128]
@pytest.mark.parametrize(("shape", "axes", "expected_shape", "signal_size"), [
([26, 2], [0], [50], [50]),
([100, 65, 2], [0, 1], [100, 65], [100, 65]),
([1, 192, 36, 33, 2], [-2, -1], [1, 192, 36, 64], [36, 64]),
])
def test_irdft_signal_size(shape, axes, expected_shape, signal_size):
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [0, 1]
input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.rdft(param, input_axes)
assert node.get_type_name() == "RDFT"
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.irdft(param, input_axes, signal_size_node)
assert node.get_type_name() == "IRDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [100, 65, 2]
assert list(node.get_output_shape(0)) == expected_shape
assert node.get_output_element_type(0) == Type.f32


def test_rdft_2d_signal_size():
shape = [100, 128]
@pytest.mark.parametrize(("shape", "axes", "expected_shape", "signal_size"), [
([100, 128], [0, 1], [30, 21, 2], [30, 40]),
([1, 192, 36, 64], [-2, -1], [1, 192, 36, 33, 2], [36, 64]),
])
def test_rdft_signal_size(shape, axes, expected_shape, signal_size):
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [0, 1]
signal_size = [30, 40]
axes_node = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.rdft(param, axes_node, signal_size_node)
assert node.get_type_name() == "RDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [30, 21, 2]
assert node.get_output_element_type(0) == Type.f32


def test_irdft_2d():
axes = [0, 1]
input_shape = [100, 65, 2]
param = ov.parameter(Shape(input_shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.irdft(param, input_axes)
assert node.get_type_name() == "IRDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [100, 128]
assert node.get_output_element_type(0) == Type.f32


def test_irdft_2d_signal_size():
axes = [0, 1]
input_shape = [100, 65, 2]
signal_size = [100, 65]
param = ov.parameter(Shape(input_shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.irdft(param, input_axes, signal_size_node)
assert node.get_type_name() == "IRDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [100, 65]
assert node.get_output_element_type(0) == Type.f32


def test_rdft_4d():
shape = [1, 192, 36, 64]
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.rdft(param, input_axes)
assert node.get_type_name() == "RDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [1, 192, 36, 33, 2]
assert node.get_output_element_type(0) == Type.f32


def test_rdft_4d_signal_size():
shape = [1, 192, 36, 64]
signal_size = [36, 64]
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.rdft(param, input_axes, signal_size_node)
assert node.get_type_name() == "RDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [1, 192, 36, 33, 2]
assert node.get_output_element_type(0) == Type.f32


def test_irdft_4d():
shape = [1, 192, 36, 33, 2]
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.irdft(param, input_axes)
assert node.get_type_name() == "IRDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [1, 192, 36, 64]
assert node.get_output_element_type(0) == Type.f32


def test_irdft_4d_signal_size():
shape = [1, 192, 36, 33, 2]
signal_size = [36, 64]
param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.irdft(param, input_axes, signal_size_node)
assert node.get_type_name() == "IRDFT"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [1, 192, 36, 64]
assert list(node.get_output_shape(0)) == expected_shape
assert node.get_output_element_type(0) == Type.f32
@ -103,7 +103,6 @@ def test_roi_align():
data_shape = [7, 256, 200, 200]
rois = [1000, 4]
batch_indices = [1000]
expected_shape = [1000, 256, 6, 6]

data_parameter = ov.parameter(data_shape, name="Data", dtype=np.float32)
rois_parameter = ov.parameter(rois, name="Rois", dtype=np.float32)
@ -127,7 +126,7 @@ def test_roi_align():

assert node.get_type_name() == "ROIAlign"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
assert list(node.get_output_shape(0)) == [1000, 256, 6, 6]


@pytest.mark.parametrize(
@ -3,29 +3,21 @@
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest

import openvino.runtime.opset8 as ov


def test_onehot():
@pytest.mark.parametrize(("depth", "on_value", "off_value", "axis", "expected_shape"), [
(2, 5, 10, -1, [3, 2]),
(3, 1, 0, 0, [3, 3]),
])
def test_one_hot(depth, on_value, off_value, axis, expected_shape):
param = ov.parameter([3], dtype=np.int32)
model = ov.one_hot(param, 3, 1, 0, 0)
assert model.get_output_size() == 1
assert model.get_type_name() == "OneHot"
assert list(model.get_output_shape(0)) == [3, 3]


def test_one_hot():
data = np.array([0, 1, 2], dtype=np.int32)
depth = 2
on_value = 5
off_value = 10
axis = -1

node = ov.one_hot(data, depth, on_value, off_value, axis)
node = ov.one_hot(param, depth, on_value, off_value, axis)
assert node.get_output_size() == 1
assert node.get_type_name() == "OneHot"
assert list(node.get_output_shape(0)) == [3, 2]
assert list(node.get_output_shape(0)) == expected_shape


def test_range():
@ -3,27 +3,18 @@
# SPDX-License-Identifier: Apache-2.0

import numpy as np
import pytest
import openvino.runtime.opset8 as ov
from openvino.runtime import Shape, Type


def test_swish_props_with_beta():
float_dtype = np.float32
data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data")
beta = ov.parameter(Shape([]), dtype=float_dtype, name="beta")
@pytest.mark.parametrize(("beta"), [
[],
[ov.parameter(Shape([]), dtype=np.float32, name="beta")]])
def test_swish(beta):
data = ov.parameter(Shape([3, 10]), dtype=np.float32, name="data")

node = ov.swish(data, beta)
assert node.get_type_name() == "Swish"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [3, 10]
assert node.get_output_element_type(0) == Type.f32


def test_swish_props_without_beta():
float_dtype = np.float32
data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data")

node = ov.swish(data)
node = ov.swish(data, *beta)
assert node.get_type_name() == "Swish"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [3, 10]