[PyOV] Make graph tests hardware agnostic - part 2 (#14519)

Przemyslaw Wysocki 2022-12-21 10:15:53 +01:00 committed by GitHub
parent 2d5fa2d164
commit 271681a07f
32 changed files with 945 additions and 1692 deletions


@@ -173,7 +173,7 @@ def acosh(node: NodeInput, name: Optional[str] = None) -> Node:
    :param name: Optional new name for output node.
    :return: New node with arccosh operation applied on it.
    """
-    return _get_node_factory_opset4().create("Acosh", [node])
+    return _get_node_factory_opset4().create("Acosh", as_nodes(node))


@nameable_op
@@ -184,7 +184,7 @@ def asinh(node: NodeInput, name: Optional[str] = None) -> Node:
    :param name: Optional new name for output node.
    :return: New node with arcsinh operation applied on it.
    """
-    return _get_node_factory_opset4().create("Asinh", [node])
+    return _get_node_factory_opset4().create("Asinh", as_nodes(node))


@nameable_op
@@ -195,7 +195,7 @@ def atanh(node: NodeInput, name: Optional[str] = None) -> Node:
    :param name: Optional new name for output node.
    :return: New node with arctanh operation applied on it.
    """
-    return _get_node_factory_opset4().create("Atanh", [node])
+    return _get_node_factory_opset4().create("Atanh", as_nodes(node))


@nameable_op
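A side effect of switching from [node] to as_nodes(node) is that these unary factory helpers now go through the usual node-wrapping path, so they should also accept plain numpy data (wrapped into Constant nodes) in addition to existing Node objects. A minimal sketch of that usage, assuming a standard OpenVINO Python installation (the tensor values are illustrative only):

    import numpy as np
    import openvino.runtime.opset4 as ov4

    # Built from a Parameter node, as before.
    node_from_param = ov4.acosh(ov4.parameter([2, 2], dtype=np.float32, name="x"))

    # Built directly from numpy data; as_nodes() wraps the array into a Constant node.
    node_from_array = ov4.acosh(np.ones((2, 2), dtype=np.float32))

    assert node_from_param.get_type_name() == "Acosh"
    assert node_from_array.get_type_name() == "Acosh"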


@@ -50,8 +50,6 @@ xfail_issue_90649 = xfail_test(reason="RuntimeError: OV does not support the fol
                                       "MelWeightMatrix, SequenceMap, STFT")
xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported")
xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
-xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted "
-                                      "to HardSigmoid_IE")
xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements")
xfail_issue_38699 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                               "ai.onnx.preview.training.Gradient")


@@ -12,7 +12,7 @@ import numpy as np
from openvino.runtime import Core
from openvino.runtime.exceptions import UserInputError
-from openvino.runtime import Model, Node, PartialShape, Tensor, Type
+from openvino.runtime import Model, Node, Tensor, Type
from openvino.runtime.utils.types import NumericData, get_shape, get_dtype

import tests


@@ -2,7 +2,6 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

-import json
import numpy as np
import pytest
@@ -15,9 +14,8 @@ from openvino.runtime import Strides, AxisVector, Coordinate, CoordinateDiff
from openvino.runtime import Tensor, OVAny
from openvino._pyopenvino import DescriptorTensor
from openvino.runtime.op import Parameter
-from tests.runtime import get_runtime
-from openvino.runtime.utils.types import get_dtype
-from tests.test_graph.util import run_op_node
+from openvino.runtime.utils.types import get_element_type


def test_graph_function_api():
@@ -87,76 +85,49 @@
        Type.u64,
    ],
)
-def test_simple_computation_on_ndarrays(dtype):
-    runtime = get_runtime()
+def test_simple_model_on_parameters(dtype):
    shape = [2, 2]
    parameter_a = ops.parameter(shape, dtype=dtype, name="A")
    parameter_b = ops.parameter(shape, dtype=dtype, name="B")
    parameter_c = ops.parameter(shape, dtype=dtype, name="C")
    model = (parameter_a + parameter_b) * parameter_c
-    computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
-    np_dtype = get_dtype(dtype) if isinstance(dtype, Type) else dtype
-    value_a = np.array([[1, 2], [3, 4]], dtype=np_dtype)
-    value_b = np.array([[5, 6], [7, 8]], dtype=np_dtype)
-    value_c = np.array([[2, 3], [4, 5]], dtype=np_dtype)
-    result = computation(value_a, value_b, value_c)
-    assert np.allclose(result, np.array([[12, 24], [40, 60]], dtype=np_dtype))
-    value_a = np.array([[9, 10], [11, 12]], dtype=np_dtype)
-    value_b = np.array([[13, 14], [15, 16]], dtype=np_dtype)
-    value_c = np.array([[5, 4], [3, 2]], dtype=np_dtype)
-    result = computation(value_a, value_b, value_c)
-    assert np.allclose(result, np.array([[110, 96], [78, 56]], dtype=np_dtype))
+    expected_type = dtype if isinstance(dtype, Type) else get_element_type(dtype)
+    assert model.get_type_name() == "Multiply"
+    assert model.get_output_size() == 1
+    assert model.get_output_element_type(0) == expected_type
+    assert list(model.get_output_shape(0)) == [2, 2]


-def test_serialization():
-    dtype = np.float32
-    shape = [2, 2]
-    parameter_a = ops.parameter(shape, dtype=dtype, name="A")
-    parameter_b = ops.parameter(shape, dtype=dtype, name="B")
-    parameter_c = ops.parameter(shape, dtype=dtype, name="C")
-    model = (parameter_a + parameter_b) * parameter_c
-    runtime = get_runtime()
-    computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
-    try:
-        serialized = computation.serialize(2)
-        serial_json = json.loads(serialized)
-        assert serial_json[0]["name"] != ""
-        assert 10 == len(serial_json[0]["ops"])
-    except Exception:
-        pass


def test_broadcast_1():
-    input_data = np.array([1, 2, 3], dtype=np.int32)
+    input_data = ops.parameter((3,), name="input_data", dtype=np.int32)
    new_shape = [3, 3]
-    expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
-    result = run_op_node([input_data], ops.broadcast, new_shape)
-    assert np.allclose(result, expected)
+    node = ops.broadcast(input_data, new_shape)
+    assert node.get_type_name() == "Broadcast"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == Type.i32
+    assert list(node.get_output_shape(0)) == [3, 3]


def test_broadcast_2():
-    input_data = np.arange(4, dtype=np.int32)
+    input_data = ops.parameter((4,), name="input_data", dtype=np.int32)
    new_shape = [3, 4, 2, 4]
-    expected = np.broadcast_to(input_data, new_shape)
-    result = run_op_node([input_data], ops.broadcast, new_shape)
-    assert np.allclose(result, expected)
+    expected_shape = np.broadcast_to(input_data, new_shape).shape
+    node = ops.broadcast(input_data, new_shape)
+    assert node.get_type_name() == "Broadcast"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == Type.i32
+    assert list(node.get_output_shape(0)) == list(expected_shape)


def test_broadcast_3():
-    input_data = np.array([1, 2, 3], dtype=np.int32)
+    input_data = ops.parameter((3,), name="input_data", dtype=np.int32)
    new_shape = [3, 3]
    axis_mapping = [0]
-    expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
-    result = run_op_node([input_data], ops.broadcast, new_shape, axis_mapping, "EXPLICIT")
-    assert np.allclose(result, expected)
+    node = ops.broadcast(input_data, new_shape, axis_mapping, "EXPLICIT")
+    assert node.get_type_name() == "Broadcast"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == Type.i32
+    assert list(node.get_output_shape(0)) == [3, 3]


@pytest.mark.parametrize(
@@ -164,10 +135,11 @@ def test_broadcast_3():
    [(bool, np.zeros((2, 2), dtype=np.int32)), ("boolean", np.zeros((2, 2), dtype=np.int32))],
)
def test_convert_to_bool(destination_type, input_data):
-    expected = np.array(input_data, dtype=bool)
-    result = run_op_node([input_data], ops.convert, destination_type)
-    assert np.allclose(result, expected)
-    assert np.array(result).dtype == bool
+    node = ops.convert(input_data, destination_type)
+    assert node.get_type_name() == "Convert"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == Type.boolean
+    assert list(node.get_output_shape(0)) == [2, 2]


@pytest.mark.parametrize(
@@ -182,10 +154,11 @@ def test_convert_to_bool(destination_type, input_data):
def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
    np.random.seed(133391)
    input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype)
-    expected = np.array(input_data, dtype=expected_type)
-    result = run_op_node([input_data], ops.convert, destination_type)
-    assert np.allclose(result, expected)
-    assert np.array(result).dtype == expected_type
+    node = ops.convert(input_data, destination_type)
+    assert node.get_type_name() == "Convert"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == get_element_type(expected_type)
+    assert list(node.get_output_shape(0)) == [2, 2]


@pytest.mark.parametrize(
@@ -205,10 +178,11 @@ def test_convert_to_int(destination_type, expected_type):
    np.random.seed(133391)
    random_data = np.random.rand(2, 3, 4) * 16
    input_data = (np.ceil(-8 + random_data)).astype(expected_type)
-    expected = np.array(input_data, dtype=expected_type)
-    result = run_op_node([input_data], ops.convert, destination_type)
-    assert np.allclose(result, expected)
-    assert np.array(result).dtype == expected_type
+    node = ops.convert(input_data, destination_type)
+    assert node.get_type_name() == "Convert"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == get_element_type(expected_type)
+    assert list(node.get_output_shape(0)) == [2, 3, 4]


@pytest.mark.parametrize(
@@ -227,23 +201,11 @@ def test_convert_to_int(destination_type, expected_type):
def test_convert_to_uint(destination_type, expected_type):
    np.random.seed(133391)
    input_data = np.ceil(np.random.rand(2, 3, 4) * 16).astype(expected_type)
-    expected = np.array(input_data, dtype=expected_type)
-    result = run_op_node([input_data], ops.convert, destination_type)
-    assert np.allclose(result, expected)
-    assert np.array(result).dtype == expected_type
+    node = ops.convert(input_data, destination_type)
+    assert node.get_type_name() == "Convert"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == get_element_type(expected_type)
+    assert list(node.get_output_shape(0)) == [2, 3, 4]


-def test_bad_data_shape():
-    param_a = ops.parameter(shape=[2, 2], name="A", dtype=np.float32)
-    param_b = ops.parameter(shape=[2, 2], name="B")
-    model = param_a + param_b
-    runtime = get_runtime()
-    computation = runtime.computation(model, param_a, param_b)
-    value_a = np.array([[1, 2]], dtype=np.float32)
-    value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
-    with pytest.raises(RuntimeError):
-        computation(value_a, value_b)


def test_constant_get_data_bool():
@@ -289,41 +251,41 @@ def test_constant_get_data_unsigned_integer(data_type):
def test_set_argument():
-    runtime = get_runtime()
    data1 = np.array([1, 2, 3])
    data2 = np.array([4, 5, 6])
    data3 = np.array([7, 8, 9])

    node1 = ops.constant(data1, dtype=np.float32)
    node2 = ops.constant(data2, dtype=np.float32)
-    node3 = ops.constant(data3, dtype=np.float32)
+    node3 = ops.constant(data3, dtype=np.float64)
+    node4 = ops.constant(data3, dtype=np.float64)
    node_add = ops.add(node1, node2)

    # Original arguments
-    computation = runtime.computation(node_add)
-    output = computation()
-    assert np.allclose(data1 + data2, output)
-
-    # Arguments changed by set_argument
-    node_add.set_argument(1, node3.output(0))
-    output = computation()
-    assert np.allclose(data1 + data3, output)
+    node_inputs = node_add.inputs()
+    assert node_inputs[0].get_element_type() == Type.f32
+    assert node_inputs[1].get_element_type() == Type.f32
+    assert len(node_inputs) == 2

    # Arguments changed by set_argument
    node_add.set_argument(0, node3.output(0))
-    output = computation()
-    assert np.allclose(data3 + data3, output)
+    node_add.set_argument(1, node4.output(0))
+    node_inputs = node_add.inputs()
+    assert node_inputs[0].get_element_type() == Type.f64
+    assert node_inputs[1].get_element_type() == Type.f64
+    assert len(node_inputs) == 2

    # Arguments changed by set_argument(OutputVector)
-    node_add.set_arguments([node2.output(0), node3.output(0)])
-    output = computation()
-    assert np.allclose(data2 + data3, output)
+    node_add.set_arguments([node1.output(0), node2.output(0)])
+    assert node_inputs[0].get_element_type() == Type.f32
+    assert node_inputs[1].get_element_type() == Type.f32
+    assert len(node_inputs) == 2

    # Arguments changed by set_arguments(NodeVector)
-    node_add.set_arguments([node1, node2])
-    output = computation()
-    assert np.allclose(data1 + data2, output)
+    node_add.set_arguments([node3, node4])
+    assert node_inputs[0].get_element_type() == Type.f64
+    assert node_inputs[1].get_element_type() == Type.f64
+    assert len(node_inputs) == 2


def test_clone_model():
@@ -351,9 +313,12 @@ def test_clone_model():
def test_result():
-    node = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.float32)
-    result = run_op_node([node], ops.result)
-    assert np.allclose(result, node)
+    input_data = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.float32)
+    node = ops.result(input_data)
+    assert node.get_type_name() == "Result"
+    assert node.get_output_size() == 1
+    assert node.get_output_element_type(0) == Type.f32
+    assert list(node.get_output_shape(0)) == [3, 2]


def test_node_friendly_name():
@@ -566,11 +531,10 @@ def test_multiple_outputs():
    split_first_output = split.output(0)
    relu = ops.relu(split_first_output)
-    runtime = get_runtime()
-    computation = runtime.computation(relu, test_param)
-    output = computation(input_data)
-    assert np.equal(output, expected_output).all()
+    assert relu.get_type_name() == "Relu"
+    assert relu.get_output_size() == 1
+    assert relu.get_output_element_type(0) == Type.f32
+    assert list(relu.get_output_shape(0)) == [4, 2]


def test_sink_function_ctor():
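The pattern across this whole test module is the same: instead of compiling a Model through get_runtime() and comparing computed tensors (which needs a working device), the rewritten tests only inspect properties that are known statically once the node is constructed: its type name, number of outputs, output shape and output element type. A minimal self-contained sketch of that hardware-agnostic style (illustrative values, assuming a standard OpenVINO Python installation):

    import numpy as np
    import openvino.runtime.opset8 as ops
    from openvino.runtime import Type

    parameter_a = ops.parameter([2, 2], dtype=np.float32, name="A")
    parameter_b = ops.parameter([2, 2], dtype=np.float32, name="B")
    node = ops.add(parameter_a, parameter_b)

    # No device and no compilation: only graph metadata is checked.
    assert node.get_type_name() == "Add"
    assert node.get_output_size() == 1
    assert node.get_output_element_type(0) == Type.f32
    assert list(node.get_output_shape(0)) == [2, 2]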


@@ -3,11 +3,19 @@
# SPDX-License-Identifier: Apache-2.0

import numpy as np
+import pytest

+from openvino.runtime import Type
import openvino.runtime.opset8 as ov


-def test_convolution_2d():
+@pytest.mark.parametrize(("strides", "pads_begin", "pads_end", "dilations", "expected_shape"), [
+    (np.array([1, 1]), np.array([1, 1]), np.array([1, 1]), np.array([1, 1]), [1, 1, 9, 9]),
+    (np.array([1, 1]), np.array([0, 0]), np.array([0, 0]), np.array([1, 1]), [1, 1, 7, 7]),
+    (np.array([2, 2]), np.array([0, 0]), np.array([0, 0]), np.array([1, 1]), [1, 1, 4, 4]),
+    (np.array([1, 1]), np.array([0, 0]), np.array([0, 0]), np.array([2, 2]), [1, 1, 5, 5]),
+])
+def test_convolution_2d(strides, pads_begin, pads_end, dilations, expected_shape):
    # input_x should have shape N(batch) x C x H x W
    input_x = ov.parameter((1, 1, 9, 9), name="input_x", dtype=np.float32)
@@ -15,53 +23,11 @@
    # filter weights should have shape M x C x kH x kW
    input_filter = ov.parameter((1, 1, 3, 3), name="input_filter", dtype=np.float32)

-    strides = np.array([1, 1])
-    pads_begin = np.array([1, 1])
-    pads_end = np.array([1, 1])
-    dilations = np.array([1, 1])
-    expected_shape = [1, 1, 9, 9]
-
-    # convolution with padding=1 should produce 9 x 9 output:
-    node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
-    assert node.get_type_name() == "Convolution"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == expected_shape
-
-    # convolution with padding=0 should produce 7 x 7 output:
-    strides = np.array([1, 1])
-    pads_begin = np.array([0, 0])
-    pads_end = np.array([0, 0])
-    dilations = np.array([1, 1])
-    expected_shape = [1, 1, 7, 7]
-    node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
-    assert node.get_type_name() == "Convolution"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == expected_shape
-
-    strides = np.array([2, 2])
-    pads_begin = np.array([0, 0])
-    pads_end = np.array([0, 0])
-    dilations = np.array([1, 1])
-    expected_shape = [1, 1, 4, 4]
-
-    # convolution with strides=2 should produce 4 x 4 output:
-    node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
-    assert node.get_type_name() == "Convolution"
-    assert node.get_output_size() == 1
-    assert list(node.get_output_shape(0)) == expected_shape
-
-    strides = np.array([1, 1])
-    pads_begin = np.array([0, 0])
-    pads_end = np.array([0, 0])
-    dilations = np.array([2, 2])
-    expected_shape = [1, 1, 5, 5]
-
-    # convolution with dilation=2 should produce 5 x 5 output:
    node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
    assert node.get_type_name() == "Convolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_convolution_backprop_data():
@@ -80,6 +46,7 @@ def test_convolution_backprop_data():
    assert deconvolution.get_type_name() == "ConvolutionBackpropData"
    assert deconvolution.get_output_size() == 1
    assert list(deconvolution.get_output_shape(0)) == expected_shape
+    assert deconvolution.get_output_element_type(0) == Type.f32


def test_convolution_v1():
@@ -95,3 +62,4 @@ def test_convolution_v1():
    assert node.get_type_name() == "Convolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32
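The four parametrized cases follow the standard convolution output-size arithmetic: per spatial axis, out = floor((in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride) + 1. A small plain-Python check of the expected shapes used above (no OpenVINO required):

    from math import floor

    def conv_out_dim(in_dim, kernel, stride, pad_begin, pad_end, dilation):
        effective_kernel = dilation * (kernel - 1) + 1  # dilation widens the kernel extent
        return floor((in_dim + pad_begin + pad_end - effective_kernel) / stride) + 1

    # 9x9 input with a 3x3 kernel, matching the parametrized cases above.
    assert conv_out_dim(9, 3, stride=1, pad_begin=1, pad_end=1, dilation=1) == 9
    assert conv_out_dim(9, 3, stride=1, pad_begin=0, pad_end=0, dilation=1) == 7
    assert conv_out_dim(9, 3, stride=2, pad_begin=0, pad_end=0, dilation=1) == 4
    assert conv_out_dim(9, 3, stride=1, pad_begin=0, pad_end=0, dilation=2) == 5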


@@ -2203,7 +2203,7 @@ def test_interpolate_opset10(dtype, expected_shape, shape_calculation_mode):
def test_is_finite_opset10():
    input_shape = [1, 2, 3, 4]
-    input_node = ov.parameter(input_shape, float, name="InputData")
+    input_node = ov.parameter(input_shape, np.float32, name="InputData")
    node = ov_opset10.is_finite(input_node)
    assert node.get_type_name() == "IsFinite"
@@ -2214,7 +2214,7 @@ def test_is_finite_opset10():
def test_is_inf_opset10_default():
    input_shape = [2, 2, 2, 2]
-    input_node = ov.parameter(input_shape, dtype=float, name="InputData")
+    input_node = ov.parameter(input_shape, dtype=np.float32, name="InputData")
    node = ov_opset10.is_inf(input_node)
    assert node.get_type_name() == "IsInf"
@@ -2228,7 +2228,7 @@ def test_is_inf_opset10_default():
def test_is_inf_opset10_custom_attribute():
    input_shape = [2, 2, 2]
-    input_node = ov.parameter(input_shape, dtype=float, name="InputData")
+    input_node = ov.parameter(input_shape, dtype=np.float32, name="InputData")
    attributes = {
        "detect_positive": False,
    }
@@ -2245,7 +2245,7 @@ def test_is_inf_opset10_custom_attribute():
def test_is_inf_opset10_custom_all_attributes():
    input_shape = [2, 2, 2]
-    input_node = ov.parameter(input_shape, dtype=float, name="InputData")
+    input_node = ov.parameter(input_shape, dtype=np.float32, name="InputData")
    attributes = {
        "detect_negative": False,
        "detect_positive": True,
@@ -2263,7 +2263,7 @@ def test_is_inf_opset10_custom_all_attributes():
def test_is_nan_opset10():
    input_shape = [1, 2, 3, 4]
-    input_node = ov.parameter(input_shape, float, name="InputData")
+    input_node = ov.parameter(input_shape, np.float32, name="InputData")
    node = ov_opset10.is_nan(input_node)
    assert node.get_type_name() == "IsNaN"
@@ -2274,7 +2274,7 @@ def test_is_nan_opset10():
def test_unique_opset10():
    input_shape = [1, 2, 3, 4]
-    input_node = ov.parameter(input_shape, float, name="input_data")
+    input_node = ov.parameter(input_shape, np.float32, name="input_data")
    axis = ov.constant([1], np.int32, [1])
    node = ov_opset10.unique(input_node, axis, False, "i32")
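These tests replace the built-in float with an explicit np.float32, which pins the parameter's element type independently of how the bindings map plain Python types, so the expected output type of the created node is unambiguous. A minimal sketch (assuming a recent OpenVINO Python installation with opset10 available):

    import numpy as np
    import openvino.runtime.opset10 as ov
    from openvino.runtime import Type

    # An explicit numpy dtype leaves no doubt about the resulting element type.
    input_node = ov.parameter([1, 2, 3, 4], dtype=np.float32, name="InputData")
    assert input_node.get_output_element_type(0) == Type.f32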


@@ -22,6 +22,7 @@ def test_reverse_sequence():
    assert model.get_type_name() == "ReverseSequence"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == expected_shape
+    assert model.get_output_element_type(0) == Type.i32


def test_pad_edge():
@@ -35,6 +36,7 @@ def test_pad_edge():
    assert model.get_type_name() == "Pad"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == expected_shape
+    assert model.get_output_element_type(0) == Type.i32


def test_pad_constant():
@@ -48,6 +50,7 @@ def test_pad_constant():
    assert model.get_type_name() == "Pad"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == expected_shape
+    assert model.get_output_element_type(0) == Type.i32


def test_select():
@@ -60,6 +63,7 @@ def test_select():
    assert node.get_type_name() == "Select"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.i32


def test_gather_v8_nd():


@@ -2,6 +2,7 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

+from openvino.runtime import Type
import openvino.runtime.opset9 as ov
import numpy as np
@@ -23,6 +24,7 @@ def test_dft_1d():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_2d():
@@ -37,6 +39,7 @@ def test_dft_2d():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_3d():
@@ -51,6 +54,7 @@ def test_dft_3d():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_1d_signal_size():
@@ -66,6 +70,7 @@ def test_dft_1d_signal_size():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_2d_signal_size_1():
@@ -81,6 +86,7 @@ def test_dft_2d_signal_size_1():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_2d_signal_size_2():
@@ -96,6 +102,7 @@ def test_dft_2d_signal_size_2():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_dft_3d_signal_size():
@@ -111,3 +118,4 @@ def test_dft_3d_signal_size():
    assert dft_node.get_type_name() == "DFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == expected_shape
+    assert dft_node.get_output_element_type(0) == Type.f32


@@ -2,6 +2,7 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

+from openvino.runtime import Type
import openvino.runtime.opset8 as ov
import numpy as np
@@ -16,6 +17,7 @@ def test_gather():
    assert node.get_type_name() == "Gather"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_gather_with_scalar_axis():
@@ -28,6 +30,7 @@ def test_gather_with_scalar_axis():
    assert node.get_type_name() == "Gather"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_gather_batch_dims_1():
@@ -41,6 +44,7 @@ def test_gather_batch_dims_1():
    assert node.get_type_name() == "Gather"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_gather_negative_indices():
@@ -53,6 +57,7 @@ def test_gather_negative_indices():
    assert node.get_type_name() == "Gather"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_gather_batch_dims_1_negative_indices():
@@ -66,3 +71,4 @@ def test_gather_batch_dims_1_negative_indices():
    assert node.get_type_name() == "Gather"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


@@ -2,6 +2,7 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

+from openvino.runtime import Type
import openvino.runtime.opset8 as ov
import numpy as np
@@ -23,6 +24,7 @@ def test_idft_1d():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_idft_2d():
@@ -37,6 +39,7 @@ def test_idft_2d():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_idft_3d():
@@ -51,6 +54,7 @@ def test_idft_3d():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_idft_1d_signal_size():
@@ -66,6 +70,7 @@ def test_idft_1d_signal_size():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_idft_2d_signal_size_1():
@@ -81,6 +86,7 @@ def test_idft_2d_signal_size_1():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_idft_2d_signal_size_2():
@@ -96,6 +102,7 @@ def test_idft_2d_signal_size_2():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


def test_idft_3d_signal_size():
@@ -111,3 +118,4 @@ def test_idft_3d_signal_size():
    assert dft_node.get_type_name() == "IDFT"
    assert dft_node.get_output_size() == 1
    assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
+    assert dft_node.get_output_element_type(0) == Type.f32


@@ -6,8 +6,6 @@ import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime import Model
-from tests.runtime import get_runtime

from openvino.runtime.op.util import InvariantInputDescription, BodyOutputDescription
@@ -149,34 +147,33 @@ def check_results(results, expected_results):
def check_if(if_model, cond_val, exp_results):
    last_node = if_model(cond_val)
-    runtime = get_runtime()
-    computation = runtime.computation(last_node)
-    results = computation()
-    check_results(results, exp_results)
+    assert last_node.get_type_name() == exp_results[0]
+    assert last_node.get_output_size() == exp_results[1]
+    assert list(last_node.get_output_shape(0)) == exp_results[2]


def test_if_with_two_outputs():
    check_if(create_simple_if_with_two_outputs, True,
-             [np.array([10], dtype=np.float32), np.array([-20], dtype=np.float32)])
+             ["If", 2, []])
    check_if(create_simple_if_with_two_outputs, False,
-             [np.array([17], dtype=np.float32), np.array([16], dtype=np.float32)])
+             ["If", 2, []])


def test_diff_if_with_two_outputs():
    check_if(create_diff_if_with_two_outputs, True,
-             [np.array([10], dtype=np.float32), np.array([6, 4], dtype=np.float32)])
+             ["If", 2, []])
    check_if(create_diff_if_with_two_outputs, False,
-             [np.array([4], dtype=np.float32), np.array([12, 16], dtype=np.float32)])
+             ["If", 2, []])


def test_simple_if():
-    check_if(simple_if, True, [np.array([6, 4], dtype=np.float32)])
-    check_if(simple_if, False, [np.array([5, 5], dtype=np.float32)])
+    check_if(simple_if, True, ["Relu", 1, [2]])
+    check_if(simple_if, False, ["Relu", 1, [2]])


def test_simple_if_without_body_parameters():
-    check_if(simple_if_without_parameters, True, [np.array([0.7], dtype=np.float32)])
-    check_if(simple_if_without_parameters, False, [np.array([9.0], dtype=np.float32)])
+    check_if(simple_if_without_parameters, True, ["Relu", 1, []])
+    check_if(simple_if_without_parameters, False, ["Relu", 1, []])


def test_simple_if_basic():
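check_if no longer runs the selected branch; exp_results is now a triple of [expected type name, expected output count, expected output shape] checked against the last node returned by the builder. A self-contained sketch of the same convention applied to a trivially constructed node (illustrative only, not one of the if-builders from this file):

    import numpy as np
    import openvino.runtime.opset8 as ov

    # [type name, output count, output shape], as used by the rewritten check_if.
    exp_results = ["Relu", 1, [2]]
    last_node = ov.relu(ov.parameter([2], dtype=np.float32, name="X"))

    assert last_node.get_type_name() == exp_results[0]
    assert last_node.get_output_size() == exp_results[1]
    assert list(last_node.get_output_shape(0)) == exp_results[2]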


@@ -4,6 +4,7 @@
import numpy as np

+from openvino.runtime import Type
import openvino.runtime.opset8 as ov
@@ -15,12 +16,14 @@ def test_lrn():
    assert model.get_type_name() == "LRN"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == [2, 3, 2, 1]
+    assert model.get_output_element_type(0) == Type.f32

    # Test LRN default parameter values
    model = ov.lrn(ov.constant(input_image), ov.constant(axes))
    assert model.get_type_name() == "LRN"
    assert model.get_output_size() == 1
    assert list(model.get_output_shape(0)) == [2, 3, 2, 1]
+    assert model.get_output_element_type(0) == Type.f32


def test_lrn_factory():
@@ -36,6 +39,7 @@ def test_lrn_factory():
    assert node.get_type_name() == "LRN"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_batch_norm():
@@ -51,6 +55,7 @@ def test_batch_norm():
    assert node.get_type_name() == "BatchNormInference"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_mvn_no_variance():
@@ -66,6 +71,7 @@ def test_mvn_no_variance():
    assert node.get_type_name() == "MVN"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


def test_mvn():
@@ -81,3 +87,4 @@ def test_mvn():
    assert node.get_type_name() == "MVN"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == expected_shape
+    assert node.get_output_element_type(0) == Type.f32


@@ -6,9 +6,8 @@
import numpy as np

import openvino.runtime.opset8 as ov
-from openvino.runtime import AxisSet, Model, Shape, Type
+from openvino.runtime import AxisSet, Shape, Type
from openvino.runtime.op import Constant, Parameter
-from tests.runtime import get_runtime


def binary_op(op_str, a, b):
@@ -81,47 +80,40 @@ def binary_op_ref(op_str, a, b):
    return np.power(a, b)


-def binary_op_exec(op_str):
+def binary_op_exec(op_str, expected_ov_str=None):
+    if not expected_ov_str:
+        expected_ov_str = op_str
    element_type = Type.f32
    shape = Shape([2, 2])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
-    parameter_list = [A, B]
-    function = Model([binary_op(op_str, A, B)], parameter_list, "test")
-
-    a_arr = np.array([[1, 6], [7, 4]], dtype=np.float32)
-    b_arr = np.array([[5, 2], [3, 8]], dtype=np.float32)
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, A, B)
-    result = computation(a_arr, b_arr)[0]
-
-    expected = binary_op_ref(op_str, a_arr, b_arr)
-    assert np.allclose(result, expected)
+    node = binary_op(op_str, A, B)
+    assert node.get_type_name() == expected_ov_str
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [2, 2]
+    assert node.get_output_element_type(0) == Type.f32


-def binary_op_comparison(op_str):
+def binary_op_comparison(op_str, expected_ov_str=None):
+    if not expected_ov_str:
+        expected_ov_str = op_str
    element_type = Type.f32
    shape = Shape([2, 2])
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
-    parameter_list = [A, B]
-    function = Model([binary_op(op_str, A, B)], parameter_list, "test")
-
-    a_arr = np.array([[1, 5], [3, 2]], dtype=np.float32)
-    b_arr = np.array([[2, 4], [3, 1]], dtype=np.float32)
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, A, B)
-    result = computation(a_arr, b_arr)[0]
-
-    expected = binary_op_ref(op_str, a_arr, b_arr)
-    assert np.allclose(result, expected)
+    node = binary_op(op_str, A, B)
+    assert node.get_type_name() == expected_ov_str
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [2, 2]
+    assert node.get_output_element_type(0) == Type.boolean


def test_add():
-    binary_op_exec("+")
+    binary_op_exec("+", "Add")


def test_add_op():
@@ -129,27 +121,27 @@ def test_add_op():
def test_sub():
-    binary_op_exec("-")
+    binary_op_exec("-", "Subtract")


def test_sub_op():
-    binary_op_exec("Sub")
+    binary_op_exec("Sub", "Subtract")


def test_mul():
-    binary_op_exec("*")
+    binary_op_exec("*", "Multiply")


def test_mul_op():
-    binary_op_exec("Mul")
+    binary_op_exec("Mul", "Multiply")


def test_div():
-    binary_op_exec("/")
+    binary_op_exec("/", "Divide")


def test_div_op():
-    binary_op_exec("Div")
+    binary_op_exec("Div", "Divide")


def test_maximum():
@@ -169,7 +161,7 @@ def test_greater():
def test_greater_eq():
-    binary_op_comparison("GreaterEq")
+    binary_op_comparison("GreaterEq", "GreaterEqual")


def test_less():
@@ -177,7 +169,7 @@ def test_less():
def test_less_eq():
-    binary_op_comparison("LessEq")
+    binary_op_comparison("LessEq", "LessEqual")


def test_not_equal():
@@ -191,23 +183,12 @@ def test_add_with_mul():
    A = Parameter(element_type, shape)
    B = Parameter(element_type, shape)
    C = Parameter(element_type, shape)
-    parameter_list = [A, B, C]
-    function = Model([ov.multiply(ov.add(A, B), C)], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, A, B, C)
-    result = computation(
-        np.array([1, 2, 3, 4], dtype=np.float32),
-        np.array([5, 6, 7, 8], dtype=np.float32),
-        np.array([9, 10, 11, 12], dtype=np.float32),
-    )[0]
-
-    a_arr = np.array([1, 2, 3, 4], dtype=np.float32)
-    b_arr = np.array([5, 6, 7, 8], dtype=np.float32)
-    c_arr = np.array([9, 10, 11, 12], dtype=np.float32)
-    result_arr_ref = (a_arr + b_arr) * c_arr
-    assert np.allclose(result, result_arr_ref)
+    node = ov.multiply(ov.add(A, B), C)
+    assert node.get_type_name() == "Multiply"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [4]
+    assert node.get_output_element_type(0) == Type.f32


def unary_op(op_str, a):
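The binary_op_exec and binary_op_comparison helpers above now take an optional expected_ov_str because the shorthand used by the tests ("+", "Sub", "GreaterEq", ...) differs from the canonical OpenVINO type names ("Add", "Subtract", "GreaterEqual"), and the comparison variant additionally expects a boolean output element type, since comparison nodes produce masks rather than f32 data. A small self-contained illustration of that distinction (assuming a standard OpenVINO Python installation):

    import numpy as np
    import openvino.runtime.opset8 as ov
    from openvino.runtime import Type

    lhs = ov.parameter([2, 2], dtype=np.float32, name="lhs")
    rhs = ov.parameter([2, 2], dtype=np.float32, name="rhs")

    arithmetic = ov.subtract(lhs, rhs)       # arithmetic ops keep the input element type
    comparison = ov.greater_equal(lhs, rhs)  # comparison ops yield a boolean mask

    assert arithmetic.get_type_name() == "Subtract"
    assert arithmetic.get_output_element_type(0) == Type.f32
    assert comparison.get_type_name() == "GreaterEqual"
    assert comparison.get_output_element_type(0) == Type.boolean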
@@ -298,22 +279,22 @@ def unary_op_ref(op_str, a):
    return np.tanh(a)


-def unary_op_exec(op_str, input_list):
+def unary_op_exec(op_str, input_list, expected_ov_str=None):
    """
    input_list needs to have deep length of 4
    """
+    if not expected_ov_str:
+        expected_ov_str = op_str
    element_type = Type.f32
    shape = Shape(np.array(input_list).shape)
    A = Parameter(element_type, shape)
-    parameter_list = [A]
-    function = Model([unary_op(op_str, A)], parameter_list, "test")
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(np.array(input_list, dtype=np.float32))[0]
-
-    expected = unary_op_ref(op_str, np.array(input_list, dtype=np.float32))
-    assert np.allclose(result, expected)
+    node = unary_op(op_str, A)
+    assert node.get_type_name() == expected_ov_str
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == list(shape)
+    assert node.get_output_element_type(0) == Type.f32


def test_abs():
@@ -385,19 +366,19 @@ def test_floor():
def test_log():
    input_list = [1, 2, 3, 4]
    op_str = "log"
-    unary_op_exec(op_str, input_list)
+    unary_op_exec(op_str, input_list, "Log")


def test_exp():
    input_list = [-1, 0, 1, 2]
    op_str = "exp"
-    unary_op_exec(op_str, input_list)
+    unary_op_exec(op_str, input_list, "Exp")


def test_negative():
    input_list = [-1, 0, 1, 2]
    op_str = "negative"
-    unary_op_exec(op_str, input_list)
+    unary_op_exec(op_str, input_list, "Negative")


def test_sign():
@@ -437,95 +418,62 @@ def test_tanh():
def test_reshape():
    element_type = Type.f32
    shape = Shape([2, 3])
    A = Parameter(element_type, shape)
-    parameter_list = [A]
-    function = Model([ov.reshape(A, Shape([3, 2]), special_zero=False)], parameter_list, "test")
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(np.array(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), dtype=np.float32))[0]
-
-    expected = np.reshape(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32), (3, 2))
-    assert np.allclose(result, expected)
+    node = ov.reshape(A, Shape([3, 2]), special_zero=False)
+    assert node.get_type_name() == "Reshape"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [3, 2]
+    assert node.get_output_element_type(0) == element_type


def test_broadcast():
    element_type = Type.f32
    A = Parameter(element_type, Shape([3]))
-    parameter_list = [A]
-    function = Model([ov.broadcast(A, [3, 3])], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(np.array([1, 2, 3], dtype=np.float32))[0]
-
-    a_arr = np.array([[0], [0], [0]], dtype=np.float32)
-    b_arr = np.array([[1, 2, 3]], dtype=np.float32)
-    expected = np.add(a_arr, b_arr)
-    assert np.allclose(result, expected)
+    node = ov.broadcast(A, [3, 3])
+    assert node.get_type_name() == "Broadcast"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [3, 3]
+    assert node.get_output_element_type(0) == element_type


def test_constant():
    element_type = Type.f32
-    parameter_list = []
-    function = Model([Constant(element_type, Shape([3, 3]), list(range(9)))], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation()[0]
-
-    expected = np.arange(9).reshape(3, 3)
-    assert np.allclose(result, expected)
+    node = Constant(element_type, Shape([3, 3]), list(range(9)))
+    assert node.get_type_name() == "Constant"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [3, 3]
+    assert node.get_output_element_type(0) == element_type


def test_constant_opset_ov_type():
-    parameter_list = []
-    function = Model([ov.constant(np.arange(9).reshape(3, 3), Type.f32)], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation()[0]
-
-    expected = np.arange(9).reshape(3, 3)
-    assert np.allclose(result, expected)
+    node = ov.constant(np.arange(9).reshape(3, 3), Type.f32)
+    assert node.get_type_name() == "Constant"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [3, 3]
+    assert node.get_output_element_type(0) == Type.f32


def test_constant_opset_numpy_type():
-    parameter_list = []
-    function = Model([ov.constant(np.arange(9).reshape(3, 3), np.float32)], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation()[0]
-
-    expected = np.arange(9).reshape(3, 3)
-    assert np.allclose(result, expected)
+    node = ov.constant(np.arange(9).reshape(3, 3), np.float32)
+    assert node.get_type_name() == "Constant"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [3, 3]
+    assert node.get_output_element_type(0) == Type.f32


def test_concat():
    element_type = Type.f32
    A = Parameter(element_type, Shape([1, 2]))
    B = Parameter(element_type, Shape([1, 2]))
    C = Parameter(element_type, Shape([1, 2]))
-    parameter_list = [A, B, C]
-    axis = 0
-    function = Model([ov.concat([A, B, C], axis)], parameter_list, "test")
-
-    a_arr = np.array([[1, 2]], dtype=np.float32)
-    b_arr = np.array([[5, 6]], dtype=np.float32)
-    c_arr = np.array([[7, 8]], dtype=np.float32)
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(a_arr, b_arr, c_arr)[0]
-
-    expected = np.concatenate((a_arr, b_arr, c_arr), axis)
-    assert np.allclose(result, expected)
+    node = ov.concat([A, B, C], axis=0)
+    assert node.get_type_name() == "Concat"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [3, 2]
+    assert node.get_output_element_type(0) == element_type


def test_axisset():
@@ -549,29 +497,17 @@ def test_select():
    A = Parameter(Type.boolean, Shape([1, 2]))
    B = Parameter(element_type, Shape([1, 2]))
    C = Parameter(element_type, Shape([1, 2]))
-    parameter_list = [A, B, C]
-
-    function = Model([ov.select(A, B, C)], parameter_list, "test")
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(
-        np.array([[True, False]], dtype=bool),
-        np.array([[5, 6]], dtype=np.float32),
-        np.array([[7, 8]], dtype=np.float32),
-    )[0]
-
-    expected = np.array([[5, 8]])
-    assert np.allclose(result, expected)
-
-
-def test_max_pool():
-    # test 1d
+    node = ov.select(A, B, C)
+    assert node.get_type_name() == "Select"
+    assert node.get_output_size() == 1
+    assert list(node.get_output_shape(0)) == [1, 2]
+    assert node.get_output_element_type(0) == element_type


+def test_max_pool_1d():
    element_type = Type.f32
    shape = Shape([1, 1, 10])
    A = Parameter(element_type, shape)
-    parameter_list = [A]
-    input_arr = np.arange(10, dtype=np.float32).reshape([1, 1, 10])
    window_shape = [3]

    strides = [1] * len(window_shape)
@@ -593,19 +529,25 @@
        auto_pad,
        idx_elem_type,
    )
-    function = Model([model], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(input_arr)[0]
-
-    expected = (np.arange(8) + 2).reshape(1, 1, 8)
-    assert np.allclose(result, expected)
-
-    # test 1d with strides
+    assert model.get_type_name() == "MaxPool"
+    assert model.get_output_size() == 2
+    assert list(model.get_output_shape(0)) == [1, 1, 8]
+    assert list(model.get_output_shape(1)) == [1, 1, 8]
+    assert model.get_output_element_type(0) == element_type
+    assert model.get_output_element_type(1) == Type.i32


+def test_max_pool_1d_with_strides():
+    element_type = Type.f32
+    shape = Shape([1, 1, 10])
+    A = Parameter(element_type, shape)
+    window_shape = [3]
    strides = [2]
    pads_begin = [0] * len(window_shape)
+    dilations = [1] * len(window_shape)
    pads_end = [0] * len(window_shape)
+    rounding_type = "floor"
+    auto_pad = "explicit"
+    idx_elem_type = "i32"

    model = ov.max_pool(
        A,
@@ -618,23 +560,22 @@
        auto_pad,
        idx_elem_type,
    )
-    function = Model([model], parameter_list, "test")
-
-    size = 4
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(input_arr)[0]
-
-    expected = ((np.arange(size) + 1) * 2).reshape(1, 1, size)
-    assert np.allclose(result, expected)
-
-    # test 2d
+    assert model.get_type_name() == "MaxPool"
+    assert model.get_output_size() == 2
+    assert list(model.get_output_shape(0)) == [1, 1, 4]
+    assert list(model.get_output_shape(1)) == [1, 1, 4]
+    assert model.get_output_element_type(0) == element_type
+    assert model.get_output_element_type(1) == Type.i32


+def test_max_pool_2d():
    element_type = Type.f32
    shape = Shape([1, 1, 10, 10])
    A = Parameter(element_type, shape)
-    parameter_list = [A]
-    input_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
    window_shape = [3, 3]
+    rounding_type = "floor"
+    auto_pad = "explicit"
+    idx_elem_type = "i32"

    strides = [1, 1]
    dilations = [1, 1]
@@ -652,19 +593,26 @@
        auto_pad,
        idx_elem_type,
    )
-    function = Model([model], parameter_list, "test")
-
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(input_arr)[0]
-
-    expected = ((np.arange(100).reshape(10, 10))[2:, 2:]).reshape(1, 1, 8, 8)
-    assert np.allclose(result, expected)
-
-    # test 2d with strides
+    assert model.get_type_name() == "MaxPool"
+    assert model.get_output_size() == 2
+    assert list(model.get_output_shape(0)) == [1, 1, 8, 8]
+    assert list(model.get_output_shape(1)) == [1, 1, 8, 8]
+    assert model.get_output_element_type(0) == element_type
+    assert model.get_output_element_type(1) == Type.i32


+def test_max_pool_2d_with_strides():
+    element_type = Type.f32
+    shape = Shape([1, 1, 10, 10])
+    A = Parameter(element_type, shape)
    strides = [2, 2]
    dilations = [1, 1]
    pads_begin = [0, 0]
    pads_end = [0, 0]
+    window_shape = [3, 3]
+    rounding_type = "floor"
+    auto_pad = "explicit"
+    idx_elem_type = "i32"

    model = ov.max_pool(
        A,
@@ -677,13 +625,12 @@
        auto_pad,
        idx_elem_type,
    )
-    function = Model([model], parameter_list, "test")
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(input_arr)[0]
-
-    size = 4
-    expected = ((np.arange(100).reshape(10, 10))[2::2, 2::2]).reshape(1, 1, size, size)
-    assert np.allclose(result, expected)
+    assert model.get_type_name() == "MaxPool"
+    assert model.get_output_size() == 2
+    assert list(model.get_output_shape(0)) == [1, 1, 4, 4]
+    assert list(model.get_output_shape(1)) == [1, 1, 4, 4]
+    assert model.get_output_element_type(0) == element_type
+    assert model.get_output_element_type(1) == Type.i32
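The expected MaxPool shapes follow the same sliding-window arithmetic as convolution: with a window of 3, no padding and "floor" rounding, a length-10 axis gives floor((10 - 3) / stride) + 1 output positions, i.e. 8 at stride 1 and 4 at stride 2. A quick plain-Python check (no OpenVINO required):

    from math import floor

    def pooled_dim(in_dim, window, stride, pad_begin=0, pad_end=0):
        # "floor" rounding type, dilation 1
        return floor((in_dim + pad_begin + pad_end - window) / stride) + 1

    assert pooled_dim(10, 3, stride=1) == 8  # the 1d and 2d MaxPool tests
    assert pooled_dim(10, 3, stride=2) == 4  # the *_with_strides variants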
def convolution2d(
@@ -733,15 +680,11 @@
def test_convolution_simple():
    element_type = Type.f32
    image_shape = Shape([1, 1, 16, 16])
    filter_shape = Shape([1, 1, 3, 3])
    data = Parameter(element_type, image_shape)
    filters = Parameter(element_type, filter_shape)
-    parameter_list = [data, filters]
-
-    image_arr = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
    filter_arr = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
    filter_arr[0][0][0][0] = -1
    filter_arr[0][0][1][1] = -1
@@ -755,14 +698,11 @@
    dilations = [1, 1]

    model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
-    function = Model([model], parameter_list, "test")
-
-    runtime = get_runtime()
-    computation = runtime.computation(function, *parameter_list)
-    result = computation(image_arr, filter_arr)[0]
-
-    expected = convolution2d(image_arr[0][0], filter_arr[0][0]).reshape(1, 1, 14, 14)
-    assert np.allclose(result, expected)
+    assert model.get_type_name() == "Convolution"
+    assert model.get_output_size() == 1
+    assert list(model.get_output_shape(0)) == [1, 1, 14, 14]
+    assert model.get_output_element_type(0) == element_type


def test_convolution_with_strides():
@ -772,9 +712,6 @@ def test_convolution_with_strides():
filter_shape = Shape([1, 1, 3, 3]) filter_shape = Shape([1, 1, 3, 3])
data = Parameter(element_type, image_shape) data = Parameter(element_type, image_shape)
filters = Parameter(element_type, filter_shape) filters = Parameter(element_type, filter_shape)
parameter_list = [data, filters]
image_arr = np.arange(100, dtype=np.float32).reshape([1, 1, 10, 10])
filter_arr = np.zeros(9, dtype=np.float32).reshape([1, 1, 3, 3]) filter_arr = np.zeros(9, dtype=np.float32).reshape([1, 1, 3, 3])
filter_arr[0][0][1][1] = 1 filter_arr[0][0][1][1] = 1
strides = [2, 2] strides = [2, 2]
@ -783,14 +720,11 @@ def test_convolution_with_strides():
dilations = [1, 1] dilations = [1, 1]
model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
function = Model([model], parameter_list, "test")
runtime = get_runtime() assert model.get_type_name() == "Convolution"
computation = runtime.computation(function, *parameter_list) assert model.get_output_size() == 1
result = computation(image_arr, filter_arr)[0] assert list(model.get_output_shape(0)) == [1, 1, 4, 4]
assert model.get_output_element_type(0) == element_type
expected = convolution2d(image_arr[0][0], filter_arr[0][0], strides).reshape(1, 1, 4, 4)
assert np.allclose(result, expected)
def test_convolution_with_filter_dilation(): def test_convolution_with_filter_dilation():
@ -800,24 +734,17 @@ def test_convolution_with_filter_dilation():
filter_shape = Shape([1, 1, 3, 3]) filter_shape = Shape([1, 1, 3, 3])
data = Parameter(element_type, image_shape) data = Parameter(element_type, image_shape)
filters = Parameter(element_type, filter_shape) filters = Parameter(element_type, filter_shape)
parameter_list = [data, filters]
image_arr = np.arange(100, dtype=np.float32).reshape([1, 1, 10, 10])
filter_arr = np.ones(9, dtype=np.float32).reshape([1, 1, 3, 3])
strides = [1, 1] strides = [1, 1]
pads_begin = [0, 0] pads_begin = [0, 0]
pads_end = [0, 0] pads_end = [0, 0]
dilations = [2, 2] dilations = [2, 2]
model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
function = Model([model], parameter_list, "test")
runtime = get_runtime() assert model.get_type_name() == "Convolution"
computation = runtime.computation(function, *parameter_list) assert model.get_output_size() == 1
result = computation(image_arr, filter_arr)[0] assert list(model.get_output_shape(0)) == [1, 1, 6, 6]
assert model.get_output_element_type(0) == element_type
expected = convolution2d(image_arr[0][0], filter_arr[0][0], strides, dilations).reshape([1, 1, 6, 6])
assert np.allclose(result, expected)
def test_convolution_with_padding(): def test_convolution_with_padding():
@ -827,9 +754,6 @@ def test_convolution_with_padding():
filter_shape = Shape([1, 1, 3, 3]) filter_shape = Shape([1, 1, 3, 3])
data = Parameter(element_type, image_shape) data = Parameter(element_type, image_shape)
filters = Parameter(element_type, filter_shape) filters = Parameter(element_type, filter_shape)
parameter_list = [data, filters]
image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3) filter_arr = np.zeros(9, dtype=np.float32).reshape(1, 1, 3, 3)
filter_arr[0][0][1][1] = 1 filter_arr[0][0][1][1] = 1
strides = [1, 1] strides = [1, 1]
@ -838,16 +762,11 @@ def test_convolution_with_padding():
pads_end = [0, 0] pads_end = [0, 0]
model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
function = Model([model], parameter_list, "test")
runtime = get_runtime() assert model.get_type_name() == "Convolution"
computation = runtime.computation(function, *parameter_list) assert model.get_output_size() == 1
result = computation(image_arr, filter_arr)[0] assert list(model.get_output_shape(0)) == [1, 1, 6, 6]
assert model.get_output_element_type(0) == element_type
expected = convolution2d(
image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end
).reshape([1, 1, 6, 6])
assert np.allclose(result, expected)
def test_convolution_with_non_zero_padding(): def test_convolution_with_non_zero_padding():
@ -856,9 +775,6 @@ def test_convolution_with_non_zero_padding():
filter_shape = Shape([1, 1, 3, 3]) filter_shape = Shape([1, 1, 3, 3])
data = Parameter(element_type, image_shape) data = Parameter(element_type, image_shape)
filters = Parameter(element_type, filter_shape) filters = Parameter(element_type, filter_shape)
parameter_list = [data, filters]
image_arr = np.arange(100, dtype=np.float32).reshape(1, 1, 10, 10)
filter_arr = (np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)) * -1 filter_arr = (np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)) * -1
filter_arr[0][0][1][1] = 1 filter_arr[0][0][1][1] = 1
strides = [1, 1] strides = [1, 1]
@ -867,13 +783,8 @@ def test_convolution_with_non_zero_padding():
pads_end = [1, 2] pads_end = [1, 2]
model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations) model = ov.convolution(data, filters, strides, pads_begin, pads_end, dilations)
function = Model([model], parameter_list, "test")
runtime = get_runtime() assert model.get_type_name() == "Convolution"
computation = runtime.computation(function, *parameter_list) assert model.get_output_size() == 1
result = computation(image_arr, filter_arr)[0] assert list(model.get_output_shape(0)) == [1, 1, 9, 9]
assert model.get_output_element_type(0) == element_type
expected = convolution2d(
image_arr[0][0], filter_arr[0][0], strides, dilations, pads_begin, pads_end
).reshape([1, 1, 9, 9])
assert np.allclose(result, expected)
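Each convolution test above now asserts only the statically inferred output shape. Those shapes follow the usual convolution size formula and can be derived without executing anything; a small sketch (the conv_out_dim helper is illustrative, not from this PR) that reproduces the [1, 1, 6, 6] case with dilation 2 and no padding:

import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime import Type

def conv_out_dim(size, kernel, stride, dilation, pad_begin, pad_end):
    # Standard output-size formula behind the shapes asserted above.
    effective_kernel = (kernel - 1) * dilation + 1
    return (size + pad_begin + pad_end - effective_kernel) // stride + 1

data = ov.parameter([1, 1, 10, 10], name="data", dtype=np.float32)
filters = ov.parameter([1, 1, 3, 3], name="filters", dtype=np.float32)

node = ov.convolution(data, filters, [1, 1], [0, 0], [0, 0], [2, 2])
expected = conv_out_dim(10, 3, 1, 2, 0, 0)  # -> 6
assert node.get_type_name() == "Convolution"
assert list(node.get_output_shape(0)) == [1, 1, expected, expected]
assert node.get_output_element_type(0) == Type.f32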

View File

@ -7,28 +7,29 @@ import operator
import numpy as np import numpy as np
import pytest import pytest
from openvino.runtime import Type
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
@pytest.mark.parametrize( @pytest.mark.parametrize(
("graph_api_helper", "numpy_function"), ("graph_api_helper", "numpy_function", "expected_type"),
[ [
(ov.add, np.add), (ov.add, np.add, Type.f32),
(ov.divide, np.divide), (ov.divide, np.divide, Type.f32),
(ov.multiply, np.multiply), (ov.multiply, np.multiply, Type.f32),
(ov.subtract, np.subtract), (ov.subtract, np.subtract, Type.f32),
(ov.minimum, np.minimum), (ov.minimum, np.minimum, Type.f32),
(ov.maximum, np.maximum), (ov.maximum, np.maximum, Type.f32),
(ov.mod, np.mod), (ov.mod, np.mod, Type.f32),
(ov.equal, np.equal), (ov.equal, np.equal, Type.boolean),
(ov.not_equal, np.not_equal), (ov.not_equal, np.not_equal, Type.boolean),
(ov.greater, np.greater), (ov.greater, np.greater, Type.boolean),
(ov.greater_equal, np.greater_equal), (ov.greater_equal, np.greater_equal, Type.boolean),
(ov.less, np.less), (ov.less, np.less, Type.boolean),
(ov.less_equal, np.less_equal), (ov.less_equal, np.less_equal, Type.boolean),
], ],
) )
def test_binary_op(graph_api_helper, numpy_function): def test_binary_op(graph_api_helper, numpy_function, expected_type):
shape = [2, 2] shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32) parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
parameter_b = ov.parameter(shape, name="B", dtype=np.float32) parameter_b = ov.parameter(shape, name="B", dtype=np.float32)
@ -41,27 +42,28 @@ def test_binary_op(graph_api_helper, numpy_function):
expected_shape = numpy_function(value_a, value_b).shape expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1 assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape) assert list(model.get_output_shape(0)) == list(expected_shape)
assert model.get_output_element_type(0) == expected_type
@pytest.mark.parametrize( @pytest.mark.parametrize(
("graph_api_helper", "numpy_function"), ("graph_api_helper", "numpy_function", "expected_type"),
[ [
(ov.add, np.add), (ov.add, np.add, Type.f32),
(ov.divide, np.divide), (ov.divide, np.divide, Type.f32),
(ov.multiply, np.multiply), (ov.multiply, np.multiply, Type.f32),
(ov.subtract, np.subtract), (ov.subtract, np.subtract, Type.f32),
(ov.minimum, np.minimum), (ov.minimum, np.minimum, Type.f32),
(ov.maximum, np.maximum), (ov.maximum, np.maximum, Type.f32),
(ov.mod, np.mod), (ov.mod, np.mod, Type.f32),
(ov.equal, np.equal), (ov.equal, np.equal, Type.boolean),
(ov.not_equal, np.not_equal), (ov.not_equal, np.not_equal, Type.boolean),
(ov.greater, np.greater), (ov.greater, np.greater, Type.boolean),
(ov.greater_equal, np.greater_equal), (ov.greater_equal, np.greater_equal, Type.boolean),
(ov.less, np.less), (ov.less, np.less, Type.boolean),
(ov.less_equal, np.less_equal), (ov.less_equal, np.less_equal, Type.boolean),
], ],
) )
def test_binary_op_with_scalar(graph_api_helper, numpy_function): def test_binary_op_with_scalar(graph_api_helper, numpy_function, expected_type):
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
@ -73,6 +75,7 @@ def test_binary_op_with_scalar(graph_api_helper, numpy_function):
expected_shape = numpy_function(value_a, value_b).shape expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1 assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape) assert list(model.get_output_shape(0)) == list(expected_shape)
assert model.get_output_element_type(0) == expected_type
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -92,6 +95,7 @@ def test_binary_logical_op(graph_api_helper, numpy_function):
expected_shape = numpy_function(value_a, value_b).shape expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1 assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape) assert list(model.get_output_shape(0)) == list(expected_shape)
assert model.get_output_element_type(0) == Type.boolean
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -110,24 +114,25 @@ def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):
expected_shape = numpy_function(value_a, value_b).shape expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1 assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape) assert list(model.get_output_shape(0)) == list(expected_shape)
assert model.get_output_element_type(0) == Type.boolean
@pytest.mark.parametrize( @pytest.mark.parametrize(
("operator", "numpy_function"), ("operator", "numpy_function", "expected_type"),
[ [
(operator.add, np.add), (operator.add, np.add, Type.f32),
(operator.sub, np.subtract), (operator.sub, np.subtract, Type.f32),
(operator.mul, np.multiply), (operator.mul, np.multiply, Type.f32),
(operator.truediv, np.divide), (operator.truediv, np.divide, Type.f32),
(operator.eq, np.equal), (operator.eq, np.equal, Type.boolean),
(operator.ne, np.not_equal), (operator.ne, np.not_equal, Type.boolean),
(operator.gt, np.greater), (operator.gt, np.greater, Type.boolean),
(operator.ge, np.greater_equal), (operator.ge, np.greater_equal, Type.boolean),
(operator.lt, np.less), (operator.lt, np.less, Type.boolean),
(operator.le, np.less_equal), (operator.le, np.less_equal, Type.boolean),
], ],
) )
def test_binary_operators(operator, numpy_function): def test_binary_operators(operator, numpy_function, expected_type):
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[4, 5], [1, 7]], dtype=np.float32) value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)
@ -139,24 +144,25 @@ def test_binary_operators(operator, numpy_function):
expected_shape = numpy_function(value_a, value_b).shape expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1 assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape) assert list(model.get_output_shape(0)) == list(expected_shape)
assert model.get_output_element_type(0) == expected_type
@pytest.mark.parametrize( @pytest.mark.parametrize(
("operator", "numpy_function"), ("operator", "numpy_function", "expected_type"),
[ [
(operator.add, np.add), (operator.add, np.add, Type.f32),
(operator.sub, np.subtract), (operator.sub, np.subtract, Type.f32),
(operator.mul, np.multiply), (operator.mul, np.multiply, Type.f32),
(operator.truediv, np.divide), (operator.truediv, np.divide, Type.f32),
(operator.eq, np.equal), (operator.eq, np.equal, Type.boolean),
(operator.ne, np.not_equal), (operator.ne, np.not_equal, Type.boolean),
(operator.gt, np.greater), (operator.gt, np.greater, Type.boolean),
(operator.ge, np.greater_equal), (operator.ge, np.greater_equal, Type.boolean),
(operator.lt, np.less), (operator.lt, np.less, Type.boolean),
(operator.le, np.less_equal), (operator.le, np.less_equal, Type.boolean),
], ],
) )
def test_binary_operators_with_scalar(operator, numpy_function): def test_binary_operators_with_scalar(operator, numpy_function, expected_type):
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32) value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32) value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
@ -168,6 +174,7 @@ def test_binary_operators_with_scalar(operator, numpy_function):
expected_shape = numpy_function(value_a, value_b).shape expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1 assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape) assert list(model.get_output_shape(0)) == list(expected_shape)
assert model.get_output_element_type(0) == expected_type
def test_multiply(): def test_multiply():
@ -180,6 +187,7 @@ def test_multiply():
assert node.get_type_name() == "Multiply" assert node.get_type_name() == "Multiply"
assert node.get_output_size() == 1 assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape) assert list(node.get_output_shape(0)) == list(expected_shape)
assert node.get_output_element_type(0) == Type.i32
def test_power_v1(): def test_power_v1():
@ -192,3 +200,4 @@ def test_power_v1():
assert node.get_type_name() == "Power" assert node.get_type_name() == "Power"
assert node.get_output_size() == 1 assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape) assert list(node.get_output_shape(0)) == list(expected_shape)
assert node.get_output_element_type(0) == Type.f32
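The extra expected_type column added to these parametrizations encodes a simple inference rule: arithmetic ops keep the input precision, while comparisons always produce boolean outputs. A short sketch of that rule using two of the helpers from the tables above:

import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime import Type

shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
parameter_b = ov.parameter(shape, name="B", dtype=np.float32)

# Arithmetic keeps the f32 precision of its inputs ...
assert ov.add(parameter_a, parameter_b).get_output_element_type(0) == Type.f32
# ... while comparisons are always inferred as boolean.
comparison = ov.greater(parameter_a, parameter_b)
assert comparison.get_output_element_type(0) == Type.boolean
assert list(comparison.get_output_shape(0)) == [2, 2]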

View File

@ -5,9 +5,8 @@
import numpy as np import numpy as np
import pytest import pytest
import openvino.runtime as ov_runtime
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests import xfail_issue_36486
def test_elu_operator_with_scalar_and_array(): def test_elu_operator_with_scalar_and_array():
@ -131,289 +130,126 @@ def test_squeeze_operator():
def test_squared_difference_operator(): def test_squared_difference_operator():
runtime = get_runtime()
x1_shape = [1, 2, 3, 4] x1_shape = [1, 2, 3, 4]
x2_shape = [2, 3, 4] x2_shape = [2, 3, 4]
parameter_x1 = ov.parameter(x1_shape, name="x1", dtype=np.float32) parameter_x1 = ov.parameter(x1_shape, name="x1", dtype=np.float32)
parameter_x2 = ov.parameter(x2_shape, name="x2", dtype=np.float32) parameter_x2 = ov.parameter(x2_shape, name="x2", dtype=np.float32)
x1_value = np.arange(24.0, dtype=np.float32).reshape(x1_shape)
x2_value = np.arange(start=4.0, stop=28.0, step=1.0, dtype=np.float32).reshape(x2_shape)
model = ov.squared_difference(parameter_x1, parameter_x2) model = ov.squared_difference(parameter_x1, parameter_x2)
computation = runtime.computation(model, parameter_x1, parameter_x2) assert model.get_type_name() == "SquaredDifference"
assert model.get_output_size() == 1
result = computation(x1_value, x2_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.square(np.subtract(x1_value, x2_value)) assert list(model.get_output_shape(0)) == [1, 2, 3, 4]
assert np.allclose(result, expected)
def test_shuffle_channels_operator(): def test_shuffle_channels_operator():
runtime = get_runtime()
data_shape = [1, 15, 2, 2] data_shape = [1, 15, 2, 2]
axis = 1 axis = 1
groups = 5 groups = 5
parameter = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter = ov.parameter(data_shape, name="Data", dtype=np.float32)
data_value = np.arange(60.0, dtype=np.float32).reshape(data_shape)
model = ov.shuffle_channels(parameter, axis, groups) model = ov.shuffle_channels(parameter, axis, groups)
computation = runtime.computation(model, parameter) assert model.get_type_name() == "ShuffleChannels"
assert model.get_output_size() == 1
result = computation(data_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array( assert list(model.get_output_shape(0)) == [1, 15, 2, 2]
[
[
[[0.0, 1.0], [2.0, 3.0]],
[[12.0, 13.0], [14.0, 15.0]],
[[24.0, 25.0], [26.0, 27.0]],
[[36.0, 37.0], [38.0, 39.0]],
[[48.0, 49.0], [50.0, 51.0]],
[[4.0, 5.0], [6.0, 7.0]],
[[16.0, 17.0], [18.0, 19.0]],
[[28.0, 29.0], [30.0, 31.0]],
[[40.0, 41.0], [42.0, 43.0]],
[[52.0, 53.0], [54.0, 55.0]],
[[8.0, 9.0], [10.0, 11.0]],
[[20.0, 21.0], [22.0, 23.0]],
[[32.0, 33.0], [34.0, 35.0]],
[[44.0, 45.0], [46.0, 47.0]],
[[56.0, 57.0], [58.0, 59.0]],
],
],
dtype=np.float32,
)
assert np.allclose(result, expected)
def test_unsqueeze(): def test_unsqueeze():
runtime = get_runtime()
data_shape = [3, 4, 5] data_shape = [3, 4, 5]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
data_value = np.arange(60.0, dtype=np.float32).reshape(3, 4, 5)
axes = [0, 4] axes = [0, 4]
model = ov.unsqueeze(parameter_data, axes)
computation = runtime.computation(model, parameter_data)
result = computation(data_value) model = ov.unsqueeze(parameter_data, axes)
expected = np.arange(60.0, dtype=np.float32).reshape([1, 3, 4, 5, 1]) assert model.get_type_name() == "Unsqueeze"
assert np.allclose(result, expected) assert model.get_output_size() == 1
assert model.get_output_element_type(0) == ov_runtime.Type.f32
assert list(model.get_output_shape(0)) == [1, 3, 4, 5, 1]
def test_grn_operator(): def test_grn_operator():
runtime = get_runtime()
data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape([1, 2, 3, 4])
bias = np.float32(1e-6) bias = np.float32(1e-6)
data_shape = [1, 2, 3, 4] data_shape = [1, 2, 3, 4]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.grn(parameter_data, bias) model = ov.grn(parameter_data, bias)
computation = runtime.computation(model, parameter_data) assert model.get_type_name() == "GRN"
assert model.get_output_size() == 1
result = computation(data_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array( assert list(model.get_output_shape(0)) == [1, 2, 3, 4]
[
[
[
[0.0766965, 0.14142136, 0.19611613, 0.24253564],
[0.28216633, 0.31622776, 0.34570536, 0.37139067],
[0.39391932, 0.41380295, 0.4314555, 0.4472136],
],
[
[0.9970545, 0.98994946, 0.9805807, 0.97014254],
[0.9593655, 0.9486833, 0.9383431, 0.9284767],
[0.91914505, 0.9103665, 0.9021342, 0.8944272],
],
],
],
dtype=np.float32,
)
assert np.allclose(result, expected)
def test_prelu_operator(): def test_prelu_operator():
runtime = get_runtime()
data_shape = [1, 2, 3, 4] data_shape = [1, 2, 3, 4]
slope_shape = [2, 3, 1] slope_shape = [2, 3, 1]
data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape) data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape)
slope_value = np.arange(start=-10.0, stop=-4.0, dtype=np.float32).reshape(slope_shape) slope_value = np.arange(start=-10.0, stop=-4.0, dtype=np.float32).reshape(slope_shape)
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
parameter_slope = ov.parameter(slope_shape, name="Slope", dtype=np.float32) parameter_slope = ov.parameter(slope_shape, name="Slope", dtype=np.float32)
model = ov.prelu(parameter_data, parameter_slope) model = ov.prelu(parameter_data, parameter_slope)
computation = runtime.computation(model, parameter_data, parameter_slope)
result = computation(data_value, slope_value)
expected = np.clip(data_value, 0, np.inf) + np.clip(data_value, -np.inf, 0) * slope_value expected = np.clip(data_value, 0, np.inf) + np.clip(data_value, -np.inf, 0) * slope_value
assert np.allclose(result, expected) assert model.get_type_name() == "PRelu"
assert model.get_output_size() == 1
assert model.get_output_element_type(0) == ov_runtime.Type.f32
assert list(model.get_output_shape(0)) == list(expected.shape)
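Note how the PRelu test keeps its NumPy reference but only consumes its broadcast shape; the values are never compared against an executed graph anymore. A condensed sketch of that idea:

import numpy as np
import openvino.runtime.opset8 as ov

data_value = np.arange(1.0, 25.0, dtype=np.float32).reshape([1, 2, 3, 4])
slope_value = np.arange(-10.0, -4.0, dtype=np.float32).reshape([2, 3, 1])

# Only the broadcast shape of the reference matters for the graph-level check.
reference = np.clip(data_value, 0, np.inf) + np.clip(data_value, -np.inf, 0) * slope_value

node = ov.prelu(ov.parameter([1, 2, 3, 4], name="Data", dtype=np.float32),
                ov.parameter([2, 3, 1], name="Slope", dtype=np.float32))
assert node.get_type_name() == "PRelu"
assert list(node.get_output_shape(0)) == list(reference.shape)  # [1, 2, 3, 4]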
def test_selu_operator(): def test_selu_operator():
runtime = get_runtime()
data_shape = [4, 2, 3, 1] data_shape = [4, 2, 3, 1]
data = np.arange(start=-1.0, stop=23.0, dtype=np.float32).reshape(data_shape)
alpha = np.array(1.6733, dtype=np.float32) alpha = np.array(1.6733, dtype=np.float32)
lambda_value = np.array(1.0507, dtype=np.float32) lambda_value = np.array(1.0507, dtype=np.float32)
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.selu(parameter_data, alpha, lambda_value) model = ov.selu(parameter_data, alpha, lambda_value)
computation = runtime.computation(model, parameter_data) assert model.get_type_name() == "Selu"
assert model.get_output_size() == 1
result = computation(data) assert model.get_output_element_type(0) == ov_runtime.Type.f32
mask = (data > 0) * data + (data <= 0) * (alpha * np.exp(data) - alpha) assert list(model.get_output_shape(0)) == [4, 2, 3, 1]
expected = mask * lambda_value
assert np.allclose(result, expected)
@xfail_issue_36486
def test_hard_sigmoid_operator(): def test_hard_sigmoid_operator():
runtime = get_runtime()
data_shape = [3] data_shape = [3]
alpha_value = np.float32(0.5)
beta_value = np.float32(0.6)
data_value = np.array([-1, 0, 1], dtype=np.float32)
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
parameter_alpha = ov.parameter([], name="Alpha", dtype=np.float32) parameter_alpha = ov.parameter([], name="Alpha", dtype=np.float32)
parameter_beta = ov.parameter([], name="Beta", dtype=np.float32) parameter_beta = ov.parameter([], name="Beta", dtype=np.float32)
model = ov.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta) model = ov.hard_sigmoid(parameter_data, parameter_alpha, parameter_beta)
computation = runtime.computation(model, parameter_data, parameter_alpha, parameter_beta) assert model.get_type_name() == "HardSigmoid"
assert model.get_output_size() == 1
result = computation(data_value, alpha_value, beta_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = [0.1, 0.6, 1.0] assert list(model.get_output_shape(0)) == [3]
assert np.allclose(result, expected)
def test_mvn_operator(): def test_mvn_operator():
runtime = get_runtime()
data_shape = [3, 3, 3, 1] data_shape = [3, 3, 3, 1]
axes = [0, 2, 3] axes = [0, 2, 3]
normalize_variance = True normalize_variance = True
eps = np.float32(1e-9) eps = np.float32(1e-9)
eps_mode = "outside_sqrt" eps_mode = "outside_sqrt"
data_value = np.array(
[
[
[[0.8439683], [0.5665144], [0.05836735]],
[[0.02916367], [0.12964272], [0.5060197]],
[[0.79538304], [0.9411346], [0.9546573]],
],
[
[[0.17730942], [0.46192095], [0.26480448]],
[[0.6746842], [0.01665257], [0.62473077]],
[[0.9240844], [0.9722341], [0.11965699]],
],
[
[[0.41356155], [0.9129373], [0.59330076]],
[[0.81929934], [0.7862604], [0.11799799]],
[[0.69248444], [0.54119414], [0.07513223]],
],
],
dtype=np.float32,
)
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.mvn(parameter_data, axes, normalize_variance, eps, eps_mode) model = ov.mvn(parameter_data, axes, normalize_variance, eps, eps_mode)
computation = runtime.computation(model, parameter_data) assert model.get_type_name() == "MVN"
assert model.get_output_size() == 1
result = computation(data_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
assert list(model.get_output_shape(0)) == [3, 3, 3, 1]
expected = np.array(
[
[
[[1.3546423], [0.33053496], [-1.5450814]],
[[-1.2106764], [-0.8925952], [0.29888135]],
[[0.38083088], [0.81808794], [0.85865635]],
],
[
[[-1.1060555], [-0.05552877], [-0.78310335]],
[[0.83281356], [-1.250282], [0.67467856]],
[[0.7669372], [0.9113869], [-1.6463585]],
],
[
[[-0.23402764], [1.6092131], [0.42940593]],
[[1.2906139], [1.1860244], [-0.92945826]],
[[0.0721334], [-0.38174], [-1.7799333]],
],
],
dtype=np.float32,
)
assert np.allclose(result, expected)
@pytest.mark.skip(reason="Sporadically failed. Need further investigation. Ticket - 95970") @pytest.mark.skip(reason="Sporadically failed. Need further investigation. Ticket - 95970")
def test_space_to_depth_operator(): def test_space_to_depth_operator():
runtime = get_runtime()
data_shape = [1, 2, 4, 4] data_shape = [1, 2, 4, 4]
data_value = np.arange(start=0, stop=32, step=1.0, dtype=np.float32).reshape(data_shape)
mode = "blocks_first" mode = "blocks_first"
block_size = 2 block_size = 2
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.space_to_depth(parameter_data, mode, block_size) model = ov.space_to_depth(parameter_data, mode, block_size)
computation = runtime.computation(model, parameter_data) assert model.get_type_name() == "SpaceToDepth"
assert model.get_output_size() == 1
result = computation(data_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array( assert list(model.get_output_shape(0)) == [1, 8, 2, 2]
[
0,
2,
8,
10,
16,
18,
24,
26,
1,
3,
9,
11,
17,
19,
25,
27,
4,
6,
12,
14,
20,
22,
28,
30,
5,
7,
13,
15,
21,
23,
29,
31,
],
dtype=np.float32,
).reshape(1, 8, 2, 2)
assert np.allclose(result, expected)
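The SpaceToDepth assertion above follows directly from the op's shape rule: in blocks_first mode with block_size 2, the channel dimension grows by block_size squared while each spatial dimension shrinks by block_size. A sketch using the same call as the test:

import numpy as np
import openvino.runtime.opset8 as ov

parameter_data = ov.parameter([1, 2, 4, 4], name="Data", dtype=np.float32)
node = ov.space_to_depth(parameter_data, "blocks_first", 2)

# Channels: 2 * 2**2 = 8; spatial dims: 4 / 2 = 2.
assert node.get_type_name() == "SpaceToDepth"
assert list(node.get_output_shape(0)) == [1, 8, 2, 2]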
batch_size = 2 batch_size = 2
input_size = 3 input_size = 3
@ -431,41 +267,6 @@ def test_space_to_depth_operator():
parameter_r = ov.parameter(r_shape, name="R", dtype=np.float32) parameter_r = ov.parameter(r_shape, name="R", dtype=np.float32)
parameter_b = ov.parameter(b_shape, name="B", dtype=np.float32) parameter_b = ov.parameter(b_shape, name="B", dtype=np.float32)
x_value = np.array(
[0.3432185, 0.612268, 0.20272376, 0.9513413, 0.30585995, 0.7265472], dtype=np.float32,
).reshape(x_shape)
h_t_value = np.array(
[0.12444675, 0.52055854, 0.46489045, 0.4983964, 0.7730452, 0.28439692], dtype=np.float32,
).reshape(h_t_shape)
w_value = np.array(
[
0.41930267,
0.7872176,
0.89940447,
0.23659843,
0.24676207,
0.17101714,
0.3147149,
0.6555601,
0.4559603,
],
dtype=np.float32,
).reshape(w_shape)
r_value = np.array(
[
0.8374871,
0.86660194,
0.82114047,
0.71549815,
0.18775631,
0.3182116,
0.25392973,
0.38301638,
0.85531586,
],
dtype=np.float32,
).reshape(r_shape)
b_value = np.array([1.0289404, 1.6362579, 0.4370661], dtype=np.float32).reshape(b_shape)
activations = ["sigmoid"] activations = ["sigmoid"]
activation_alpha = [] activation_alpha = []
activation_beta = [] activation_beta = []
@ -483,47 +284,33 @@ def test_space_to_depth_operator():
activation_beta, activation_beta,
clip, clip,
) )
computation = runtime.computation( assert model.get_type_name() == "SpaceToDepth"
model, parameter_x, parameter_h_t, parameter_w, parameter_r, parameter_b, assert model.get_output_size() == 1
) assert model.get_output_element_type(0) == ov_runtime.Type.f32
assert list(model.get_output_shape(0)) == [batch_size, hidden_size]
result = computation(x_value, h_t_value, w_value, r_value, b_value)
expected = np.array(
[0.94126844, 0.9036043, 0.841243, 0.9468489, 0.934215, 0.873708], dtype=np.float32,
).reshape(batch_size, hidden_size)
assert np.allclose(result, expected)
def test_group_convolution_operator(): def test_group_convolution_operator():
runtime = get_runtime()
data_shape = [1, 4, 2, 2] data_shape = [1, 4, 2, 2]
filters_shape = [2, 1, 2, 1, 1] filters_shape = [2, 1, 2, 1, 1]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
parameter_filters = ov.parameter(filters_shape, name="Filters", dtype=np.float32) parameter_filters = ov.parameter(filters_shape, name="Filters", dtype=np.float32)
data_value = np.arange(start=1.0, stop=17.0, dtype=np.float32).reshape(data_shape)
filters_value = np.arange(start=1.0, stop=5.0, dtype=np.float32).reshape(filters_shape)
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
pads_begin = [0, 0] pads_begin = [0, 0]
pads_end = [0, 0] pads_end = [0, 0]
model = ov.group_convolution(parameter_data, parameter_filters, strides, pads_begin, pads_end, dilations) model = ov.group_convolution(parameter_data, parameter_filters, strides, pads_begin, pads_end, dilations)
computation = runtime.computation(model, parameter_data, parameter_filters) assert model.get_type_name() == "GroupConvolution"
result = computation(data_value, filters_value) assert model.get_output_size() == 1
assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array([11, 14, 17, 20, 79, 86, 93, 100], dtype=np.float32).reshape(1, 2, 2, 2) assert list(model.get_output_shape(0)) == [1, 2, 2, 2]
assert np.allclose(result, expected)
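For GroupConvolution, the filters are laid out as [groups, C_out per group, C_in per group, kH, kW], so the inferred output channel count in the test above is groups times C_out per group, that is 2 * 1 = 2. A sketch with the same shapes:

import numpy as np
import openvino.runtime.opset8 as ov

parameter_data = ov.parameter([1, 4, 2, 2], name="Data", dtype=np.float32)
parameter_filters = ov.parameter([2, 1, 2, 1, 1], name="Filters", dtype=np.float32)

node = ov.group_convolution(parameter_data, parameter_filters,
                            [1, 1], [0, 0], [0, 0], [1, 1])
# 2 groups x 1 output channel per group -> 2 output channels; the 1x1 kernels
# keep the 2x2 spatial size.
assert node.get_type_name() == "GroupConvolution"
assert list(node.get_output_shape(0)) == [1, 2, 2, 2]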
@pytest.mark.xfail(reason="Computation mismatch") @pytest.mark.xfail(reason="Computation mismatch")
def test_group_convolution_backprop_data(): def test_group_convolution_backprop_data():
runtime = get_runtime()
data_shape = [1, 1, 3, 3] data_shape = [1, 1, 3, 3]
filters_shape = [1, 1, 1, 3, 3] filters_shape = [1, 1, 1, 3, 3]
strides = [2, 2] strides = [2, 2]
@ -537,87 +324,13 @@ def test_group_convolution_backprop_data():
data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding, data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding,
) )
data_value = np.array( assert model.get_type_name() == "GroupConvolutionBackpropData"
[ assert model.get_output_size() == 1
0.16857791, assert model.get_output_element_type(0) == ov_runtime.Type.f32
-0.15161794, assert list(model.get_output_shape(0)) == [1, 1, 6, 6]
0.08540368,
0.1820628,
-0.21746576,
0.08245695,
0.1431433,
-0.43156421,
0.30591947,
],
dtype=np.float32,
).reshape(data_shape)
filters_value = np.array(
[
-0.06230065,
0.37932432,
-0.25388849,
0.33878803,
0.43709868,
-0.22477469,
0.04118127,
-0.44696793,
0.06373066,
],
dtype=np.float32,
).reshape(filters_shape)
computation = runtime.computation(model, data_node, filters_node)
result = computation(data_value, filters_value)
expected = np.array(
[
0.07368518,
-0.08925839,
-0.06627201,
0.06301362,
0.03732984,
-0.01919658,
-0.00628807,
-0.02817563,
-0.01472169,
0.04392925,
-0.00689478,
-0.01549204,
0.07957941,
-0.11459791,
-0.09505399,
0.07681622,
0.03604182,
-0.01853423,
-0.0270785,
-0.00680824,
-0.06650258,
0.08004665,
0.07918708,
0.0724144,
0.06256775,
-0.17838378,
-0.18863615,
0.20064656,
0.133717,
-0.06876295,
-0.06398046,
-0.00864975,
0.19289537,
-0.01490572,
-0.13673618,
0.01949645,
],
dtype=np.float32,
).reshape(1, 1, 6, 6)
assert np.allclose(result, expected)
def test_group_convolution_backprop_data_output_shape(): def test_group_convolution_backprop_data_output_shape():
runtime = get_runtime()
data_shape = [1, 1, 1, 10] data_shape = [1, 1, 1, 10]
filters_shape = [1, 1, 1, 1, 5] filters_shape = [1, 1, 1, 1, 5]
strides = [1, 1] strides = [1, 1]
@ -630,17 +343,7 @@ def test_group_convolution_backprop_data_output_shape():
data_node, filters_node, strides, output_shape_node, auto_pad="same_upper", data_node, filters_node, strides, output_shape_node, auto_pad="same_upper",
) )
data_value = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], dtype=np.float32).reshape( assert model.get_type_name() == "GroupConvolutionBackpropData"
data_shape, assert model.get_output_size() == 1
) assert model.get_output_element_type(0) == ov_runtime.Type.f32
assert list(model.get_output_shape(0)) == [1, 1, 1, 14]
filters_value = np.array([1.0, 2.0, 3.0, 2.0, 1.0], dtype=np.float32).reshape(filters_shape)
computation = runtime.computation(model, data_node, filters_node)
result = computation(data_value, filters_value)
expected = np.array(
[0.0, 1.0, 4.0, 10.0, 18.0, 27.0, 36.0, 45.0, 54.0, 63.0, 62.0, 50.0, 26.0, 9.0], dtype=np.float32,
).reshape(1, 1, 1, 14)
assert np.allclose(result, expected)

View File

@ -6,37 +6,31 @@ import numpy as np
import pytest import pytest
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
from tests.test_graph.util import run_op_node
@pytest.mark.parametrize( @pytest.mark.parametrize(
("shape_a", "shape_b", "transpose_a", "transpose_b"), ("shape_a", "shape_b", "transpose_a", "transpose_b", "output_shape"),
[ [
# matrix, vector # matrix, vector
([2, 4], [4], False, False), ([2, 4], [4], False, False, [2]),
([4], [4, 2], False, False), ([4], [4, 2], False, False, [2]),
# matrix, matrix # matrix, matrix
([2, 4], [4, 2], False, False), ([2, 4], [4, 2], False, False, [2, 2]),
# tensor, vector # tensor, vector
([2, 4, 5], [5], False, False), ([2, 4, 5], [5], False, False, [2, 4]),
# # tensor, matrix # # tensor, matrix
([2, 4, 5], [5, 4], False, False), ([2, 4, 5], [5, 4], False, False, [2, 4, 4]),
# # tensor, tensor # # tensor, tensor
([2, 2, 4], [2, 4, 2], False, False), ([2, 2, 4], [2, 4, 2], False, False, [2, 2, 2]),
], ],
) )
@pytest.mark.skip(reason="Sporadically failed. Need further investigation. Ticket - 95970") @pytest.mark.skip(reason="Sporadically failed. Need further investigation. Ticket - 95970")
def test_matmul(shape_a, shape_b, transpose_a, transpose_b): def test_matmul(shape_a, shape_b, transpose_a, transpose_b, output_shape):
np.random.seed(133391) np.random.seed(133391)
left_input = -100.0 + np.random.rand(*shape_a).astype(np.float32) * 200.0 left_input = -100.0 + np.random.rand(*shape_a).astype(np.float32) * 200.0
right_input = -100.0 + np.random.rand(*shape_b).astype(np.float32) * 200.0 right_input = -100.0 + np.random.rand(*shape_b).astype(np.float32) * 200.0
result = run_op_node([left_input, right_input], ov.matmul, transpose_a, transpose_b) node = ov.matmul(left_input, right_input, transpose_a, transpose_b)
assert node.get_type_name() == "MatMul"
if transpose_a: assert node.get_output_size() == 1
left_input = np.transpose(left_input) assert list(node.get_output_shape(0)) == output_shape
if transpose_b:
right_input = np.transpose(right_input)
expected = np.matmul(left_input, right_input)
assert np.allclose(result, expected)
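The new output_shape column in the MatMul parametrization is just the standard shape rule: inner dimensions are contracted and leading dimensions are kept. Since the test itself is currently skipped, here is a hedged sketch of the check it drives, passing NumPy arrays directly as the diff does:

import numpy as np
import openvino.runtime.opset8 as ov

left_input = np.random.rand(2, 4, 5).astype(np.float32)
right_input = np.random.rand(5, 4).astype(np.float32)

node = ov.matmul(left_input, right_input, False, False)
# [2, 4, 5] x [5, 4]: the batch dim is kept, the shared 5 is contracted -> [2, 4, 4].
assert node.get_type_name() == "MatMul"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [2, 4, 4]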

View File

@ -5,33 +5,28 @@
import numpy as np import numpy as np
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
def test_split(): def test_split():
runtime = get_runtime()
input_tensor = ov.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)) input_tensor = ov.constant(np.array([0, 1, 2, 3, 4, 5], dtype=np.int32))
axis = ov.constant(0, dtype=np.int64) axis = ov.constant(0, dtype=np.int64)
splits = 3 splits = 3
split_node = ov.split(input_tensor, axis, splits) split_node = ov.split(input_tensor, axis, splits)
computation = runtime.computation(split_node) assert split_node.get_type_name() == "Split"
split_results = computation() assert split_node.get_output_size() == 3
expected_results = np.array([[0, 1], [2, 3], [4, 5]], dtype=np.int32) assert list(split_node.get_output_shape(0)) == [2]
assert np.allclose(split_results, expected_results) assert list(split_node.get_output_shape(1)) == [2]
assert list(split_node.get_output_shape(2)) == [2]
def test_variadic_split(): def test_variadic_split():
runtime = get_runtime()
input_tensor = ov.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32)) input_tensor = ov.constant(np.array([[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]], dtype=np.int32))
axis = ov.constant(1, dtype=np.int64) axis = ov.constant(1, dtype=np.int64)
splits = ov.constant(np.array([2, 4], dtype=np.int64)) splits = ov.constant(np.array([2, 4], dtype=np.int64))
v_split_node = ov.variadic_split(input_tensor, axis, splits) v_split_node = ov.variadic_split(input_tensor, axis, splits)
computation = runtime.computation(v_split_node) assert v_split_node.get_type_name() == "VariadicSplit"
results = computation() assert v_split_node.get_output_size() == 2
split0 = np.array([[0, 1], [6, 7]], dtype=np.int32) assert list(v_split_node.get_output_shape(0)) == [2, 2]
split1 = np.array([[2, 3, 4, 5], [8, 9, 10, 11]], dtype=np.int32) assert list(v_split_node.get_output_shape(1)) == [2, 4]
assert np.allclose(results[0], split0)
assert np.allclose(results[1], split1)
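Both split tests now check one shape per output port instead of comparing executed slices. The rule is simple: Split divides the axis evenly, while VariadicSplit needs lengths that sum to the size along the chosen axis. A short sketch of the VariadicSplit case on a slightly different input:

import numpy as np
import openvino.runtime.opset8 as ov

input_tensor = ov.constant(np.arange(12, dtype=np.int32).reshape(2, 6))
axis = ov.constant(1, dtype=np.int64)
splits = ov.constant(np.array([2, 4], dtype=np.int64))  # 2 + 4 == 6, the size along axis 1

node = ov.variadic_split(input_tensor, axis, splits)
assert node.get_type_name() == "VariadicSplit"
assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [2, 2]
assert list(node.get_output_shape(1)) == [2, 4]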

View File

@ -2,36 +2,37 @@
# Copyright (C) 2018-2022 Intel Corporation # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import openvino.runtime as ov_runtime
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
import numpy as np import numpy as np
import pytest import pytest
from tests.runtime import get_runtime from openvino.runtime.utils.types import get_element_type
from tests.test_graph.util import run_op_node, run_op_numeric_data
def test_concat(): def test_concat():
input_a = np.array([[1, 2], [3, 4]]).astype(np.float32) input_a = np.array([[1, 2], [3, 4]]).astype(np.float32)
input_b = np.array([[5, 6]]).astype(np.float32) input_b = np.array([[5, 6]]).astype(np.float32)
axis = 0 axis = 0
expected = np.concatenate((input_a, input_b), axis=0) expected_shape = np.concatenate((input_a, input_b), axis=0).shape
runtime = get_runtime()
parameter_a = ov.parameter(list(input_a.shape), name="A", dtype=np.float32) parameter_a = ov.parameter(list(input_a.shape), name="A", dtype=np.float32)
parameter_b = ov.parameter(list(input_b.shape), name="B", dtype=np.float32) parameter_b = ov.parameter(list(input_b.shape), name="B", dtype=np.float32)
node = ov.concat([parameter_a, parameter_b], axis) node = ov.concat([parameter_a, parameter_b], axis)
computation = runtime.computation(node, parameter_a, parameter_b) assert node.get_type_name() == "Concat"
result = computation(input_a, input_b) assert node.get_output_size() == 1
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize( @pytest.mark.parametrize(
("val_type", "value"), [(bool, False), (bool, np.empty((2, 2), dtype=bool))], ("val_type", "value", "output_shape"), [(bool, False, []), (bool, np.empty((2, 2), dtype=bool), [2, 2])],
) )
def test_constant_from_bool(val_type, value): def test_constant_from_bool(val_type, value, output_shape):
expected = np.array(value, dtype=val_type) node = ov.constant(value, val_type)
result = run_op_numeric_data(value, ov.constant, val_type) assert node.get_type_name() == "Constant"
assert np.allclose(result, expected) assert node.get_output_size() == 1
assert node.get_output_element_type(0) == ov_runtime.Type.boolean
assert list(node.get_output_shape(0)) == output_shape
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -50,9 +51,11 @@ def test_constant_from_bool(val_type, value):
], ],
) )
def test_constant_from_scalar(val_type, value): def test_constant_from_scalar(val_type, value):
expected = np.array(value, dtype=val_type) node = ov.constant(value, val_type)
result = run_op_numeric_data(value, ov.constant, val_type) assert node.get_type_name() == "Constant"
assert np.allclose(result, expected) assert node.get_output_size() == 1
assert node.get_output_element_type(0) == get_element_type(val_type)
assert list(node.get_output_shape(0)) == []
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -65,8 +68,11 @@ def test_constant_from_scalar(val_type, value):
def test_constant_from_float_array(val_type): def test_constant_from_float_array(val_type):
np.random.seed(133391) np.random.seed(133391)
input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type) input_data = np.array(-1 + np.random.rand(2, 3, 4) * 2, dtype=val_type)
result = run_op_numeric_data(input_data, ov.constant, val_type) node = ov.constant(input_data, val_type)
assert np.allclose(result, input_data) assert node.get_type_name() == "Constant"
assert node.get_output_size() == 1
assert node.get_output_element_type(0) == get_element_type(val_type)
assert list(node.get_output_shape(0)) == [2, 3, 4]
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -87,8 +93,11 @@ def test_constant_from_integer_array(val_type, range_start, range_end):
input_data = np.array( input_data = np.array(
np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type, np.random.randint(range_start, range_end, size=(2, 2)), dtype=val_type,
) )
result = run_op_numeric_data(input_data, ov.constant, val_type) node = ov.constant(input_data, val_type)
assert np.allclose(result, input_data) assert node.get_type_name() == "Constant"
assert node.get_output_size() == 1
assert node.get_output_element_type(0) == get_element_type(val_type)
assert list(node.get_output_shape(0)) == [2, 2]
def test_broadcast_numpy(): def test_broadcast_numpy():
@ -127,27 +136,24 @@ def test_transpose():
) )
input_order = np.array([0, 2, 3, 1], dtype=np.int32) input_order = np.array([0, 2, 3, 1], dtype=np.int32)
result = run_op_node([input_tensor], ov.transpose, input_order) node = ov.transpose(input_tensor, input_order)
assert node.get_type_name() == "Transpose"
expected = np.transpose(input_tensor, input_order) assert node.get_output_size() == 1
assert node.get_output_element_type(0) == ov_runtime.Type.i32
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == [3, 224, 224, 3]
def test_tile(): def test_tile():
input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3)) input_tensor = np.arange(6, dtype=np.int32).reshape((2, 1, 3))
repeats = np.array([2, 1], dtype=np.int32) repeats = np.array([2, 1], dtype=np.int32)
node = ov.tile(input_tensor, repeats)
result = run_op_node([input_tensor], ov.tile, repeats) assert node.get_type_name() == "Tile"
assert node.get_output_size() == 1
expected = np.array([0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5]).reshape((2, 2, 3)) assert node.get_output_element_type(0) == ov_runtime.Type.i32
assert list(node.get_output_shape(0)) == [2, 2, 3]
assert np.allclose(result, expected)
@pytest.mark.xfail(
reason="RuntimeError: Check 'shape_size(get_input_shape(0)) == shape_size(output_shape)'",
)
def test_strided_slice(): def test_strided_slice():
input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4)) input_tensor = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
begin = np.array([1, 0], dtype=np.int32) begin = np.array([1, 0], dtype=np.int32)
@ -159,9 +165,8 @@ def test_strided_slice():
shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32) shrink_axis_mask = np.array([1, 0, 0], dtype=np.int32)
ellipsis_mask = np.array([0, 0, 0], dtype=np.int32) ellipsis_mask = np.array([0, 0, 0], dtype=np.int32)
result = run_op_node( node = ov.strided_slice(
[input_tensor], input_tensor,
ov.strided_slice,
begin, begin,
end, end,
strides, strides,
@ -171,12 +176,10 @@ def test_strided_slice():
shrink_axis_mask, shrink_axis_mask,
ellipsis_mask, ellipsis_mask,
) )
assert node.get_type_name() == "StridedSlice"
expected = np.array( assert node.get_output_size() == 1
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], dtype=np.float32, assert node.get_output_element_type(0) == ov_runtime.Type.f32
).reshape((1, 3, 4)) assert list(node.get_output_shape(0)) == [1, 3, 4]
assert np.allclose(result, expected)
def test_reshape_v1(): def test_reshape_v1():
@ -184,16 +187,18 @@ def test_reshape_v1():
shape = np.array([0, -1, 4], dtype=np.int32) shape = np.array([0, -1, 4], dtype=np.int32)
special_zero = True special_zero = True
expected_shape = np.array([2, 150, 4]) node = ov.reshape(param_a, shape, special_zero)
expected = np.reshape(param_a, expected_shape) assert node.get_type_name() == "Reshape"
result = run_op_node([param_a], ov.reshape, shape, special_zero) assert node.get_output_size() == 1
assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == [2, 150, 4]
def test_shape_of(): def test_shape_of():
input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32) input_tensor = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
result = run_op_node([input_tensor], ov.shape_of) node = ov.shape_of(input_tensor)
assert node.get_type_name() == "ShapeOf"
assert np.allclose(result, [3, 3]) assert node.get_output_size() == 1
assert node.get_output_element_type(0) == ov_runtime.Type.i64
assert list(node.get_output_shape(0)) == [2]
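The Reshape and ShapeOf checks above are both fully determined by static inference: with special_zero enabled, a 0 in the target shape copies the matching input dimension, -1 is deduced from the element count, and ShapeOf always yields a 1-D i64 tensor whose length equals the input rank. A combined sketch; the 4-D input shape below is illustrative, since the PR's own array is not shown in full:

import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime import Type

param_a = np.zeros((2, 3, 4, 50), dtype=np.float32)  # illustrative input shape
shape = np.array([0, -1, 4], dtype=np.int32)

reshape_node = ov.reshape(param_a, shape, True)
# 0 copies dim 0 (2), -1 is inferred so that 2 * 150 * 4 == 2 * 3 * 4 * 50.
assert list(reshape_node.get_output_shape(0)) == [2, 150, 4]

shape_of_node = ov.shape_of(param_a)
assert shape_of_node.get_output_element_type(0) == Type.i64
assert list(shape_of_node.get_output_shape(0)) == [4]  # one entry per input dimension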

View File

@ -5,48 +5,49 @@
import numpy as np import numpy as np
import pytest import pytest
import openvino.runtime as ov_runtime
import openvino.runtime.opset9 as ov import openvino.runtime.opset9 as ov
from openvino.runtime import Shape, Type from openvino.runtime import Shape, Type
from tests.runtime import get_runtime
from tests.test_graph.util import run_op_node
R_TOLERANCE = 1e-6 # global relative tolerance R_TOLERANCE = 1e-6 # global relative tolerance
@pytest.mark.parametrize( @pytest.mark.parametrize(
("graph_api_fn", "numpy_fn", "range_start", "range_end"), ("graph_api_fn", "type_name"),
[ [
(ov.absolute, np.abs, -1, 1), (ov.absolute, "Abs"),
(ov.abs, np.abs, -1, 1), (ov.abs, "Abs"),
(ov.acos, np.arccos, -1, 1), (ov.acos, "Acos"),
(ov.acosh, np.arccosh, 1, 2), (ov.acosh, "Acosh"),
(ov.asin, np.arcsin, -1, 1), (ov.asin, "Asin"),
(ov.asinh, np.arcsinh, -1, 1), (ov.asinh, "Asinh"),
(ov.atan, np.arctan, -100.0, 100.0), (ov.atan, "Atan"),
(ov.atanh, np.arctanh, 0.0, 1.0), (ov.atanh, "Atanh"),
(ov.ceiling, np.ceil, -100.0, 100.0), (ov.ceiling, "Ceiling"),
(ov.ceil, np.ceil, -100.0, 100.0), (ov.ceil, "Ceiling"),
(ov.cos, np.cos, -100.0, 100.0), (ov.cos, "Cos"),
(ov.cosh, np.cosh, -100.0, 100.0), (ov.cosh, "Cosh"),
(ov.exp, np.exp, -100.0, 100.0), (ov.exp, "Exp"),
(ov.floor, np.floor, -100.0, 100.0), (ov.floor, "Floor"),
(ov.log, np.log, 0, 100.0), (ov.log, "Log"),
(ov.relu, lambda x: np.maximum(0, x), -100.0, 100.0), (ov.relu, "Relu"),
(ov.sign, np.sign, -100.0, 100.0), (ov.sign, "Sign"),
(ov.sin, np.sin, -100.0, 100.0), (ov.sin, "Sin"),
(ov.sinh, np.sinh, -100.0, 100.0), (ov.sinh, "Sinh"),
(ov.sqrt, np.sqrt, 0.0, 100.0), (ov.sqrt, "Sqrt"),
(ov.tan, np.tan, -1.0, 1.0), (ov.tan, "Tan"),
(ov.tanh, np.tanh, -100.0, 100.0), (ov.tanh, "Tanh"),
], ],
) )
def test_unary_op_array(graph_api_fn, numpy_fn, range_start, range_end): def test_unary_op_array(graph_api_fn, type_name):
np.random.seed(133391) np.random.seed(133391)
input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32) input_data = np.random.rand(2, 3, 4).astype(np.float32)
expected = numpy_fn(input_data)
result = run_op_node([input_data], graph_api_fn) node = graph_api_fn(input_data)
assert np.allclose(result, expected, rtol=0.001) assert node.get_output_size() == 1
assert node.get_type_name() == type_name
assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert list(node.get_output_shape(0)) == [2, 3, 4]
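Dropping the numpy_fn and range columns works because every element-wise unary op in this table is shape- and type-preserving, so the only property worth parametrizing is the expected node type name. A compressed sketch of what the table drives:

import numpy as np
import openvino.runtime.opset9 as ov
from openvino.runtime import Type

input_data = np.random.rand(2, 3, 4).astype(np.float32)

for graph_api_fn, type_name in [(ov.abs, "Abs"), (ov.exp, "Exp"), (ov.tanh, "Tanh")]:
    node = graph_api_fn(input_data)
    # Element-wise unary ops preserve both shape and element type, so the
    # static checks fully characterise the node without executing it.
    assert node.get_type_name() == type_name
    assert list(node.get_output_shape(0)) == [2, 3, 4]
    assert node.get_output_element_type(0) == Type.f32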
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -74,10 +75,12 @@ def test_unary_op_array(graph_api_fn, numpy_fn, range_start, range_end):
], ],
) )
def test_unary_op_scalar(graph_api_fn, numpy_fn, input_data): def test_unary_op_scalar(graph_api_fn, numpy_fn, input_data):
expected = numpy_fn(input_data) expected_shape = numpy_fn(input_data).shape
node = graph_api_fn(input_data)
result = run_op_node([input_data], graph_api_fn) assert node.get_output_size() == 1
assert np.allclose(result, expected) assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert list(node.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize( @pytest.mark.parametrize(
@ -85,52 +88,42 @@ def test_unary_op_scalar(graph_api_fn, numpy_fn, input_data):
[(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))], [(np.array([True, False, True, False])), (np.array([True])), (np.array([False]))],
) )
def test_logical_not(input_data): def test_logical_not(input_data):
expected = np.logical_not(input_data) node = ov.logical_not(input_data)
assert node.get_output_size() == 1
result = run_op_node([input_data], ov.logical_not) assert node.get_type_name() == "LogicalNot"
assert np.allclose(result, expected) assert node.get_output_element_type(0) == ov_runtime.Type.boolean
assert list(node.get_output_shape(0)) == list(input_data.shape)
def test_sigmoid(): def test_sigmoid():
input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32) input_data = np.array([-3.14, -1.0, 0.0, 2.71001, 1000.0], dtype=np.float32)
result = run_op_node([input_data], ov.sigmoid) node = ov.sigmoid(input_data)
def sigmoid(value): assert node.get_output_size() == 1
return 1.0 / (1.0 + np.exp(-value)) assert node.get_type_name() == "Sigmoid"
assert node.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array(list(map(sigmoid, input_data))) assert list(node.get_output_shape(0)) == [5]
assert np.allclose(result, expected)
def test_softmax_positive_axis(): def test_softmax():
axis = 1 axis = 1
input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32) input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
result = run_op_node([input_tensor], ov.softmax, axis) node = ov.softmax(input_tensor, axis)
assert node.get_output_size() == 1
expected = [[0.09003056, 0.24472842, 0.6652409], [0.09003056, 0.24472842, 0.6652409]] assert node.get_type_name() == "Softmax"
assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == [2, 3]
def test_softmax_negative_axis():
axis = -1
input_tensor = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
result = run_op_node([input_tensor], ov.softmax, axis)
expected = [[0.09003056, 0.24472842, 0.6652409], [0.09003056, 0.24472842, 0.6652409]]
assert np.allclose(result, expected)
def test_erf(): def test_erf():
input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32) input_tensor = np.array([-1.0, 0.0, 1.0, 2.5, 3.14, 4.0], dtype=np.float32)
expected = [-0.842701, 0.0, 0.842701, 0.999593, 0.999991, 1.0]
result = run_op_node([input_tensor], ov.erf) node = ov.erf(input_tensor)
assert np.allclose(result, expected) assert node.get_output_size() == 1
assert node.get_type_name() == "Erf"
assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert list(node.get_output_shape(0)) == [6]
def test_hswish(): def test_hswish():
@ -144,7 +137,7 @@ def test_hswish():
assert node.get_output_element_type(0) == Type.f32 assert node.get_output_element_type(0) == Type.f32
def test_round_even(): def test_round():
float_dtype = np.float32 float_dtype = np.float32
data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data") data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data")
@ -155,27 +148,12 @@ def test_round_even():
assert node.get_output_element_type(0) == Type.f32 assert node.get_output_element_type(0) == Type.f32
input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32) input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
expected = [-2.0, -2.0, 0.0, 0.0, 1.0, 2.0, 2.0, 2.0, 4.0]
result = run_op_node([input_tensor], ov.round, "HALF_TO_EVEN") node = ov.round(input_tensor, "HALF_TO_EVEN")
assert np.allclose(result, expected)
def test_round_away():
float_dtype = np.float32
data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data")
node = ov.round(data, "HALF_AWAY_FROM_ZERO")
assert node.get_type_name() == "Round"
assert node.get_output_size() == 1 assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [3, 10] assert node.get_type_name() == "Round"
assert node.get_output_element_type(0) == Type.f32 assert node.get_output_element_type(0) == ov_runtime.Type.f32
assert list(node.get_output_shape(0)) == [9]
input_tensor = np.array([-2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5], dtype=np.float32)
expected = [-3.0, -2.0, -1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 4.0]
result = run_op_node([input_tensor], ov.round, "HALF_AWAY_FROM_ZERO")
assert np.allclose(result, expected)
def test_hsigmoid(): def test_hsigmoid():
@ -190,102 +168,42 @@ def test_hsigmoid():
def test_gelu_operator_with_parameters(): def test_gelu_operator_with_parameters():
runtime = get_runtime()
data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
data_shape = [2, 2] data_shape = [2, 2]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.gelu(parameter_data, "erf") model = ov.gelu(parameter_data, "erf")
computation = runtime.computation(model, parameter_data) assert model.get_output_size() == 1
assert model.get_type_name() == "Gelu"
result = computation(data_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array([[-1.6391277e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32) assert list(model.get_output_shape(0)) == [2, 2]
assert np.allclose(result, expected, 1e-6, 1e-6)
def test_gelu_operator_with_array(): def test_gelu_operator_with_array():
runtime = get_runtime()
data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
model = ov.gelu(data_value, "erf") model = ov.gelu(data_value, "erf")
computation = runtime.computation(model) assert model.get_output_size() == 1
assert model.get_type_name() == "Gelu"
result = computation() assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array([[-1.6391277e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32) assert list(model.get_output_shape(0)) == [2, 2]
assert np.allclose(result, expected, 1e-6, 1e-6)
def test_gelu_tanh_operator_with_parameters(): def test_gelu_tanh_operator_with_parameters():
runtime = get_runtime()
data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
data_shape = [2, 2] data_shape = [2, 2]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32) parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.gelu(parameter_data, "tanh") model = ov.gelu(parameter_data, "tanh")
computation = runtime.computation(model, parameter_data) assert model.get_output_size() == 1
assert model.get_type_name() == "Gelu"
result = computation(data_value) assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array([[0.0, 0.841192], [-0.04540223, 2.9963627]], dtype=np.float32) assert list(model.get_output_shape(0)) == [2, 2]
assert np.allclose(result, expected, 1e-6, 1e-6)
def test_gelu_tanh_operator_with_array(): def test_gelu_tanh_operator_with_array():
runtime = get_runtime()
data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32) data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
model = ov.gelu(data_value, "tanh") model = ov.gelu(data_value, "tanh")
computation = runtime.computation(model) assert model.get_output_size() == 1
assert model.get_type_name() == "Gelu"
result = computation() assert model.get_output_element_type(0) == ov_runtime.Type.f32
expected = np.array([[0.0, 0.841192], [-0.04540223, 2.9963627]], dtype=np.float32) assert list(model.get_output_shape(0)) == [2, 2]
assert np.allclose(result, expected, 1e-6, 1e-6)
type_tolerance = [
(np.float64, 1e-6),
(np.float32, 1e-6),
(np.float16, 1e-3),
]
@pytest.mark.parametrize("type_tolerance", type_tolerance)
def test_softsign_with_parameters(type_tolerance):
dtype, atol = type_tolerance
data = np.random.uniform(-1.0, 1.0, (32, 5)).astype(dtype)
expected = np.divide(data, np.abs(data) + 1)
runtime = get_runtime()
param = ov.parameter(data.shape, dtype, name="Data")
result = runtime.computation(ov.softsign(param, "SoftSign"), param)(data)
assert np.allclose(result, expected, R_TOLERANCE, atol)
@pytest.mark.parametrize("type_tolerance", type_tolerance)
def test_softsign_with_array(type_tolerance):
dtype, atol = type_tolerance
data = np.random.uniform(-1.0, 1.0, (32, 5)).astype(dtype)
expected = np.divide(data, np.abs(data) + 1)
runtime = get_runtime()
result = runtime.computation(ov.softsign(data, "SoftSign"))()
assert np.allclose(result, expected, R_TOLERANCE, atol)
@pytest.mark.parametrize("type_tolerance", type_tolerance)
def test_softsign(type_tolerance):
dtype, atol = type_tolerance
data = np.random.uniform(-1.0, 1.0, (32, 5)).astype(dtype)
expected = np.divide(data, np.abs(data) + 1)
result = run_op_node([data], ov.softsign)
assert np.allclose(result, expected, R_TOLERANCE, atol)
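A minimal, self-contained sketch of the hardware-agnostic pattern the tests above rely on: the node is only constructed and its static metadata is asserted, with no runtime computation and no device execution. The op, shape and dtype below are illustrative assumptions, not lines taken from this diff.

import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime import Type

# Build the graph node only; nothing is compiled or executed on a device.
param = ov.parameter([2, 2], name="Data", dtype=np.float32)
node = ov.gelu(param, "erf")

# Hardware-agnostic checks against the node's static metadata.
assert node.get_type_name() == "Gelu"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == [2, 2]
assert node.get_output_element_type(0) == Type.f32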

View File

@ -6,7 +6,7 @@ import numpy as np
import pytest import pytest
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime from openvino.runtime import Type
@pytest.fixture() @pytest.fixture()
@ -15,7 +15,6 @@ def ndarray_1x1x4x4():
def test_avg_pool_2d(ndarray_1x1x4x4): def test_avg_pool_2d(ndarray_1x1x4x4):
runtime = get_runtime()
input_data = ndarray_1x1x4x4 input_data = ndarray_1x1x4x4
param = ov.parameter(input_data.shape, name="A", dtype=np.float32) param = ov.parameter(input_data.shape, name="A", dtype=np.float32)
@ -25,41 +24,15 @@ def test_avg_pool_2d(ndarray_1x1x4x4):
pads_end = [0] * spatial_dim_count pads_end = [0] * spatial_dim_count
strides = [2, 2] strides = [2, 2]
exclude_pad = True exclude_pad = True
expected = [[[[13.5, 15.5], [21.5, 23.5]]]]
avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
computation = runtime.computation(avg_pool_node, param) assert node.get_type_name() == "AvgPool"
result = computation(input_data) assert node.get_output_size() == 1
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == [1, 1, 2, 2]
assert node.get_output_element_type(0) == Type.f32
expected = [[[[13.5, 14.5, 15.5], [17.5, 18.5, 19.5], [21.5, 22.5, 23.5]]]]
strides = [1, 1]
avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
pads_begin = [1, 1]
pads_end = [1, 1]
strides = [2, 2]
exclude_pad = True
expected = [[[[11.0, 12.5, 14.0], [17.0, 18.5, 20.0], [23.0, 24.5, 26.0]]]]
avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
exclude_pad = False
expected = [[[[2.75, 6.25, 3.5], [8.5, 18.5, 10.0], [5.75, 12.25, 6.5]]]]
avg_pool_node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
computation = runtime.computation(avg_pool_node, param)
result = computation(input_data)
assert np.allclose(result, expected)
def test_avg_pooling_3d(ndarray_1x1x4x4): def test_avg_pooling_3d(ndarray_1x1x4x4):
rt = get_runtime()
data = ndarray_1x1x4x4 data = ndarray_1x1x4x4
data = np.broadcast_to(data, (1, 1, 4, 4, 4)) data = np.broadcast_to(data, (1, 1, 4, 4, 4))
param = ov.parameter(list(data.shape)) param = ov.parameter(list(data.shape))
@ -70,21 +43,14 @@ def test_avg_pooling_3d(ndarray_1x1x4x4):
pads_end = [0] * spatial_dim_count pads_end = [0] * spatial_dim_count
exclude_pad = True exclude_pad = True
avgpool = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad) node = ov.avg_pool(param, strides, pads_begin, pads_end, kernel_shape, exclude_pad)
comp = rt.computation(avgpool, param) assert node.get_type_name() == "AvgPool"
result = comp(data) assert node.get_output_size() == 1
result_ref = [[[[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]]]] assert list(node.get_output_shape(0)) == [1, 1, 2, 2, 2]
assert np.allclose(result, result_ref) assert node.get_output_element_type(0) == Type.f32
def test_max_pool_basic(): def test_max_pool_basic():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
@ -96,7 +62,7 @@ def test_max_pool_basic():
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -107,23 +73,15 @@ def test_max_pool_basic():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 3, 3]
expected = np.array([[[[5.5, 6.5, 7.5], [9.5, 10.5, 11.5], [13.5, 14.5, 15.5]]]], dtype=np.float32) assert list(node.get_output_shape(1)) == [1, 1, 3, 3]
expected_idx = np.array([[[[5, 6, 7], [9, 10, 11], [13, 14, 15]]]], dtype=np.int32) assert node.get_output_element_type(0) == Type.f32
assert np.allclose(result[0], expected) assert node.get_output_element_type(1) == Type.i32
assert np.allclose(result[1], expected_idx)
def test_max_pool_strides(): def test_max_pool_strides():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [2, 1] strides = [2, 1]
dilations = [1, 1] dilations = [1, 1]
@ -135,7 +93,7 @@ def test_max_pool_strides():
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -146,23 +104,15 @@ def test_max_pool_strides():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 2, 3]
expected = np.array([[[[5.5, 6.5, 7.5], [13.5, 14.5, 15.5]]]], dtype=np.float32) assert list(node.get_output_shape(1)) == [1, 1, 2, 3]
expected_idx = np.array([[[[5, 6, 7], [13, 14, 15]]]], dtype=np.int32) assert node.get_output_element_type(0) == Type.f32
assert np.allclose(result[0], expected) assert node.get_output_element_type(1) == Type.i32
assert np.allclose(result[1], expected_idx)
def test_max_pool_kernel_shape1x1(): def test_max_pool_kernel_shape1x1():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
@ -174,7 +124,7 @@ def test_max_pool_kernel_shape1x1():
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -185,21 +135,15 @@ def test_max_pool_kernel_shape1x1():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 4, 4]
assert np.allclose(result[0], data) assert list(node.get_output_shape(1)) == [1, 1, 4, 4]
assert np.allclose(result[1], np.arange(0, 16, dtype=np.int32).reshape((1, 1, 4, 4))) assert node.get_output_element_type(0) == Type.f32
assert node.get_output_element_type(1) == Type.i32
def test_max_pool_kernel_shape3x3(): def test_max_pool_kernel_shape3x3():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
@ -211,7 +155,7 @@ def test_max_pool_kernel_shape3x3():
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -222,40 +166,27 @@ def test_max_pool_kernel_shape3x3():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 2, 2]
expected = np.array([[[[10.5, 11.5], [14.5, 15.5]]]], dtype=np.float32) assert list(node.get_output_shape(1)) == [1, 1, 2, 2]
assert np.allclose(result[0], expected) assert node.get_output_element_type(0) == Type.f32
assert node.get_output_element_type(1) == Type.i32
def test_max_pool_non_zero_pads(): def test_max_pool_non_zero_pads():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
pads_begin = [1, 1] pads_begin = [1, 1]
pads_end = [1, 1] pads_end = [1, 1]
"""0 0 , 0 , 0 , 0, 0
0 [ 0.5, 1.5, 2.5, 3.5], 0,
0 [ 4.5, 5.5, 6.5, 7.5], 0,
0 [ 8.5, 9.5, 10.5, 11.5], 0,
0 [12.5, 13.5, 14.5, 15.5], 0
0 0 , 0 , 0 , 0, 0
"""
kernel_shape = [2, 2] kernel_shape = [2, 2]
rounding_type = "floor" rounding_type = "floor"
auto_pad = None auto_pad = None
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -266,67 +197,27 @@ def test_max_pool_non_zero_pads():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 5, 5]
expected = np.array( assert list(node.get_output_shape(1)) == [1, 1, 5, 5]
[ assert node.get_output_element_type(0) == Type.f32
[ assert node.get_output_element_type(1) == Type.i32
[
[0.5, 1.5, 2.5, 3.5, 3.5],
[4.5, 5.5, 6.5, 7.5, 7.5],
[8.5, 9.5, 10.5, 11.5, 11.5],
[12.5, 13.5, 14.5, 15.5, 15.5],
[12.5, 13.5, 14.5, 15.5, 15.5],
],
],
],
dtype=np.float32,
)
expected_idx = np.array(
[
[
[
[0, 1, 2, 3, 3],
[4, 5, 6, 7, 7],
[8, 9, 10, 11, 11],
[12, 13, 14, 15, 15],
[12, 13, 14, 15, 15],
],
],
],
dtype=np.int32,
)
assert np.allclose(result[0], expected)
assert np.allclose(result[1], expected_idx)
def test_max_pool_same_upper_auto_pads(): def test_max_pool_same_upper_auto_pads():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
pads_begin = [0, 0] pads_begin = [0, 0]
pads_end = [0, 0] pads_end = [0, 0]
"""[ 0.5, 1.5, 2.5, 3.5], 0,
[ 4.5, 5.5, 6.5, 7.5], 0,
[ 8.5, 9.5, 10.5, 11.5], 0,
[12.5, 13.5, 14.5, 15.5], 0
0 , 0 , 0 , 0, 0
"""
kernel_shape = [2, 2] kernel_shape = [2, 2]
auto_pad = "same_upper" auto_pad = "same_upper"
rounding_type = "floor" rounding_type = "floor"
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -337,65 +228,27 @@ def test_max_pool_same_upper_auto_pads():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 4, 4]
expected = np.array( assert list(node.get_output_shape(1)) == [1, 1, 4, 4]
[ assert node.get_output_element_type(0) == Type.f32
[ assert node.get_output_element_type(1) == Type.i32
[
[5.5, 6.5, 7.5, 7.5],
[9.5, 10.5, 11.5, 11.5],
[13.5, 14.5, 15.5, 15.5],
[13.5, 14.5, 15.5, 15.5],
],
],
],
dtype=np.float32,
)
expected_idx = np.array(
[
[
[
[5, 6, 7, 7],
[9, 10, 11, 11],
[13, 14, 15, 15],
[13, 14, 15, 15],
],
],
],
dtype=np.int32,
)
assert np.allclose(result[0], expected)
assert np.allclose(result[1], expected_idx)
def test_max_pool_same_lower_auto_pads(): def test_max_pool_same_lower_auto_pads():
rt = get_runtime()
"""array([[[[ 0.5, 1.5, 2.5, 3.5],
[ 4.5, 5.5, 6.5, 7.5],
[ 8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5]]]], dtype=float32)
"""
data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4)) data = np.arange(0.5, 16, dtype=np.float32).reshape((1, 1, 4, 4))
strides = [1, 1] strides = [1, 1]
dilations = [1, 1] dilations = [1, 1]
pads_begin = [0, 0] pads_begin = [0, 0]
pads_end = [0, 0] pads_end = [0, 0]
"""0 0 , 0 , 0 , 0,
0 [ 0.5, 1.5, 2.5, 3.5],
0 [ 4.5, 5.5, 6.5, 7.5],
0 [ 8.5, 9.5, 10.5, 11.5],
0 [12.5, 13.5, 14.5, 15.5],
"""
kernel_shape = [2, 2] kernel_shape = [2, 2]
auto_pad = "same_lower" auto_pad = "same_lower"
rounding_type = "floor" rounding_type = "floor"
index_et = "i32" index_et = "i32"
data_node = ov.parameter(data.shape, name="A", dtype=np.float32) data_node = ov.parameter(data.shape, name="A", dtype=np.float32)
maxpool_node = ov.max_pool( node = ov.max_pool(
data_node, data_node,
strides, strides,
dilations, dilations,
@ -406,34 +259,9 @@ def test_max_pool_same_lower_auto_pads():
auto_pad, auto_pad,
index_et, index_et,
) )
comp = rt.computation(maxpool_node, data_node) assert node.get_type_name() == "MaxPool"
result = comp(data) assert node.get_output_size() == 2
assert list(node.get_output_shape(0)) == [1, 1, 4, 4]
expected = np.array( assert list(node.get_output_shape(1)) == [1, 1, 4, 4]
[ assert node.get_output_element_type(0) == Type.f32
[ assert node.get_output_element_type(1) == Type.i32
[
[0.5, 1.5, 2.5, 3.5],
[4.5, 5.5, 6.5, 7.5],
[8.5, 9.5, 10.5, 11.5],
[12.5, 13.5, 14.5, 15.5],
],
],
],
dtype=np.float32,
)
expected_idx = np.array(
[
[
[
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15],
],
],
],
dtype=np.int32,
)
assert np.allclose(result[0], expected)
assert np.allclose(result[1], expected_idx)

View File

@ -10,7 +10,6 @@ import openvino.runtime.opset8 as ops
from openvino.runtime import Model, Output, Type from openvino.runtime import Model, Output, Type
from openvino.runtime.utils.decorators import custom_preprocess_function from openvino.runtime.utils.decorators import custom_preprocess_function
from openvino.runtime import Core from openvino.runtime import Core
from tests.runtime import get_runtime
from openvino.preprocess import PrePostProcessor, ColorFormat, ResizeAlgorithm from openvino.preprocess import PrePostProcessor, ColorFormat, ResizeAlgorithm
@ -19,20 +18,18 @@ def test_graph_preprocess_mean():
parameter_a = ops.parameter(shape, dtype=np.float32, name="A") parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
model = parameter_a model = parameter_a
function = Model(model, [parameter_a], "TestFunction") function = Model(model, [parameter_a], "TestFunction")
ppp = PrePostProcessor(function) ppp = PrePostProcessor(function)
inp = ppp.input() inp = ppp.input()
prep = inp.preprocess() prep = inp.preprocess()
prep.mean(1.0) prep.mean(1.0)
function = ppp.build() function = ppp.build()
model_operators = [op.get_name().split("_")[0] for op in function.get_ordered_ops()]
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32) assert len(model_operators) == 4
expected_output = np.array([[0, 1], [2, 3]]).astype(np.float32) assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [2, 2]
runtime = get_runtime() assert function.get_output_element_type(0) == Type.f32
computation = runtime.computation(function) assert "Constant" in model_operators
output = computation(input_data) assert "Subtract" in model_operators
assert np.equal(output, expected_output).all()
def test_graph_preprocess_mean_vector(): def test_graph_preprocess_mean_vector():
@ -47,13 +44,13 @@ def test_graph_preprocess_mean_vector():
ppp.input().preprocess().mean([1., 2.]) ppp.input().preprocess().mean([1., 2.])
function = ppp.build() function = ppp.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ordered_ops()]
expected_output = np.array([[0, 0], [2, 2]]).astype(np.float32) assert len(model_operators) == 4
assert function.get_output_size() == 1
runtime = get_runtime() assert list(function.get_output_shape(0)) == [2, 2]
computation = runtime.computation(function) assert function.get_output_element_type(0) == Type.f32
output = computation(input_data) assert "Constant" in model_operators
assert np.equal(output, expected_output).all() assert "Subtract" in model_operators
def test_graph_preprocess_scale_vector(): def test_graph_preprocess_scale_vector():
@ -69,13 +66,13 @@ def test_graph_preprocess_scale_vector():
inp.preprocess().scale([0.5, 2.0]) inp.preprocess().scale([0.5, 2.0])
function = ppp.build() function = ppp.build()
input_data = np.array([[1, 2], [3, 4]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ordered_ops()]
expected_output = np.array([[2, 1], [6, 2]]).astype(np.float32) assert len(model_operators) == 4
assert function.get_output_size() == 1
runtime = get_runtime() assert list(function.get_output_shape(0)) == [2, 2]
computation = runtime.computation(function) assert function.get_output_element_type(0) == Type.f32
output = computation(input_data) assert "Constant" in model_operators
assert np.equal(output, expected_output).all() assert "Divide" in model_operators
def test_graph_preprocess_mean_scale_convert(): def test_graph_preprocess_mean_scale_convert():
@ -97,16 +94,24 @@ def test_graph_preprocess_mean_scale_convert():
inp1.preprocess().convert_element_type(Type.f32).mean(1.).custom(custom_preprocess) inp1.preprocess().convert_element_type(Type.f32).mean(1.).custom(custom_preprocess)
function = ppp.build() function = ppp.build()
input_data1 = np.array([[0, 1], [2, -2]]).astype(np.int32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
input_data2 = np.array([[1, 3], [5, 7]]).astype(np.int32) expected_ops = [
expected_output1 = np.array([[1, 0], [1, 3]]).astype(np.float32) "Parameter",
expected_output2 = np.array([[0, 1], [2, 3]]).astype(np.float32) "Convert",
"Constant",
runtime = get_runtime() "Subtract",
computation = runtime.computation(function) "Divide",
[output1, output2] = computation(input_data1, input_data2) "Result",
assert np.equal(output1, expected_output1).all() "Abs",
assert np.equal(output2, expected_output2).all() ]
assert len(model_operators) == 15
assert function.get_output_size() == 2
assert list(function.get_output_shape(0)) == [2, 2]
assert list(function.get_output_shape(1)) == [2, 2]
assert function.get_output_element_type(0) == Type.i32
assert function.get_output_element_type(1) == Type.i32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_input_output_by_name(): def test_graph_preprocess_input_output_by_name():
@ -131,16 +136,24 @@ def test_graph_preprocess_input_output_by_name():
out2.postprocess().custom(custom_preprocess) out2.postprocess().custom(custom_preprocess)
function = ppp.build() function = ppp.build()
input_data1 = np.array([[0, 1], [2, -2]]).astype(np.int32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
input_data2 = np.array([[-1, 3], [5, 7]]).astype(np.int32) expected_ops = [
expected_output1 = np.array([[1, 0], [1, 3]]).astype(np.float32) "Parameter",
expected_output2 = np.array([[1, 1], [2, 3]]).astype(np.float32) "Convert",
"Constant",
runtime = get_runtime() "Subtract",
computation = runtime.computation(function) "Divide",
[output1, output2] = computation(input_data1, input_data2) "Result",
assert np.equal(output1, expected_output1).all() "Abs",
assert np.equal(output2, expected_output2).all() ]
assert len(model_operators) == 16
assert function.get_output_size() == 2
assert list(function.get_output_shape(0)) == [2, 2]
assert list(function.get_output_shape(1)) == [2, 2]
assert function.get_output_element_type(0) == Type.i32
assert function.get_output_element_type(1) == Type.i32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_output_postprocess(): def test_graph_preprocess_output_postprocess():
@ -155,7 +168,6 @@ def test_graph_preprocess_output_postprocess():
@custom_preprocess_function @custom_preprocess_function
def custom_postprocess(output: Output): def custom_postprocess(output: Output):
return ops.abs(output) return ops.abs(output)
ppp = PrePostProcessor(function) ppp = PrePostProcessor(function)
inp = ppp.input() inp = ppp.input()
inp.tensor().set_layout(layout1) inp.tensor().set_layout(layout1)
@ -168,13 +180,22 @@ def test_graph_preprocess_output_postprocess():
out.postprocess().custom(custom_postprocess).convert_element_type(Type.f16).convert_element_type() out.postprocess().custom(custom_postprocess).convert_element_type(Type.f16).convert_element_type()
function = ppp.build() function = ppp.build()
input_data = np.array([[-1, -2, -3], [-4, -5, -6]]).astype(np.int32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[2, 4, 6], [5, 7, 9]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Convert",
computation = runtime.computation(function) "Constant",
output = computation(input_data) "Subtract",
assert np.equal(output, expected_output).all() "Transpose",
"Result",
"Abs",
]
assert len(model_operators) == 14
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [2, 3]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_spatial_static_shape(): def test_graph_preprocess_spatial_static_shape():
@ -196,13 +217,20 @@ def test_graph_preprocess_spatial_static_shape():
out.model().set_layout(layout) out.model().set_layout(layout)
function = ppp.build() function = ppp.build()
input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.int32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[[0, 1], [2, 3]], [[3, 4], [5, 6]]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Convert",
computation = runtime.computation(function) "Constant",
output = computation(input_data) "Subtract",
assert np.equal(output, expected_output).all() "Result",
]
assert len(model_operators) == 7
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [2, 2, 2]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_set_shape(): def test_graph_preprocess_set_shape():
@ -225,15 +253,19 @@ def test_graph_preprocess_set_shape():
inp.preprocess().custom(custom_crop) inp.preprocess().custom(custom_crop)
function = ppp.build() function = ppp.build()
input_data = np.array([[[0, 1, 2], [3, 4, 5], [6, 7, 8]], model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
[[9, 10, 11], [12, 13, 14], [15, 16, 17]], expected_ops = [
[[18, 19, 20], [21, 22, 23], [24, 25, 26]]]).astype(np.int32) "Parameter",
expected_output = np.array([[[13]]]).astype(np.float32) "Constant",
"Result",
runtime = get_runtime() "Slice",
computation = runtime.computation(function) ]
output = computation(input_data) assert len(model_operators) == 7
assert np.equal(output, expected_output).all() assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 1, 1]
assert function.get_output_element_type(0) == Type.i32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_set_from_tensor(): def test_graph_preprocess_set_from_tensor():
@ -282,12 +314,20 @@ def test_graph_preprocess_set_from_np_infer():
assert function.input().shape == ov.Shape([3, 3, 3]) assert function.input().shape == ov.Shape([3, 3, 3])
assert function.input().element_type == Type.i32 assert function.input().element_type == Type.i32
expected_output = np.array([[[13]]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_ops = [
runtime = get_runtime() "Parameter",
computation = runtime.computation(function) "Convert",
output = computation(input_data) "Constant",
assert np.equal(output, expected_output).all() "Result",
"Slice",
]
assert len(model_operators) == 8
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 1, 1]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_set_memory_type(): def test_graph_preprocess_set_memory_type():
@ -344,13 +384,20 @@ def test_graph_preprocess_steps(algorithm, color_format1, color_format2, is_fail
assert "is not convertible to" in str(e.value) assert "is not convertible to" in str(e.value)
else: else:
function = custom_processor.build() function = custom_processor.build()
input_data = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[[[0, 3, 6], [1, 4, 7], [2, 5, 8]]]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Constant",
computation = runtime.computation(function) "Result",
output = computation(input_data) "Gather",
assert np.equal(output, expected_output).all() "Interpolate",
]
assert len(model_operators) == 16
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 1, 3, 3]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_postprocess_layout(): def test_graph_preprocess_postprocess_layout():
@ -369,13 +416,21 @@ def test_graph_preprocess_postprocess_layout():
out.postprocess().convert_layout([0, 1, 2, 3]) out.postprocess().convert_layout([0, 1, 2, 3])
function = ppp.build() function = ppp.build()
input_data = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[[[0, 3, 6], [1, 4, 7], [2, 5, 8]]]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Constant",
computation = runtime.computation(function) "Result",
output = computation(input_data) "Gather",
assert np.equal(output, expected_output).all() "Range",
"Transpose",
]
assert len(model_operators) == 14
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 1, 3, 3]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_reverse_channels(): def test_graph_preprocess_reverse_channels():
@ -391,13 +446,20 @@ def test_graph_preprocess_reverse_channels():
inp.preprocess().mean(1.).reverse_channels() inp.preprocess().mean(1.).reverse_channels()
function = ppp.build() function = ppp.build()
input_data = np.array([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[[[4, 5], [6, 7]], [[0, 1], [2, 3]]]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Constant",
computation = runtime.computation(function) "Result",
output = computation(input_data) "Gather",
assert np.equal(output, expected_output).all() "Range",
]
assert len(model_operators) == 10
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 2, 2, 2]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_crop(): def test_graph_preprocess_crop():
@ -412,13 +474,20 @@ def test_graph_preprocess_crop():
ppp.input().preprocess().crop([0, 0, 1, 1], [1, 2, -1, -1]) ppp.input().preprocess().crop([0, 0, 1, 1], [1, 2, -1, -1])
function = ppp.build() function = ppp.build()
input_data = np.arange(18).astype(np.float32).reshape(tensor_shape) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([4, 13]).astype(np.float32).reshape(orig_shape) expected_ops = [
"Parameter",
runtime = get_runtime() "Constant",
computation = runtime.computation(function) "Result",
output = computation(input_data) "Relu",
assert np.equal(output, expected_output).all() "Slice",
]
assert len(model_operators) == 7
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 2, 1, 1]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_resize_algorithm(): def test_graph_preprocess_resize_algorithm():
@ -435,13 +504,20 @@ def test_graph_preprocess_resize_algorithm():
inp.preprocess().mean(1.).resize(resize_alg, 3, 3) inp.preprocess().mean(1.).resize(resize_alg, 3, 3)
function = ppp.build() function = ppp.build()
input_data = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[[[0, 1, 2], [3, 4, 5], [6, 7, 8]]]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Constant",
computation = runtime.computation(function) "Result",
output = computation(input_data) "Subtract",
assert np.equal(output, expected_output).all() "Interpolate",
]
assert len(model_operators) == 8
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [1, 1, 3, 3]
assert function.get_output_element_type(0) == Type.f32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_model(): def test_graph_preprocess_model():
@ -517,14 +593,23 @@ def test_graph_preprocess_model():
ppp.output(0).postprocess().custom(custom_preprocess) ppp.output(0).postprocess().custom(custom_preprocess)
function = ppp.build() function = ppp.build()
input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.float32) model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
expected_output = np.array([[[2, 1], [4, 7]], [[10, 13], [16, 19]]]).astype(np.float32) expected_ops = [
"Parameter",
runtime = get_runtime() "Constant",
computation = runtime.computation(function) "Result",
output = computation(input_data, input_data) "Subtract",
"Convert",
assert np.equal(output, expected_output).all() "Abs",
"Add",
"Divide",
]
assert len(model_operators) == 13
assert function.get_output_size() == 1
assert list(function.get_output_shape(0)) == [2, 2, 2]
assert function.get_output_element_type(0) == Type.i32
for op in expected_ops:
assert op in model_operators
def test_graph_preprocess_dump(): def test_graph_preprocess_dump():

View File

@ -2,30 +2,20 @@
# Copyright (C) 2018-2022 Intel Corporation # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset8 as ov import openvino.runtime as ov
import openvino.runtime.opset8 as ops
import numpy as np import numpy as np
from tests.runtime import get_runtime
def test_random_uniform(): def test_random_uniform():
runtime = get_runtime() input_tensor = ops.constant(np.array([2, 4, 3], dtype=np.int32))
input_tensor = ov.constant(np.array([2, 4, 3], dtype=np.int32)) min_val = ops.constant(np.array([-2.7], dtype=np.float32))
min_val = ov.constant(np.array([-2.7], dtype=np.float32)) max_val = ops.constant(np.array([3.5], dtype=np.float32))
max_val = ov.constant(np.array([3.5], dtype=np.float32))
random_uniform_node = ov.random_uniform(input_tensor, min_val, max_val, random_uniform_node = ops.random_uniform(input_tensor, min_val, max_val,
output_type="f32", global_seed=7461, output_type="f32", global_seed=7461,
op_seed=1546) op_seed=1546)
computation = runtime.computation(random_uniform_node) assert random_uniform_node.get_output_size() == 1
random_uniform_results = computation() assert random_uniform_node.get_type_name() == "RandomUniform"
expected_results = np.array([[[2.8450181, -2.3457108, 2.2134445], assert random_uniform_node.get_output_element_type(0) == ov.Type.f32
[-1.0436587, 0.79548645, 1.3023183], assert list(random_uniform_node.get_output_shape(0)) == [2, 4, 3]
[0.34447956, -2.0267959, 1.3989122],
[0.9607613, 1.5363653, 3.117298]],
[[1.570041, 2.2782724, 2.3193843],
[3.3393657, 0.63299894, 0.41231918],
[3.1739233, 0.03919673, -0.2136085],
[-1.4519991, -2.277353, 2.630727]]], dtype=np.float32)
assert np.allclose(random_uniform_results, expected_results)

View File

@ -3,163 +3,137 @@
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset9 as ov import openvino.runtime.opset9 as ov
from openvino.runtime import Shape from openvino.runtime import Shape, Type
import numpy as np import numpy as np
from tests.runtime import get_runtime
np.random.seed(0) np.random.seed(0)
def test_rdft_1d(): def test_rdft_1d():
runtime = get_runtime()
input_size = 50 input_size = 50
shape = [input_size] shape = [input_size]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array([0], dtype=np.int64)) input_axes = ov.constant(np.array([0], dtype=np.int64))
node = ov.rdft(param, input_axes) node = ov.rdft(param, input_axes)
computation = runtime.computation(node, param) assert node.get_type_name() == "RDFT"
actual = computation(data) assert node.get_output_size() == 1
np_results = np.fft.rfft(data) assert list(node.get_output_shape(0)) == [26, 2]
expected_results = np.stack((np_results.real, np_results.imag), axis=-1) assert node.get_output_element_type(0) == Type.f32
np.testing.assert_allclose(expected_results, actual[0], atol=0.0001)
def test_irdft_1d(): def test_irdft_1d():
runtime = get_runtime()
signal_size = 50 signal_size = 50
shape = [signal_size // 2 + 1, 2] shape = [signal_size // 2 + 1, 2]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array([0], dtype=np.int64)) input_axes = ov.constant(np.array([0], dtype=np.int64))
node = ov.irdft(param, input_axes, ov.constant(np.array([signal_size], dtype=np.int64))) node = ov.irdft(param, input_axes, ov.constant(np.array([signal_size], dtype=np.int64)))
computation = runtime.computation(node, param) assert node.get_type_name() == "IRDFT"
actual = computation(data) assert node.get_output_size() == 1
expected_results = np.fft.irfft(data[:, 0] + 1j * data[:, 1], signal_size) assert list(node.get_output_shape(0)) == [50]
np.testing.assert_allclose(expected_results, actual[0], atol=0.0001) assert node.get_output_element_type(0) == Type.f32
def test_rdft_2d(): def test_rdft_2d():
runtime = get_runtime()
shape = [100, 128] shape = [100, 128]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [0, 1] axes = [0, 1]
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.rdft(param, input_axes) node = ov.rdft(param, input_axes)
computation = runtime.computation(node, param) assert node.get_type_name() == "RDFT"
actual = computation(data) assert node.get_output_size() == 1
np_results = np.fft.rfftn(data, axes=axes) assert list(node.get_output_shape(0)) == [100, 65, 2]
expected_results = np.stack((np_results.real, np_results.imag), axis=-1) assert node.get_output_element_type(0) == Type.f32
np.testing.assert_allclose(expected_results, actual[0], atol=0.0007)
def test_rdft_2d_signal_size(): def test_rdft_2d_signal_size():
runtime = get_runtime()
shape = [100, 128] shape = [100, 128]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [0, 1] axes = [0, 1]
signal_size = [30, 40] signal_size = [30, 40]
axes_node = ov.constant(np.array(axes, dtype=np.int64)) axes_node = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64)) signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.rdft(param, axes_node, signal_size_node) node = ov.rdft(param, axes_node, signal_size_node)
computation = runtime.computation(node, param) assert node.get_type_name() == "RDFT"
actual = computation(data) assert node.get_output_size() == 1
np_results = np.fft.rfftn(data, s=signal_size, axes=axes) assert list(node.get_output_shape(0)) == [30, 21, 2]
expected_results = np.stack((np_results.real, np_results.imag), axis=-1) assert node.get_output_element_type(0) == Type.f32
np.testing.assert_allclose(expected_results, actual[0], atol=0.0007)
def test_irdft_2d(): def test_irdft_2d():
runtime = get_runtime()
axes = [0, 1] axes = [0, 1]
input_shape = [100, 65, 2] input_shape = [100, 65, 2]
data = np.random.uniform(0, 1, input_shape).astype(np.float32)
param = ov.parameter(Shape(input_shape), name="input", dtype=np.float32) param = ov.parameter(Shape(input_shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.irdft(param, input_axes) node = ov.irdft(param, input_axes)
computation = runtime.computation(node, param) assert node.get_type_name() == "IRDFT"
actual = computation(data) assert node.get_output_size() == 1
expected_results = np.fft.irfftn(data[:, :, 0] + 1j * data[:, :, 1], axes=axes) assert list(node.get_output_shape(0)) == [100, 128]
np.testing.assert_allclose(expected_results, actual[0], atol=0.0001) assert node.get_output_element_type(0) == Type.f32
def test_irdft_2d_signal_size(): def test_irdft_2d_signal_size():
runtime = get_runtime()
axes = [0, 1] axes = [0, 1]
input_shape = [100, 65, 2] input_shape = [100, 65, 2]
signal_size = [100, 65] signal_size = [100, 65]
data = np.random.uniform(0, 1, input_shape).astype(np.float32)
param = ov.parameter(Shape(input_shape), name="input", dtype=np.float32) param = ov.parameter(Shape(input_shape), name="input", dtype=np.float32)
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64)) signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.irdft(param, input_axes, signal_size_node) node = ov.irdft(param, input_axes, signal_size_node)
computation = runtime.computation(node, param) assert node.get_type_name() == "IRDFT"
actual = computation(data) assert node.get_output_size() == 1
expected_results = np.fft.irfftn(data[:, :, 0] + 1j * data[:, :, 1], s=signal_size, axes=axes) assert list(node.get_output_shape(0)) == [100, 65]
np.testing.assert_allclose(expected_results, actual[0], atol=0.0001) assert node.get_output_element_type(0) == Type.f32
def test_rdft_4d(): def test_rdft_4d():
runtime = get_runtime()
shape = [1, 192, 36, 64] shape = [1, 192, 36, 64]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1] axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.rdft(param, input_axes) node = ov.rdft(param, input_axes)
computation = runtime.computation(node, param) assert node.get_type_name() == "RDFT"
actual = computation(data) assert node.get_output_size() == 1
np_results = np.fft.rfftn(data, axes=axes) assert list(node.get_output_shape(0)) == [1, 192, 36, 33, 2]
expected_results = np.stack((np_results.real, np_results.imag), axis=-1) assert node.get_output_element_type(0) == Type.f32
np.testing.assert_allclose(expected_results, actual[0], atol=0.0007)
def test_rdft_4d_signal_size(): def test_rdft_4d_signal_size():
runtime = get_runtime()
shape = [1, 192, 36, 64] shape = [1, 192, 36, 64]
signal_size = [36, 64] signal_size = [36, 64]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1] axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64)) signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.rdft(param, input_axes, signal_size_node) node = ov.rdft(param, input_axes, signal_size_node)
computation = runtime.computation(node, param) assert node.get_type_name() == "RDFT"
actual = computation(data) assert node.get_output_size() == 1
np_results = np.fft.rfftn(data, signal_size, axes=axes) assert list(node.get_output_shape(0)) == [1, 192, 36, 33, 2]
expected_results = np.stack((np_results.real, np_results.imag), axis=-1) assert node.get_output_element_type(0) == Type.f32
np.testing.assert_allclose(expected_results, actual[0], atol=0.0007)
def test_irdft_4d(): def test_irdft_4d():
runtime = get_runtime()
shape = [1, 192, 36, 33, 2] shape = [1, 192, 36, 33, 2]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1] axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
node = ov.irdft(param, input_axes) node = ov.irdft(param, input_axes)
computation = runtime.computation(node, param) assert node.get_type_name() == "IRDFT"
actual = computation(data) assert node.get_output_size() == 1
expected_results = np.fft.irfftn(data[:, :, :, :, 0] + 1j * data[:, :, :, :, 1], axes=axes) assert list(node.get_output_shape(0)) == [1, 192, 36, 64]
np.testing.assert_allclose(expected_results, actual[0], atol=0.0001) assert node.get_output_element_type(0) == Type.f32
def test_irdft_4d_signal_size(): def test_irdft_4d_signal_size():
runtime = get_runtime()
shape = [1, 192, 36, 33, 2] shape = [1, 192, 36, 33, 2]
signal_size = [36, 64] signal_size = [36, 64]
data = np.random.uniform(0, 1, shape).astype(np.float32)
param = ov.parameter(Shape(shape), name="input", dtype=np.float32) param = ov.parameter(Shape(shape), name="input", dtype=np.float32)
axes = [-2, -1] axes = [-2, -1]
input_axes = ov.constant(np.array(axes, dtype=np.int64)) input_axes = ov.constant(np.array(axes, dtype=np.int64))
signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64)) signal_size_node = ov.constant(np.array(signal_size, dtype=np.int64))
node = ov.irdft(param, input_axes, signal_size_node) node = ov.irdft(param, input_axes, signal_size_node)
computation = runtime.computation(node, param) assert node.get_type_name() == "IRDFT"
actual = computation(data) assert node.get_output_size() == 1
expected_results = np.fft.irfftn(data[:, :, :, :, 0] + 1j * data[:, :, :, :, 1], signal_size, axes=axes) assert list(node.get_output_shape(0)) == [1, 192, 36, 64]
np.testing.assert_allclose(expected_results, actual[0], atol=0.0001) assert node.get_output_element_type(0) == Type.f32

View File

@ -4,60 +4,56 @@
import numpy as np import numpy as np
import pytest import pytest
from openvino.runtime import PartialShape, Dimension
import openvino.runtime.opset9 as ov import openvino.runtime.opset9 as ov
from openvino.runtime.utils.types import make_constant_node
from tests.runtime import get_runtime
from tests.test_graph.util import run_op_node
@pytest.mark.parametrize( @pytest.mark.parametrize(
("graph_api_helper", "numpy_function", "reduction_axes"), ("graph_api_helper", "reduction_axes", "expected_shape"),
[ [
(ov.reduce_max, np.max, np.array([0, 1, 2, 3])), (ov.reduce_max, np.array([0, 1, 2, 3]), []),
(ov.reduce_min, np.min, np.array([0, 1, 2, 3])), (ov.reduce_min, np.array([0, 1, 2, 3]), []),
(ov.reduce_sum, np.sum, np.array([0, 1, 2, 3])), (ov.reduce_sum, np.array([0, 1, 2, 3]), []),
(ov.reduce_prod, np.prod, np.array([0, 1, 2, 3])), (ov.reduce_prod, np.array([0, 1, 2, 3]), []),
(ov.reduce_max, np.max, np.array([0])), (ov.reduce_max, np.array([0]), [4, 3, 2]),
(ov.reduce_min, np.min, np.array([0])), (ov.reduce_min, np.array([0]), [4, 3, 2]),
(ov.reduce_sum, np.sum, np.array([0])), (ov.reduce_sum, np.array([0]), [4, 3, 2]),
(ov.reduce_prod, np.prod, np.array([0])), (ov.reduce_prod, np.array([0]), [4, 3, 2]),
(ov.reduce_max, np.max, np.array([0, 2])), (ov.reduce_max, np.array([0, 2]), [4, 2]),
(ov.reduce_min, np.min, np.array([0, 2])), (ov.reduce_min, np.array([0, 2]), [4, 2]),
(ov.reduce_sum, np.sum, np.array([0, 2])), (ov.reduce_sum, np.array([0, 2]), [4, 2]),
(ov.reduce_prod, np.prod, np.array([0, 2])), (ov.reduce_prod, np.array([0, 2]), [4, 2]),
], ],
) )
def test_reduction_ops(graph_api_helper, numpy_function, reduction_axes): def test_reduction_ops(graph_api_helper, reduction_axes, expected_shape):
shape = [2, 4, 3, 2] shape = [2, 4, 3, 2]
np.random.seed(133391) np.random.seed(133391)
input_data = np.random.randn(*shape).astype(np.float32) input_data = np.random.randn(*shape).astype(np.float32)
expected = numpy_function(input_data, axis=tuple(reduction_axes)) node = graph_api_helper(input_data, reduction_axes)
result = run_op_node([input_data], graph_api_helper, reduction_axes) assert node.get_output_size() == 1
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == expected_shape
@pytest.mark.parametrize( @pytest.mark.parametrize(
("graph_api_helper", "numpy_function", "reduction_axes"), ("graph_api_helper", "reduction_axes", "expected_shape"),
[ [
(ov.reduce_logical_and, np.logical_and.reduce, np.array([0])), (ov.reduce_logical_and, np.array([0]), [4, 3, 2]),
(ov.reduce_logical_or, np.logical_or.reduce, np.array([0])), (ov.reduce_logical_or, np.array([0]), [4, 3, 2]),
(ov.reduce_logical_and, np.logical_and.reduce, np.array([0, 2])), (ov.reduce_logical_and, np.array([0, 2]), [4, 2]),
(ov.reduce_logical_or, np.logical_or.reduce, np.array([0, 2])), (ov.reduce_logical_or, np.array([0, 2]), [4, 2]),
(ov.reduce_logical_and, np.logical_and.reduce, np.array([0, 1, 2, 3])), (ov.reduce_logical_and, np.array([0, 1, 2, 3]), []),
(ov.reduce_logical_or, np.logical_or.reduce, np.array([0, 1, 2, 3])), (ov.reduce_logical_or, np.array([0, 1, 2, 3]), []),
], ],
) )
def test_reduction_logical_ops(graph_api_helper, numpy_function, reduction_axes): def test_reduction_logical_ops(graph_api_helper, reduction_axes, expected_shape):
shape = [2, 4, 3, 2] shape = [2, 4, 3, 2]
np.random.seed(133391) np.random.seed(133391)
input_data = np.random.randn(*shape).astype(bool) input_data = np.random.randn(*shape).astype(bool)
expected = numpy_function(input_data, axis=tuple(reduction_axes)) node = graph_api_helper(input_data, reduction_axes)
result = run_op_node([input_data], graph_api_helper, reduction_axes) assert node.get_output_size() == 1
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == expected_shape
def test_topk(): def test_topk():
@ -73,21 +69,21 @@ def test_topk():
@pytest.mark.parametrize( @pytest.mark.parametrize(
("graph_api_helper", "numpy_function", "reduction_axes"), ("graph_api_helper", "reduction_axes", "expected_shape"),
[ [
(ov.reduce_mean, np.mean, np.array([0, 1, 2, 3])), (ov.reduce_mean, np.array([0, 1, 2, 3]), []),
(ov.reduce_mean, np.mean, np.array([0])), (ov.reduce_mean, np.array([0]), [4, 3, 2]),
(ov.reduce_mean, np.mean, np.array([0, 2])), (ov.reduce_mean, np.array([0, 2]), [4, 2]),
], ],
) )
def test_reduce_mean_op(graph_api_helper, numpy_function, reduction_axes): def test_reduce_mean_op(graph_api_helper, reduction_axes, expected_shape):
shape = [2, 4, 3, 2] shape = [2, 4, 3, 2]
np.random.seed(133391) np.random.seed(133391)
input_data = np.random.randn(*shape).astype(np.float32) input_data = np.random.randn(*shape).astype(np.float32)
expected = numpy_function(input_data, axis=tuple(reduction_axes)) node = graph_api_helper(input_data, reduction_axes)
result = run_op_node([input_data], graph_api_helper, reduction_axes) assert node.get_output_size() == 1
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == expected_shape
def test_non_zero(): def test_non_zero():
@ -141,16 +137,10 @@ def test_roi_align():
def test_cum_sum(input_shape, cumsum_axis, reverse): def test_cum_sum(input_shape, cumsum_axis, reverse):
input_data = np.arange(np.prod(input_shape)).reshape(input_shape) input_data = np.arange(np.prod(input_shape)).reshape(input_shape)
if reverse:
expected = np.cumsum(input_data[::-1], axis=cumsum_axis)[::-1]
else:
expected = np.cumsum(input_data, axis=cumsum_axis)
runtime = get_runtime()
node = ov.cum_sum(input_data, cumsum_axis, reverse=reverse) node = ov.cum_sum(input_data, cumsum_axis, reverse=reverse)
computation = runtime.computation(node) assert node.get_output_size() == 1
result = computation() assert node.get_type_name() == "CumSum"
assert np.allclose(result, expected) assert list(node.get_output_shape(0)) == input_shape
def test_normalize_l2(): def test_normalize_l2():
@ -161,38 +151,7 @@ def test_normalize_l2():
eps = 1e-6 eps = 1e-6
eps_mode = "add" eps_mode = "add"
runtime = get_runtime()
node = ov.normalize_l2(input_data, axes, eps, eps_mode) node = ov.normalize_l2(input_data, axes, eps, eps_mode)
computation = runtime.computation(node) assert node.get_output_size() == 1
result = computation() assert node.get_type_name() == "NormalizeL2"
assert list(node.get_output_shape(0)) == input_shape
expected = np.array(
[
0.01428571,
0.02857143,
0.04285714,
0.05714286,
0.07142857,
0.08571429,
0.1,
0.11428571,
0.12857144,
0.14285715,
0.15714286,
0.17142858,
0.18571429,
0.2,
0.21428572,
0.22857143,
0.24285714,
0.25714287,
0.27142859,
0.2857143,
0.30000001,
0.31428573,
0.32857144,
0.34285715,
],
).reshape(input_shape)
assert np.allclose(result, expected)

View File

@ -4,19 +4,15 @@
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
import numpy as np import numpy as np
from tests.runtime import get_runtime
def test_roll(): def test_roll():
runtime = get_runtime()
input_vals = np.reshape(np.arange(10), (2, 5)) input_vals = np.reshape(np.arange(10), (2, 5))
input_tensor = ov.constant(input_vals) input_tensor = ov.constant(input_vals)
input_shift = ov.constant(np.array([-10, 7], dtype=np.int32)) input_shift = ov.constant(np.array([-10, 7], dtype=np.int32))
input_axes = ov.constant(np.array([-1, 0], dtype=np.int32)) input_axes = ov.constant(np.array([-1, 0], dtype=np.int32))
roll_node = ov.roll(input_tensor, input_shift, input_axes) roll_node = ov.roll(input_tensor, input_shift, input_axes)
computation = runtime.computation(roll_node) assert roll_node.get_output_size() == 1
roll_results = computation() assert roll_node.get_type_name() == "Roll"
expected_results = np.roll(input_vals, shift=(-10, 7), axis=(-1, 0)) assert list(roll_node.get_output_shape(0)) == [2, 5]
assert np.allclose(roll_results, expected_results)

View File

@ -5,20 +5,14 @@
import numpy as np import numpy as np
import openvino.runtime.opset8 as ov import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_graph.util import run_op_node
def test_onehot(): def test_onehot():
runtime = get_runtime()
param = ov.parameter([3], dtype=np.int32) param = ov.parameter([3], dtype=np.int32)
model = ov.one_hot(param, 3, 1, 0, 0) model = ov.one_hot(param, 3, 1, 0, 0)
computation = runtime.computation(model, param) assert model.get_output_size() == 1
assert model.get_type_name() == "OneHot"
expected = np.eye(3)[np.array([1, 0, 2])] assert list(model.get_output_shape(0)) == [3, 3]
input_data = np.array([1, 0, 2], dtype=np.int32)
result = computation(input_data)
assert np.allclose(result, expected)
def test_one_hot(): def test_one_hot():
@ -27,10 +21,11 @@ def test_one_hot():
on_value = 5 on_value = 5
off_value = 10 off_value = 10
axis = -1 axis = -1
excepted = [[5, 10], [10, 5], [10, 10]]
result = run_op_node([data, depth, on_value, off_value], ov.one_hot, axis) node = ov.one_hot(data, depth, on_value, off_value, axis)
assert np.allclose(result, excepted) assert node.get_output_size() == 1
assert node.get_type_name() == "OneHot"
assert list(node.get_output_shape(0)) == [3, 2]
def test_range(): def test_range():
@ -38,5 +33,7 @@ def test_range():
stop = 35 stop = 35
step = 5 step = 5
result = run_op_node([start, stop, step], ov.range) node = ov.range(start, stop, step)
assert np.allclose(result, [5, 10, 15, 20, 25, 30]) assert node.get_output_size() == 1
assert node.get_type_name() == "Range"
assert list(node.get_output_shape(0)) == [6]

View File

@ -2,7 +2,6 @@
# Copyright (C) 2018-2022 Intel Corporation # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
import numpy as np
import openvino.runtime as ov import openvino.runtime as ov
import pytest import pytest
from openvino._pyopenvino.util import deprecation_warning from openvino._pyopenvino.util import deprecation_warning

View File

@ -2,75 +2,6 @@
# Copyright (C) 2018-2022 Intel Corporation # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0 # SPDX-License-Identifier: Apache-2.0
from typing import Any, Callable, List, Union
import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime.utils.types import NumericData
from tests.runtime import get_runtime
from string import ascii_uppercase
def _get_numpy_dtype(scalar):
return np.array([scalar]).dtype
def run_op_node(input_data, op_fun, *args):
# type: (Union[NumericData, List[NumericData]], Callable, *Any) -> List[NumericData]
"""Run computation on node performing `op_fun`.
`op_fun` has to accept a node as an argument.
This function converts passed raw input data to graph Constant Node and that form is passed
to `op_fun`.
:param input_data: The input data for performed computation.
:param op_fun: The function handler for operation we want to carry out.
:param args: The arguments passed to operation we want to carry out.
:return: The result from computations.
"""
runtime = get_runtime()
comp_args = []
op_fun_args = []
comp_inputs = []
for idx, data in enumerate(input_data):
node = None
if np.isscalar(data):
node = ov.parameter([], name=ascii_uppercase[idx], dtype=_get_numpy_dtype(data))
else:
node = ov.parameter(data.shape, name=ascii_uppercase[idx], dtype=data.dtype)
op_fun_args.append(node)
comp_args.append(node)
comp_inputs.append(data)
op_fun_args.extend(args)
node = op_fun(*op_fun_args)
computation = runtime.computation(node, *comp_args)
return computation(*comp_inputs)
def run_op_numeric_data(input_data, op_fun, *args):
# type: (NumericData, Callable, *Any) -> List[NumericData]
"""Run computation on node performing `op_fun`.
`op_fun` has to accept a scalar or an array.
This function passes input data AS IS. This means that in case they're a scalar (integral,
or floating point value) or a NumPy ndarray object they will be automatically converted
to graph's Constant Nodes.
:param input_data: The input data for performed computation.
:param op_fun: The function handler for operation we want to carry out.
:param args: The arguments passed to operation we want to carry out.
:return: The result from computations.
"""
runtime = get_runtime()
node = op_fun(input_data, *args)
computation = runtime.computation(node)
return computation()
def count_ops_of_type(func, op_type): def count_ops_of_type(func, op_type):
count = 0 count = 0
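The docstrings of the two helpers removed above describe how raw inputs were wrapped into graph nodes and executed on the test runtime. For reference, a hedged sketch of how run_op_node was typically invoked before its removal, mirroring calls such as run_op_node([input_data], graph_api_helper, reduction_axes) visible in the removed test lines elsewhere in this diff; the concrete op and data below are illustrative assumptions.

import numpy as np
import openvino.runtime.opset8 as ov
from tests.test_graph.util import run_op_node  # helper shown above; removed by this commit

# run_op_node wraps each raw input in a Parameter node, applies the op to those
# parameters, and executes the resulting graph on the test runtime.
input_data = np.random.randn(2, 4, 3, 2).astype(np.float32)
result = run_op_node([input_data], ov.reduce_sum, np.array([0]))
# result is the executed output: the sum over axis 0, with shape (4, 3, 2).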

View File

@@ -259,7 +259,7 @@ def test_deformable_psroi_pooling(dtype):
         ([2, 3, 5, 6], [7, 4], [7], 2, 2, 1, 1.0, "avg", "asymmetric", [7, 3, 2, 2]),
         ([10, 3, 5, 5], [7, 4], [7], 3, 4, 1, 1.0, "avg", "half_pixel_for_nn", [7, 3, 3, 4]),
         ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, 1.0, "avg", "half_pixel", [3, 3, 3, 4]),
-        ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, float(1), "avg", "half_pixel", [3, 3, 3, 4]),
+        ([10, 3, 5, 5], [3, 4], [3], 3, 4, 1, np.float32(1), "avg", "half_pixel", [3, 3, 3, 4]),
     ],
 )
 def test_roi_align(data_shape, rois, batch_indices, pooled_h, pooled_w, sampling_ratio, spatial_scale, mode, aligned_mode, expected_shape):
@@ -1882,11 +1882,11 @@ def test_multiclass_nms():
                             0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
                             0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
     boxes_data = boxes_data.reshape([1, 6, 4])
-    box = ng.constant(boxes_data, dtype=float)
+    box = ng.constant(boxes_data, dtype=np.float32)
     scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
                             0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
     scores_data = scores_data.reshape([1, 2, 6])
-    score = ng.constant(scores_data, dtype=float)
+    score = ng.constant(scores_data, dtype=np.float32)

     nms_node = ng.multiclass_nms(box, score, None, output_type="i32", nms_top_k=3,
                                  iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid",
@@ -1907,13 +1907,13 @@ def test_multiclass_nms():
                            [9.66, 3.36, 18.57, 13.26]],
                           [[6.50, 7.00, 13.33, 17.63],
                            [0.73, 5.34, 19.97, 19.97]]]).astype("float32")
-    box = ng.constant(boxes_data, dtype=float)
+    box = ng.constant(boxes_data, dtype=np.float32)
     scores_data = np.array([[0.34, 0.66],
                             [0.45, 0.61],
                             [0.39, 0.59]]).astype("float32")
-    score = ng.constant(scores_data, dtype=float)
+    score = ng.constant(scores_data, dtype=np.float32)
     rois_num_data = np.array([3]).astype("int32")
-    roisnum = ng.constant(rois_num_data, dtype=int)
+    roisnum = ng.constant(rois_num_data, dtype=np.int32)
     nms_node = ng.multiclass_nms(box, score, roisnum, output_type="i32", nms_top_k=3,
                                  iou_threshold=0.5, score_threshold=0.0, sort_result_type="classid",
                                  nms_eta=1.0)
@@ -1933,11 +1933,11 @@ def test_matrix_nms():
                             0.0, -0.1, 1.0, 0.9, 0.0, 10.0, 1.0, 11.0,
                             0.0, 10.1, 1.0, 11.1, 0.0, 100.0, 1.0, 101.0], dtype="float32")
     boxes_data = boxes_data.reshape([1, 6, 4])
-    box = ng.constant(boxes_data, dtype=float)
+    box = ng.constant(boxes_data, dtype=np.float32)
     scores_data = np.array([0.9, 0.75, 0.6, 0.95, 0.5, 0.3,
                             0.95, 0.75, 0.6, 0.80, 0.5, 0.3], dtype="float32")
     scores_data = scores_data.reshape([1, 2, 6])
-    score = ng.constant(scores_data, dtype=float)
+    score = ng.constant(scores_data, dtype=np.float32)

     nms_node = ng.matrix_nms(box, score, output_type="i32", nms_top_k=3,
                              score_threshold=0.0, sort_result_type="score", background_class=0,
@@ -2268,7 +2268,7 @@ def test_interpolate_opset10(dtype, expected_shape, shape_calculation_mode):

 def test_is_finite_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ng.parameter(input_shape, float, name="InputData")
+    input_node = ng.parameter(input_shape, np.float32, name="InputData")
     node = ng_opset10.is_finite(input_node)

     assert node.get_type_name() == "IsFinite"
@@ -2278,7 +2278,7 @@ def test_is_finite_opset10():

 def test_is_inf_opset10_default():
     input_shape = [2, 2, 2, 2]
-    input_node = ng.parameter(input_shape, dtype=float, name="InputData")
+    input_node = ng.parameter(input_shape, dtype=np.float32, name="InputData")
     node = ng_opset10.is_inf(input_node)

     assert node.get_type_name() == "IsInf"
@@ -2292,7 +2292,7 @@ def test_is_inf_opset10_default():

 def test_is_inf_opset10_custom_attribute():
     input_shape = [2, 2, 2]
-    input_node = ng.parameter(input_shape, dtype=float, name="InputData")
+    input_node = ng.parameter(input_shape, dtype=np.float32, name="InputData")
     attributes = {
         "detect_positive": False,
     }
@@ -2309,7 +2309,7 @@ def test_is_inf_opset10_custom_attribute():

 def test_is_inf_opset10_custom_all_attributes():
     input_shape = [2, 2, 2]
-    input_node = ng.parameter(input_shape, dtype=float, name="InputData")
+    input_node = ng.parameter(input_shape, dtype=np.float32, name="InputData")
     attributes = {
         "detect_negative": False,
         "detect_positive": True,
@@ -2327,7 +2327,7 @@ def test_is_inf_opset10_custom_all_attributes():

 def test_is_nan_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ng.parameter(input_shape, float, name="InputData")
+    input_node = ng.parameter(input_shape, np.float32, name="InputData")
     node = ng_opset10.is_nan(input_node)

     assert node.get_type_name() == "IsNaN"
@@ -2338,7 +2338,7 @@ def test_is_nan_opset10():

 def test_unique_opset10():
     input_shape = [1, 2, 3, 4]
-    input_node = ng.parameter(input_shape, float, name="input_data")
+    input_node = ng.parameter(input_shape, np.float32, name="input_data")
     axis = ng.constant([1], np.int32, [1])

     node = ng_opset10.unique(input_node, axis, False, "i32")

View File

@@ -115,14 +115,14 @@ def simple_if_without_parameters(condition_val):
     condition = ng.constant(condition_val, dtype=bool)

     # then_body
-    then_constant = ng.constant(0.7, dtype=float)
+    then_constant = ng.constant(0.7, dtype=np.float32)
     then_body_res_1 = ng.result(then_constant)
     then_body = GraphBody([], [then_body_res_1])
     then_body_inputs = []
     then_body_outputs = [TensorIteratorBodyOutputDesc(0, 0)]

     # else_body
-    else_const = ng.constant(9.0, dtype=float)
+    else_const = ng.constant(9.0, dtype=np.float32)
     else_body_res_1 = ng.result(else_const)
     else_body = GraphBody([], [else_body_res_1])
     else_body_inputs = []

View File

@@ -80,7 +80,6 @@ def test_binary_logical_op(ng_api_helper):
     parameter_b = ng.parameter(shape, name="B", dtype=bool)

     model = ng_api_helper(parameter_a, parameter_b)
-
     assert model.get_output_size() == 1
     assert list(model.get_output_shape(0)) == [2, 2]
     assert model.get_output_element_type(0) == Type.boolean

View File

@@ -7,7 +7,7 @@ from ngraph.impl import Shape


 def test_get_constant_from_source_success():
-    dtype = np.int
+    dtype = np.int32
     input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1")
     input2 = ng.parameter(Shape([25]), dtype=dtype, name="input_2")
     shape_of = ng.shape_of(input2, name="shape_of")
@@ -19,7 +19,7 @@ def test_get_constant_from_source_success():


 def test_get_constant_from_source_failed():
-    dtype = np.int
+    dtype = np.int32
     input1 = ng.parameter(Shape([5, 5]), dtype=dtype, name="input_1")
     input2 = ng.parameter(Shape([1]), dtype=dtype, name="input_2")
     reshape = ng.reshape(input1, input2, special_zero=True)
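The dtype substitutions in these test files follow from two NumPy facts: the built-in float corresponds to 64-bit float64 (while the graphs above use 32-bit data), and np.int/np.float were deprecated aliases of the Python built-ins that are removed in NumPy 1.24. A small standalone illustration, not part of the diff:

    import numpy as np

    # The built-in float widens to float64; the tests above pin 32-bit element types explicitly.
    assert np.dtype(float) == np.float64
    assert np.dtype(np.float32).itemsize == 4
    assert np.dtype(np.int32).itemsize == 4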