[PyOV] Make graph tests hardware agnostic - part 1 (#14500)

* Halfway done

* Prepare part 1

* Minor changes

* Minor changes
Przemyslaw Wysocki 2022-12-08 18:01:18 +01:00 committed by GitHub
parent 32ae862f99
commit c99abd5c24
14 changed files with 242 additions and 829 deletions
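
The change converts graph tests from executing each op on a runtime backend and comparing numerical results to building the node and checking only its static properties (type name, output count, output shape), so the tests no longer depend on the available hardware. A minimal sketch of the converted style — the test name and the relu op are stand-ins for whichever opset call a given test exercises, not code from this diff:

import numpy as np
import openvino.runtime.opset8 as ov

def test_relu_shape_only():
    # Build the node from a parameter instead of constant input data.
    data = ov.parameter((2, 3), name="data", dtype=np.float32)
    node = ov.relu(data)
    # Hardware-agnostic checks: no runtime computation is executed.
    assert node.get_type_name() == "Relu"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [2, 3]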


@ -1,69 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset8 as ov
import numpy as np
from tests.runtime import get_runtime
def test_adaptive_avg_pool():
runtime = get_runtime()
input_vals = np.reshape([
0.0, 4, 1, 3, -2, -5, -2,
-2, 1, -3, 1, -3, -4, 0,
-2, 1, -1, -2, 3, -1, -3,
-1, -2, 3, 4, -3, -4, 1,
2, 0, -4, -5, -2, -2, -3,
2, 3, 1, -5, 2, -4, -2],
(2, 3, 7))
input_tensor = ov.constant(input_vals)
output_shape = ov.constant(np.array([3], dtype=np.int32))
adaptive_pool_node = ov.adaptive_avg_pool(input_tensor, output_shape)
computation = runtime.computation(adaptive_pool_node)
adaptive_pool_results = computation()
expected_results = np.reshape([1.66666663, 0.66666669, -3.,
-1.33333337, -1.66666663, -2.33333325,
-0.66666669, 0., -0.33333334,
0., 1.33333337, -2.,
-0.66666669, -3.66666675, -2.33333325,
2., -0.66666669, -1.33333337], (2, 3, 3))
assert np.allclose(adaptive_pool_results, expected_results)
def test_adaptive_max_pool():
runtime = get_runtime()
input_vals = np.reshape([
0, 4, 1, 3, -2, -5, -2,
-2, 1, -3, 1, -3, -4, 0,
-2, 1, -1, -2, 3, -1, -3,
-1, -2, 3, 4, -3, -4, 1,
2, 0, -4, -5, -2, -2, -3,
2, 3, 1, -5, 2, -4, -2],
(2, 3, 7))
input_tensor = ov.constant(input_vals)
output_shape = ov.constant(np.array([3], dtype=np.int32))
adaptive_pool_node = ov.adaptive_max_pool(input_tensor, output_shape)
computation = runtime.computation(adaptive_pool_node)
adaptive_pool_results = computation()
expected_results = np.reshape([4, 3, -2,
1, 1, 0,
1, 3, 3,
3, 4, 1,
2, -2, -2,
3, 2, 2], (2, 3, 3))
expected_indices = np.reshape([1, 3, 4,
1, 3, 6,
1, 4, 4,
2, 3, 6,
0, 4, 4,
1, 4, 4], (2, 3, 3))
assert np.allclose(adaptive_pool_results, [expected_results, expected_indices])


@ -10,7 +10,6 @@ import pytest
import openvino.runtime.opset8 as ops
import openvino.runtime as ov
from openvino.runtime.exceptions import UserInputError
from openvino.runtime import Model, PartialShape, Shape, Type, layout_helpers
from openvino.runtime import Strides, AxisVector, Coordinate, CoordinateDiff
from openvino.runtime import Tensor, OVAny


@ -5,143 +5,66 @@
import numpy as np
import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_graph.test_ops import convolution2d
from tests.test_graph.util import run_op_node
def test_convolution_2d():
# input_x should have shape N(batch) x C x H x W
input_x = np.array(
[
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0],
],
dtype=np.float32,
).reshape(1, 1, 9, 9)
input_x = ov.parameter((1, 1, 9, 9), name="input_x", dtype=np.float32)
# filter weights should have shape M x C x kH x kW
input_filter = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape(
1, 1, 3, 3,
)
input_filter = ov.parameter((1, 1, 3, 3), name="input_filter", dtype=np.float32)
strides = np.array([1, 1])
pads_begin = np.array([1, 1])
pads_end = np.array([1, 1])
dilations = np.array([1, 1])
expected_shape = [1, 1, 9, 9]
# convolution with padding=1 should produce 9 x 9 output:
result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations)
assert np.allclose(
result,
np.array(
[
[
[
[0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -20.0, -20.0, 20.0, 20.0, 0.0, 0.0, 0.0, 0.0],
[0.0, -15.0, -15.0, 15.0, 15.0, 0.0, 0.0, 0.0, 0.0],
],
],
],
dtype=np.float32,
),
)
node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
assert node.get_type_name() == "Convolution"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
# convolution with padding=0 should produce 7 x 7 output:
strides = np.array([1, 1])
pads_begin = np.array([0, 0])
pads_end = np.array([0, 0])
dilations = np.array([1, 1])
result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations)
assert np.allclose(
result,
np.array(
[
[
[
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
],
],
],
dtype=np.float32,
),
)
expected_shape = [1, 1, 7, 7]
node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
assert node.get_type_name() == "Convolution"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
strides = np.array([2, 2])
pads_begin = np.array([0, 0])
pads_end = np.array([0, 0])
dilations = np.array([1, 1])
expected_shape = [1, 1, 4, 4]
# convolution with strides=2 should produce 4 x 4 output:
result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations)
assert np.allclose(
result,
np.array(
[
[
[
[-20.0, 20.0, 0.0, 0.0],
[-20.0, 20.0, 0.0, 0.0],
[-20.0, 20.0, 0.0, 0.0],
[-20.0, 20.0, 0.0, 0.0],
],
],
],
dtype=np.float32,
),
)
node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
assert node.get_type_name() == "Convolution"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
strides = np.array([1, 1])
pads_begin = np.array([0, 0])
pads_end = np.array([0, 0])
dilations = np.array([2, 2])
expected_shape = [1, 1, 5, 5]
# convolution with dilation=2 should produce 5 x 5 output:
result = run_op_node([input_x, input_filter], ov.convolution, strides, pads_begin, pads_end, dilations)
assert np.allclose(
result,
np.array(
[
[
[
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
[0, 0, 20, 20, 0],
],
],
],
dtype=np.float32,
),
)
node = ov.convolution(input_x, input_filter, strides, pads_begin, pads_end, dilations)
assert node.get_type_name() == "Convolution"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
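
The output sizes asserted above follow from standard convolution arithmetic; a short sketch (not part of the diff) of the formula the expected shapes rely on:

def conv_out_size(size, kernel, pad_begin, pad_end, stride, dilation):
    # Spatial output size of a convolution along one axis.
    effective_kernel = dilation * (kernel - 1) + 1
    return (size + pad_begin + pad_end - effective_kernel) // stride + 1

assert conv_out_size(9, 3, 1, 1, 1, 1) == 9  # padding=1  -> 9 x 9
assert conv_out_size(9, 3, 0, 0, 1, 1) == 7  # padding=0  -> 7 x 7
assert conv_out_size(9, 3, 0, 0, 2, 1) == 4  # stride=2   -> 4 x 4
assert conv_out_size(9, 3, 0, 0, 1, 2) == 5  # dilation=2 -> 5 x 5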
def test_convolution_backprop_data():
runtime = get_runtime()
output_spatial_shape = [9, 9]
filter_shape = [1, 1, 3, 3]
@ -151,70 +74,24 @@ def test_convolution_backprop_data():
data_node = ov.parameter(shape=data_shape)
filter_node = ov.parameter(shape=filter_shape)
output_shape_node = ov.constant(np.array(output_spatial_shape, dtype=np.int64))
expected_shape = [1, 1, 9, 9]
deconvolution = ov.convolution_backprop_data(data_node, filter_node, strides, output_shape_node)
input_data = np.array(
[
[
[
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
[-20, -20, 20, 20, 0, 0, 0],
],
],
],
dtype=np.float32,
)
filter_data = np.array([[1.0, 0.0, -1.0], [2.0, 0.0, -2.0], [1.0, 0.0, -1.0]], dtype=np.float32).reshape(
1, 1, 3, 3,
)
model = runtime.computation(deconvolution, data_node, filter_node)
result = model(input_data, filter_data)
assert np.allclose(
result,
np.array(
[
[
[
[-20.0, -20.0, 40.0, 40.0, -20.0, -20.0, 0.0, 0.0, 0.0],
[-60.0, -60.0, 120.0, 120.0, -60.0, -60.0, 0.0, 0.0, 0.0],
[-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0],
[-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0],
[-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0],
[-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0],
[-80.0, -80.0, 160.0, 160.0, -80.0, -80.0, 0.0, 0.0, 0.0],
[-60.0, -60.0, 120.0, 120.0, -60.0, -60.0, 0.0, 0.0, 0.0],
[-20.0, -20.0, 40.0, 40.0, -20.0, -20.0, 0.0, 0.0, 0.0],
],
],
],
dtype=np.float32,
),
)
assert deconvolution.get_type_name() == "ConvolutionBackpropData"
assert deconvolution.get_output_size() == 1
assert list(deconvolution.get_output_shape(0)) == expected_shape
def test_convolution_v1():
input_tensor = np.arange(-128, 128, 1, dtype=np.float32).reshape(1, 1, 16, 16)
filters = np.ones(9, dtype=np.float32).reshape(1, 1, 3, 3)
filters[0, 0, 0, 0] = -1
filters[0, 0, 1, 1] = -1
filters[0, 0, 2, 2] = -1
filters[0, 0, 0, 2] = -1
filters[0, 0, 2, 0] = -1
input_tensor = ov.parameter((1, 1, 16, 16), name="input_tensor", dtype=np.float32)
filters = ov.parameter((1, 1, 3, 3), name="filters", dtype=np.float32)
strides = np.array([1, 1])
pads_begin = np.array([0, 0])
pads_end = np.array([0, 0])
dilations = np.array([1, 1])
expected_shape = [1, 1, 14, 14]
result = run_op_node([input_tensor, filters], ov.convolution, strides, pads_begin, pads_end, dilations)
expected = convolution2d(input_tensor[0, 0], filters[0, 0]).reshape(1, 1, 14, 14)
assert np.allclose(result, expected)
node = ov.convolution(input_tensor, filters, strides, pads_begin, pads_end, dilations)
assert node.get_type_name() == "Convolution"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape


@ -6,194 +6,65 @@ import numpy as np
import openvino.runtime.opset8 as ov
from openvino.runtime import Type, Shape
from tests.runtime import get_runtime
from tests.test_graph.util import run_op_node
def test_reverse_sequence():
input_data = np.array(
[
0,
0,
3,
0,
6,
0,
9,
0,
1,
0,
4,
0,
7,
0,
10,
0,
2,
0,
5,
0,
8,
0,
11,
0,
12,
0,
15,
0,
18,
0,
21,
0,
13,
0,
16,
0,
19,
0,
22,
0,
14,
0,
17,
0,
20,
0,
23,
0,
],
dtype=np.int32,
).reshape([2, 3, 4, 2])
input_data = ov.parameter((2, 3, 4, 2), name="input_data", dtype=np.int32)
seq_lengths = np.array([1, 2, 1, 2], dtype=np.int32)
batch_axis = 2
sequence_axis = 1
expected_shape = [2, 3, 4, 2]
input_param = ov.parameter(input_data.shape, name="input", dtype=np.int32)
seq_lengths_param = ov.parameter(seq_lengths.shape, name="sequence lengths", dtype=np.int32)
model = ov.reverse_sequence(input_param, seq_lengths_param, batch_axis, sequence_axis)
runtime = get_runtime()
computation = runtime.computation(model, input_param, seq_lengths_param)
result = computation(input_data, seq_lengths)
expected = np.array(
[
0,
0,
4,
0,
6,
0,
10,
0,
1,
0,
3,
0,
7,
0,
9,
0,
2,
0,
5,
0,
8,
0,
11,
0,
12,
0,
16,
0,
18,
0,
22,
0,
13,
0,
15,
0,
19,
0,
21,
0,
14,
0,
17,
0,
20,
0,
23,
0,
],
).reshape([1, 2, 3, 4, 2])
assert np.allclose(result, expected)
assert model.get_type_name() == "ReverseSequence"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_pad_edge():
input_data = np.arange(1, 13).reshape([3, 4]).astype(np.int32)
pads_begin = np.array([0, 1], dtype=np.int32)
pads_end = np.array([2, 3], dtype=np.int32)
expected_shape = [5, 8]
input_param = ov.parameter(input_data.shape, name="input", dtype=np.int32)
input_param = ov.parameter((3, 4), name="input", dtype=np.int32)
model = ov.pad(input_param, pads_begin, pads_end, "edge")
runtime = get_runtime()
computation = runtime.computation(model, input_param)
result = computation(input_data)
expected = np.array(
[
[1, 1, 2, 3, 4, 4, 4, 4],
[5, 5, 6, 7, 8, 8, 8, 8],
[9, 9, 10, 11, 12, 12, 12, 12],
[9, 9, 10, 11, 12, 12, 12, 12],
[9, 9, 10, 11, 12, 12, 12, 12],
],
)
assert np.allclose(result, expected)
assert model.get_type_name() == "Pad"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_pad_constant():
input_data = np.arange(1, 13).reshape([3, 4]).astype(np.int32)
pads_begin = np.array([0, 1], dtype=np.int32)
pads_end = np.array([2, 3], dtype=np.int32)
expected_shape = [5, 8]
input_param = ov.parameter(input_data.shape, name="input", dtype=np.int32)
input_param = ov.parameter((3, 4), name="input", dtype=np.int32)
model = ov.pad(input_param, pads_begin, pads_end, "constant", arg_pad_value=np.array(100, dtype=np.int32))
runtime = get_runtime()
computation = runtime.computation(model, input_param)
result = computation(input_data)
expected = np.array(
[
[100, 1, 2, 3, 4, 100, 100, 100],
[100, 5, 6, 7, 8, 100, 100, 100],
[100, 9, 10, 11, 12, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100, 100],
],
)
assert np.allclose(result, expected)
assert model.get_type_name() == "Pad"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_select():
cond = np.array([[False, False], [True, False], [True, True]])
then_node = np.array([[-1, 0], [1, 2], [3, 4]], dtype=np.int32)
else_node = np.array([[11, 10], [9, 8], [7, 6]], dtype=np.int32)
expected = np.array([[11, 10], [1, 8], [3, 4]], dtype=np.int32)
expected_shape = [3, 2]
result = run_op_node([cond, then_node, else_node], ov.select)
assert np.allclose(result, expected)
node = ov.select(cond, then_node, else_node)
assert node.get_type_name() == "Select"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_gather_v8_nd():
indices_type = np.int32
data_dtype = np.float32
data = ov.parameter([2, 10, 80, 30, 50], dtype=data_dtype, name="data")
indices = ov.parameter([2, 10, 30, 40, 2], dtype=indices_type, name="indices")
data = ov.parameter([2, 10, 80, 30, 50], dtype=np.float32, name="data")
indices = ov.parameter([2, 10, 30, 40, 2], dtype=np.int32, name="indices")
batch_dims = 2
expected_shape = [2, 10, 30, 40, 50]


@ -3,9 +3,7 @@
# SPDX-License-Identifier: Apache-2.0
import openvino.runtime.opset9 as ov
from openvino.runtime import Shape
import numpy as np
from tests.runtime import get_runtime
def build_fft_input_data():
@ -14,109 +12,102 @@ def build_fft_input_data():
def test_dft_1d():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([2], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
axis=2).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.00001)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape
def test_dft_2d():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([1, 2], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
axes=[1, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000062)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape
def test_dft_3d():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
axes=[0, 1, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.0002)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape
def test_dft_1d_signal_size():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([-2], dtype=np.int64))
input_signal_size = ov.constant(np.array([20], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20,
axis=-2).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.00001)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape
def test_dft_2d_signal_size_1():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([0, 2], dtype=np.int64))
input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5],
axes=[0, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000062)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape
def test_dft_2d_signal_size_2():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([1, 2], dtype=np.int64))
input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5],
axes=[1, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000062)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape
def test_dft_3d_signal_size():
runtime = get_runtime()
input_data = build_fft_input_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64))
input_signal_size = ov.constant(np.array([4, 5, 16], dtype=np.int64))
dft_node = ov.dft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.fftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
s=[4, 5, 16], axes=[0, 1, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.0002)
expected_shape = list(np.stack((np_results.real, np_results.imag), axis=-1).shape)
assert dft_node.get_type_name() == "DFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == expected_shape


@ -8,11 +8,10 @@ import pytest
from openvino.runtime.utils.types import get_element_type
from tests import xfail_issue_58033
from tests.runtime import get_runtime
def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,
with_value=False, seed=202104):
seed=202104):
"""Test Einsum operation for given input shapes, equation, and data type.
It generates input data of given shapes and type, receives reference results using numpy,
@ -20,17 +19,11 @@ def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,
:param input_shapes: a list of tuples with shapes
:param equation: Einsum equation
:param data_type: a type of input data
:param with_value: if True - tests output data shape and type along with its value,
otherwise, tests only the output shape and type
:param seed: a seed for random generation of input data
"""
np.random.seed(seed)
num_inputs = len(input_shapes)
runtime = get_runtime()
# set absolute tolerance based on the data type
atol = 0.0 if np.issubdtype(data_type, np.integer) else 1e-04
# generate input tensors
graph_inputs = []
@ -49,12 +42,6 @@ def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,
assert list(einsum_model.get_output_shape(0)) == list(expected_result.shape)
assert einsum_model.get_output_element_type(0) == get_element_type(data_type)
# check inference result
if with_value:
computation = runtime.computation(einsum_model, *graph_inputs)
actual_result = computation(*np_inputs)
np.allclose(actual_result, expected_result, atol=atol)
@pytest.mark.parametrize("data_type", [np.float32, np.int32])
def test_dot_product(data_type):

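A hypothetical call of the trimmed helper above (the shapes and equation are illustrative, not taken from this diff); after this change it checks only the Einsum node's output shape and element type:

# Matrix multiplication expressed as an Einsum test case (illustrative values).
einsum_op_exec([(3, 5), (5, 4)], "ij,jk->ik", np.float32)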

@ -6,7 +6,6 @@ import openvino.runtime.opset9 as ov
import numpy as np
import pytest
from tests.runtime import get_runtime
from openvino.runtime.utils.types import get_element_type_str
from openvino.runtime.utils.types import get_element_type
@ -47,13 +46,6 @@ def test_eye_rectangle(num_rows, num_columns, diagonal_index, out_type):
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
"""runtime = get_runtime()
computation = runtime.computation(eye_node)
eye_results = computation()
assert np.allclose(eye_results, expected_results)
"""
@pytest.mark.parametrize(
("num_rows", "num_columns", "diagonal_index", "batch_shape", "out_type"),
@ -96,10 +88,3 @@ def test_eye_batch_shape(num_rows, num_columns, diagonal_index, batch_shape, out
assert eye_node.get_output_size() == 1
assert eye_node.get_output_element_type(0) == get_element_type(out_type)
assert tuple(eye_node.get_output_shape(0)) == expected_results.shape
# TODO: Enable with Eye reference implementation
"""runtime = get_runtime()
computation = runtime.computation(eye_node)
eye_results = computation()
assert np.allclose(eye_results, expected_results)
"""


@ -5,83 +5,64 @@
import openvino.runtime.opset8 as ov
import numpy as np
from tests.test_graph.util import run_op_node
def test_gather():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32,
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_data = ov.parameter((3, 3), name="input_data", dtype=np.float32)
input_indices = ov.parameter((1, 2), name="input_indices", dtype=np.int32)
input_axis = np.array([1], np.int32)
expected_shape = [3, 1, 2]
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2),
)
result = run_op_node([input_data], ov.gather, input_indices, input_axis)
assert np.allclose(result, expected)
node = ov.gather(input_data, input_indices, input_axis)
assert node.get_type_name() == "Gather"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_gather_with_scalar_axis():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32,
).reshape((3, 3))
input_indices = np.array([0, 2], np.int32).reshape(1, 2)
input_data = ov.parameter((3, 3), name="input_data", dtype=np.float32)
input_indices = ov.parameter((1, 2), name="input_indices", dtype=np.int32)
input_axis = np.array(1, np.int32)
expected_shape = [3, 1, 2]
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2),
)
result = run_op_node([input_data], ov.gather, input_indices, input_axis)
assert np.allclose(result, expected)
node = ov.gather(input_data, input_indices, input_axis)
assert node.get_type_name() == "Gather"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_gather_batch_dims_1():
input_data = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]], np.float32)
input_indices = np.array([[0, 0, 4],
[4, 0, 0]], np.int32)
input_data = ov.parameter((2, 5), name="input_data", dtype=np.float32)
input_indices = ov.parameter((2, 3), name="input_indices", dtype=np.int32)
input_axis = np.array([1], np.int32)
batch_dims = 1
expected_shape = [2, 3]
expected = np.array([[1, 1, 5],
[10, 6, 6]], np.float32)
result = run_op_node([input_data], ov.gather, input_indices, input_axis, batch_dims)
assert np.allclose(result, expected)
node = ov.gather(input_data, input_indices, input_axis, batch_dims)
assert node.get_type_name() == "Gather"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_gather_negative_indices():
input_data = np.array(
[1.0, 1.1, 1.2, 2.0, 2.1, 2.2, 3.0, 3.1, 3.2], np.float32,
).reshape((3, 3))
input_indices = np.array([0, -1], np.int32).reshape(1, 2)
input_data = ov.parameter((3, 3), name="input_data", dtype=np.float32)
input_indices = ov.parameter((1, 2), name="input_indices", dtype=np.int32)
input_axis = np.array([1], np.int32)
expected_shape = [3, 1, 2]
expected = np.array([1.0, 1.2, 2.0, 2.2, 3.0, 3.2], dtype=np.float32).reshape(
(3, 1, 2),
)
result = run_op_node([input_data], ov.gather, input_indices, input_axis)
assert np.allclose(result, expected)
node = ov.gather(input_data, input_indices, input_axis)
assert node.get_type_name() == "Gather"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_gather_batch_dims_1_negative_indices():
input_data = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10]], np.float32)
input_indices = np.array([[0, 1, -2],
[-2, 0, 0]], np.int32)
input_data = ov.parameter((2, 5), name="input_data", dtype=np.float32)
input_indices = ov.parameter((2, 3), name="input_indices", dtype=np.int32)
input_axis = np.array([1], np.int32)
batch_dims = 1
expected_shape = [2, 3]
expected = np.array([[1, 2, 4],
[9, 6, 6]], np.float32)
result = run_op_node([input_data], ov.gather, input_indices, input_axis, batch_dims)
assert np.allclose(result, expected)
node = ov.gather(input_data, input_indices, input_axis, batch_dims)
assert node.get_type_name() == "Gather"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape


@ -4,7 +4,6 @@
import openvino.runtime.opset8 as ov
import numpy as np
from tests.runtime import get_runtime
def get_data():
@ -13,7 +12,6 @@ def get_data():
def test_idft_1d():
runtime = get_runtime()
expected_results = get_data()
complex_input_data = np.fft.fft(np.squeeze(expected_results.view(dtype=np.complex64),
axis=-1), axis=2).astype(np.complex64)
@ -22,13 +20,12 @@ def test_idft_1d():
input_axes = ov.constant(np.array([2], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes)
computation = runtime.computation(dft_node)
dft_results = computation()
assert np.allclose(dft_results, expected_results, atol=0.000002)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
def test_idft_2d():
runtime = get_runtime()
expected_results = get_data()
complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1),
axes=[1, 2]).astype(np.complex64)
@ -37,13 +34,12 @@ def test_idft_2d():
input_axes = ov.constant(np.array([1, 2], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes)
computation = runtime.computation(dft_node)
dft_results = computation()
assert np.allclose(dft_results, expected_results, atol=0.000002)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
def test_idft_3d():
runtime = get_runtime()
expected_results = get_data()
complex_input_data = np.fft.fft2(np.squeeze(expected_results.view(dtype=np.complex64), axis=-1),
axes=[0, 1, 2]).astype(np.complex64)
@ -52,70 +48,66 @@ def test_idft_3d():
input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes)
computation = runtime.computation(dft_node)
dft_results = computation()
assert np.allclose(dft_results, expected_results, atol=0.000003)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
def test_idft_1d_signal_size():
runtime = get_runtime()
input_data = get_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([-2], dtype=np.int64))
input_signal_size = ov.constant(np.array([20], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.ifft(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), n=20,
axis=-2).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000002)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
def test_idft_2d_signal_size_1():
runtime = get_runtime()
input_data = get_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([0, 2], dtype=np.int64))
input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5],
axes=[0, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000002)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
def test_idft_2d_signal_size_2():
runtime = get_runtime()
input_data = get_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([1, 2], dtype=np.int64))
input_signal_size = ov.constant(np.array([4, 5], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.ifft2(np.squeeze(input_data.view(dtype=np.complex64), axis=-1), s=[4, 5],
axes=[1, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000002)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)
def test_idft_3d_signal_size():
runtime = get_runtime()
input_data = get_data()
input_tensor = ov.constant(input_data)
input_axes = ov.constant(np.array([0, 1, 2], dtype=np.int64))
input_signal_size = ov.constant(np.array([4, 5, 16], dtype=np.int64))
dft_node = ov.idft(input_tensor, input_axes, input_signal_size)
computation = runtime.computation(dft_node)
dft_results = computation()
np_results = np.fft.ifftn(np.squeeze(input_data.view(dtype=np.complex64), axis=-1),
s=[4, 5, 16], axes=[0, 1, 2]).astype(np.complex64)
expected_results = np.stack((np_results.real, np_results.imag), axis=-1)
assert np.allclose(dft_results, expected_results, atol=0.000002)
assert dft_node.get_type_name() == "IDFT"
assert dft_node.get_output_size() == 1
assert list(dft_node.get_output_shape(0)) == list(expected_results.shape)


@ -8,8 +8,7 @@ from openvino.runtime import Shape, Type
def test_log_softmax():
float_dtype = np.float32
data = ov.parameter(Shape([3, 10]), dtype=float_dtype, name="data")
data = ov.parameter(Shape([3, 10]), dtype=np.float32, name="data")
node = ov.log_softmax(data, 1)
assert node.get_type_name() == "LogSoftmax"


@ -3,14 +3,13 @@
# flake8: noqa
import json
import os
import numpy as np
import pytest
import openvino.runtime.opset8 as ov
from openvino.runtime import Model, PartialShape, Shape
from openvino.runtime import Model
from openvino.runtime.passes import Manager
from tests.test_graph.util import count_ops_of_type
from openvino.runtime import Core


@ -5,43 +5,22 @@
import numpy as np
import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_graph.util import run_op_node
def test_lrn():
input_image_shape = (2, 3, 2, 1)
input_image = np.arange(int(np.prod(input_image_shape))).reshape(input_image_shape).astype("f")
axes = np.array([1], dtype=np.int64)
runtime = get_runtime()
model = ov.lrn(ov.constant(input_image), ov.constant(axes), alpha=1.0, beta=2.0, bias=1.0, size=3)
computation = runtime.computation(model)
result = computation()
assert np.allclose(
result,
np.array(
[
[[[0.0], [0.05325444]], [[0.03402646], [0.01869806]], [[0.06805293], [0.03287071]]],
[[[0.00509002], [0.00356153]], [[0.00174719], [0.0012555]], [[0.00322708], [0.00235574]]],
],
dtype=np.float32,
),
)
assert model.get_type_name() == "LRN"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == [2, 3, 2, 1]
# Test LRN default parameter values
model = ov.lrn(ov.constant(input_image), ov.constant(axes))
computation = runtime.computation(model)
result = computation()
assert np.allclose(
result,
np.array(
[
[[[0.0], [0.35355338]], [[0.8944272], [1.0606602]], [[1.7888544], [1.767767]]],
[[[0.93704253], [0.97827977]], [[1.2493901], [1.2577883]], [[1.5617375], [1.5372968]]],
],
dtype=np.float32,
),
)
assert model.get_type_name() == "LRN"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == [2, 3, 2, 1]
def test_lrn_factory():
@ -50,94 +29,55 @@ def test_lrn_factory():
bias = 2.0
nsize = 3
axis = np.array([1], dtype=np.int32)
inputs = np.array(
[
[
[
[0.31403765, -0.16793324, 1.388258, -0.6902954],
[-0.3994045, -0.7833511, -0.30992958, 0.3557573],
[-0.4682631, 1.1741459, -2.414789, -0.42783254],
],
[
[-0.82199496, -0.03900861, -0.43670088, -0.53810567],
[-0.10769883, 0.75242394, -0.2507971, 1.0447186],
[-1.4777364, 0.19993274, 0.925649, -2.282516],
],
],
],
dtype=np.float32,
)
expected = np.array(
[
[
[
[0.22205527, -0.11874668, 0.98161197, -0.4881063],
[-0.2824208, -0.553902, -0.21915273, 0.2515533],
[-0.33109877, 0.8302269, -1.7073234, -0.3024961],
],
[
[-0.5812307, -0.02758324, -0.30878326, -0.38049328],
[-0.07615435, 0.53203356, -0.17733987, 0.7387126],
[-1.0448756, 0.14137045, 0.6544598, -1.6138376],
],
],
],
dtype=np.float32,
)
result = run_op_node([inputs], ov.lrn, axis, alpha, beta, bias, nsize)
inputs = ov.parameter((1, 2, 3, 4), name="inputs", dtype=np.float32)
expected_shape = [1, 2, 3, 4]
assert np.allclose(result, expected)
node = ov.lrn(inputs, axis, alpha, beta, bias, nsize)
assert node.get_type_name() == "LRN"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_batch_norm_inference():
data = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float32)
gamma = np.array([2.0, 3.0, 4.0], dtype=np.float32)
beta = np.array([0.0, 0.0, 0.0], dtype=np.float32)
mean = np.array([0.0, 0.0, 0.0], dtype=np.float32)
variance = np.array([1.0, 1.0, 1.0], dtype=np.float32)
def test_batch_norm():
data = ov.parameter((2, 3), name="data", dtype=np.float32)
gamma = ov.parameter((3,), name="gamma", dtype=np.float32)
beta = ov.parameter((3,), name="beta", dtype=np.float32)
mean = ov.parameter((3,), name="mean", dtype=np.float32)
variance = ov.parameter((3,), name="variance", dtype=np.float32)
epsilon = 9.99e-06
expected = np.array([[2.0, 6.0, 12.0], [-2.0, -6.0, -12.0]], dtype=np.float32)
expected_shape = [2, 3]
result = run_op_node([data, gamma, beta, mean, variance], ov.batch_norm_inference, epsilon)
assert np.allclose(result, expected)
node = ov.batch_norm_inference(data, gamma, beta, mean, variance, epsilon)
assert node.get_type_name() == "BatchNormInference"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_mvn_no_variance():
data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9,
1, 2, 3, 4, 5, 6, 7, 8, 9,
1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32).reshape([1, 3, 3, 3])
data = ov.parameter((1, 3, 3, 3), name="data", dtype=np.float32)
axes = np.array([2, 3], dtype=np.int64)
epsilon = 1e-9
normalize_variance = False
eps_mode = "outside_sqrt"
expected = np.array([-4, -3, -2, -1, 0, 1, 2, 3, 4,
-4, -3, -2, -1, 0, 1, 2, 3, 4,
-4, -3, -2, -1, 0, 1, 2, 3, 4], dtype=np.float32).reshape([1, 3, 3, 3])
expected_shape = [1, 3, 3, 3]
result = run_op_node([data], ov.mvn, axes, normalize_variance, epsilon, eps_mode)
node = ov.mvn(data, axes, normalize_variance, epsilon, eps_mode)
assert np.allclose(result, expected)
assert node.get_type_name() == "MVN"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape
def test_mvn():
data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9,
1, 2, 3, 4, 5, 6, 7, 8, 9,
1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32).reshape([1, 3, 3, 3])
data = ov.parameter((1, 3, 3, 3), name="data", dtype=np.float32)
axes = np.array([2, 3], dtype=np.int64)
epsilon = 1e-9
normalize_variance = True
eps_mode = "outside_sqrt"
expected = np.array([-1.5491934, -1.161895, -0.7745967,
-0.38729835, 0., 0.38729835,
0.7745967, 1.161895, 1.5491934,
-1.5491934, -1.161895, -0.7745967,
-0.38729835, 0., 0.38729835,
0.7745967, 1.161895, 1.5491934,
-1.5491934, -1.161895, -0.7745967,
-0.38729835, 0., 0.38729835,
0.7745967, 1.161895, 1.5491934], dtype=np.float32).reshape([1, 3, 3, 3])
expected_shape = [1, 3, 3, 3]
result = run_op_node([data], ov.mvn, axes, normalize_variance, epsilon, eps_mode)
node = ov.mvn(data, axes, normalize_variance, epsilon, eps_mode)
assert np.allclose(result, expected)
assert node.get_type_name() == "MVN"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == expected_shape


@ -8,8 +8,6 @@ import numpy as np
import pytest
import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_graph.util import run_op_node
@pytest.mark.parametrize(
@ -31,21 +29,18 @@ from tests.test_graph.util import run_op_node
],
)
def test_binary_op(graph_api_helper, numpy_function):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
parameter_b = ov.parameter(shape, name="B", dtype=np.float32)
model = graph_api_helper(parameter_a, parameter_b)
computation = runtime.computation(model, parameter_a, parameter_b)
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
result = computation(value_a, value_b)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize(
@ -67,8 +62,6 @@ def test_binary_op(graph_api_helper, numpy_function):
],
)
def test_binary_op_with_scalar(graph_api_helper, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
@ -76,11 +69,10 @@ def test_binary_op_with_scalar(graph_api_helper, numpy_function):
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = graph_api_helper(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize(
@ -88,21 +80,18 @@ def test_binary_op_with_scalar(graph_api_helper, numpy_function):
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
)
def test_binary_logical_op(graph_api_helper, numpy_function):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ov.parameter(shape, name="A", dtype=bool)
parameter_b = ov.parameter(shape, name="B", dtype=bool)
model = graph_api_helper(parameter_a, parameter_b)
computation = runtime.computation(model, parameter_a, parameter_b)
value_a = np.array([[True, False], [False, True]], dtype=bool)
value_b = np.array([[False, True], [False, True]], dtype=bool)
result = computation(value_a, value_b)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize(
@ -110,8 +99,6 @@ def test_binary_logical_op(graph_api_helper, numpy_function):
[(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
)
def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):
runtime = get_runtime()
value_a = np.array([[True, False], [False, True]], dtype=bool)
value_b = np.array([[False, True], [False, True]], dtype=bool)
@ -119,11 +106,10 @@ def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):
parameter_a = ov.parameter(shape, name="A", dtype=bool)
model = graph_api_helper(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize(
@ -142,8 +128,6 @@ def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):
],
)
def test_binary_operators(operator, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[4, 5], [1, 7]], dtype=np.float32)
@ -151,11 +135,10 @@ def test_binary_operators(operator, numpy_function):
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = operator(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
@pytest.mark.parametrize(
@ -174,8 +157,6 @@ def test_binary_operators(operator, numpy_function):
],
)
def test_binary_operators_with_scalar(operator, numpy_function):
runtime = get_runtime()
value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
@ -183,28 +164,31 @@ def test_binary_operators_with_scalar(operator, numpy_function):
parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
model = operator(parameter_a, value_b)
computation = runtime.computation(model, parameter_a)
result = computation(value_a)
expected = numpy_function(value_a, value_b)
assert np.allclose(result, expected)
expected_shape = numpy_function(value_a, value_b).shape
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == list(expected_shape)
def test_multiply():
param_a = np.arange(48, dtype=np.int32).reshape((8, 1, 6, 1))
param_b = np.arange(35, dtype=np.int32).reshape((7, 1, 5))
expected = np.multiply(param_a, param_b)
result = run_op_node([param_a, param_b], ov.multiply)
expected_shape = np.multiply(param_a, param_b).shape
node = ov.multiply(param_a, param_b)
assert np.allclose(result, expected)
assert node.get_type_name() == "Multiply"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape)
def test_power_v1():
param_a = np.arange(48, dtype=np.float32).reshape((8, 1, 6, 1))
param_b = np.arange(20, dtype=np.float32).reshape((4, 1, 5))
expected = np.power(param_a, param_b)
result = run_op_node([param_a, param_b], ov.power)
expected_shape = np.power(param_a, param_b).shape
node = ov.power(param_a, param_b)
assert np.allclose(result, expected)
assert node.get_type_name() == "Power"
assert node.get_output_size() == 1
assert list(node.get_output_shape(0)) == list(expected_shape)


@ -11,44 +11,30 @@ from tests import xfail_issue_36486
def test_elu_operator_with_scalar_and_array():
runtime = get_runtime()
data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
data_value = ov.parameter((2, 2), name="data_value", dtype=np.float32)
alpha_value = np.float32(3)
model = ov.elu(data_value, alpha_value)
computation = runtime.computation(model)
result = computation()
expected = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32)
assert np.allclose(result, expected)
expected_shape = [2, 2]
assert model.get_type_name() == "Elu"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_elu_operator_with_scalar():
runtime = get_runtime()
data_value = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
alpha_value = np.float32(3)
data_shape = [2, 2]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
parameter_data = ov.parameter([2, 2], name="Data", dtype=np.float32)
model = ov.elu(parameter_data, alpha_value)
computation = runtime.computation(model, parameter_data)
result = computation(data_value)
expected = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32)
assert np.allclose(result, expected)
expected_shape = [2, 2]
assert model.get_type_name() == "Elu"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_fake_quantize():
runtime = get_runtime()
data_value = np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4)
input_low_value = np.float32(0)
input_high_value = np.float32(23)
output_low_value = np.float32(2)
output_high_value = np.float32(16)
levels = np.int32(4)
data_shape = [1, 2, 3, 4]
@ -67,190 +53,81 @@ def test_fake_quantize():
parameter_output_high,
levels,
)
computation = runtime.computation(
model,
parameter_data,
parameter_input_low,
parameter_input_high,
parameter_output_low,
parameter_output_high,
)
result = computation(data_value, input_low_value, input_high_value, output_low_value, output_high_value)
expected = np.array(
[
[
[
[
[2.0, 2.0, 2.0, 2.0],
[6.6666669, 6.6666669, 6.6666669, 6.6666669],
[6.6666669, 6.6666669, 6.6666669, 6.6666669],
],
[
[11.33333301, 11.33333301, 11.33333301, 11.33333301],
[11.33333301, 11.33333301, 11.33333301, 11.33333301],
[16.0, 16.0, 16.0, 16.0],
],
],
],
],
dtype=np.float32,
)
assert np.allclose(result, expected)
expected_shape = [1, 2, 3, 4]
assert model.get_type_name() == "FakeQuantize"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_depth_to_space():
runtime = get_runtime()
data_value = np.array(
[
[
[[0, 1, 2], [3, 4, 5]],
[[6, 7, 8], [9, 10, 11]],
[[12, 13, 14], [15, 16, 17]],
[[18, 19, 20], [21, 22, 23]],
],
],
dtype=np.float32,
)
mode = "blocks_first"
block_size = np.int32(2)
data_shape = [1, 4, 2, 3]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.depth_to_space(parameter_data, mode, block_size)
computation = runtime.computation(model, parameter_data)
result = computation(data_value)
expected = np.array(
[[[[0, 6, 1, 7, 2, 8], [12, 18, 13, 19, 14, 20], [3, 9, 4, 10, 5, 11], [15, 21, 16, 22, 17, 23]]]],
dtype=np.float32,
)
assert np.allclose(result, expected)
expected_shape = [1, 1, 4, 6]
assert model.get_type_name() == "DepthToSpace"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_space_to_batch():
runtime = get_runtime()
data_value = np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32)
data_shape = data_value.shape
data_shape = [1, 2, 2, 3]
block_shape = np.array([1, 2, 3, 2], dtype=np.int64)
pads_begin = np.array([0, 0, 1, 0], dtype=np.int64)
pads_end = np.array([0, 0, 0, 1], dtype=np.int64)
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.space_to_batch(parameter_data, block_shape, pads_begin, pads_end)
computation = runtime.computation(model, parameter_data)
result = computation(data_value)
expected = np.array(
[
[[[0, 0]]],
[[[0, 0]]],
[[[0, 2]]],
[[[1, 0]]],
[[[3, 5]]],
[[[4, 0]]],
[[[0, 0]]],
[[[0, 0]]],
[[[6, 8]]],
[[[7, 0]]],
[[[9, 11]]],
[[[10, 0]]],
],
dtype=np.float32,
)
assert np.allclose(result, expected)
expected_shape = [12, 1, 1, 2]
assert model.get_type_name() == "SpaceToBatch"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_batch_to_space():
runtime = get_runtime()
data = np.array(
[
[[[0, 0]]],
[[[0, 0]]],
[[[0, 2]]],
[[[1, 0]]],
[[[3, 5]]],
[[[4, 0]]],
[[[0, 0]]],
[[[0, 0]]],
[[[6, 8]]],
[[[7, 0]]],
[[[9, 11]]],
[[[10, 0]]],
],
dtype=np.float32,
)
data_shape = data.shape
data_shape = [12, 1, 1, 2]
block_shape = np.array([1, 2, 3, 2], dtype=np.int64)
crops_begin = np.array([0, 0, 1, 0], dtype=np.int64)
crops_end = np.array([0, 0, 0, 1], dtype=np.int64)
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
model = ov.batch_to_space(parameter_data, block_shape, crops_begin, crops_end)
computation = runtime.computation(model, parameter_data)
result = computation(data)
expected = np.array([[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32)
assert np.allclose(result, expected)
expected_shape = [1, 2, 2, 3]
assert model.get_type_name() == "BatchToSpace"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_clamp_operator():
runtime = get_runtime()
data_shape = [2, 2]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
min_value = np.float32(3)
max_value = np.float32(12)
model = ov.clamp(parameter_data, min_value, max_value)
computation = runtime.computation(model, parameter_data)
data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32)
result = computation(data_value)
expected = np.clip(data_value, min_value, max_value)
assert np.allclose(result, expected)
def test_clamp_operator_with_array():
runtime = get_runtime()
data_value = np.array([[-5, 9], [45, 3]], dtype=np.float32)
min_value = np.float32(3)
max_value = np.float32(12)
model = ov.clamp(data_value, min_value, max_value)
computation = runtime.computation(model)
result = computation()
expected = np.clip(data_value, min_value, max_value)
assert np.allclose(result, expected)
expected_shape = [2, 2]
assert model.get_type_name() == "Clamp"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_squeeze_operator():
runtime = get_runtime()
data_shape = [1, 2, 1, 3, 1, 1]
parameter_data = ov.parameter(data_shape, name="Data", dtype=np.float32)
data_value = np.arange(6.0, dtype=np.float32).reshape([1, 2, 1, 3, 1, 1])
axes = [2, 4]
model = ov.squeeze(parameter_data, axes)
computation = runtime.computation(model, parameter_data)
result = computation(data_value)
expected = np.arange(6.0, dtype=np.float32).reshape([1, 2, 3, 1])
assert np.allclose(result, expected)
expected_shape = [1, 2, 3, 1]
assert model.get_type_name() == "Squeeze"
assert model.get_output_size() == 1
assert list(model.get_output_shape(0)) == expected_shape
def test_squared_difference_operator():