[PyOV] tests refactoring (#21410)
* [PyOV] tests refactoring
* remove ngraph mentions
* codestyle
* replace func
parent 055e3d274f
commit 0e642e984b
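The refactoring below is mechanical: test locals and helpers previously named `function` or `func` (a holdover from the nGraph-era `Function` class) become `model`, model friendly names change from "TestFunction" to "TestModel", and test names no longer mention ngraph. A minimal sketch of the pattern, using illustrative names rather than any specific test from this diff:

# Sketch of the renaming applied throughout this commit (illustrative, not a real test).
import numpy as np
import openvino.runtime.opset13 as ops
from openvino import Model

parameter_a = ops.parameter([2, 2], dtype=np.float32, name="A")

# before: function = Model(parameter_a, [parameter_a], "TestFunction")
# after:
model = Model(parameter_a, [parameter_a], "TestModel")
ordered_ops = model.get_ordered_ops()
assert model.get_friendly_name() == "TestModel"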
@@ -2,6 +2,6 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0

-# openvino.dll directory path visibility is needed to use _pyngraph module
+# openvino.dll directory path visibility is needed to use _pyopenvino module
 # import below causes adding this path to os.environ["PATH"]
 import openvino  # noqa: F401 'imported but unused'
@@ -27,7 +27,7 @@ from openvino._pyopenvino import DescriptorTensor
 from openvino.runtime.utils.types import get_element_type


-def test_graph_function_api():
+def test_graph_api():
     shape = [2, 2]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
     parameter_b = ops.parameter(shape, dtype=Type.f32, name="B")
@@ -39,33 +39,33 @@ def test_graph_function_api():
     assert parameter_a.partial_shape == PartialShape([2, 2])
     parameter_a.layout = Layout("NC")
     assert parameter_a.layout == Layout("NC")
-    function = Model(model, [parameter_a, parameter_b, parameter_c], "TestModel")
+    model = Model(model, [parameter_a, parameter_b, parameter_c], "TestModel")

-    function.get_parameters()[1].set_partial_shape(PartialShape([3, 4, 5]))
+    model.get_parameters()[1].set_partial_shape(PartialShape([3, 4, 5]))

-    ordered_ops = function.get_ordered_ops()
+    ordered_ops = model.get_ordered_ops()
     op_types = [op.get_type_name() for op in ordered_ops]
     assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"]
-    assert len(function.get_ops()) == 6
-    assert function.get_output_size() == 1
-    assert ["A", "B", "C"] == [input.get_node().friendly_name for input in function.inputs]
-    assert ["Result"] == [output.get_node().get_type_name() for output in function.outputs]
-    assert function.input(0).get_node().friendly_name == "A"
-    assert function.output(0).get_node().get_type_name() == "Result"
-    assert function.input(tensor_name="A").get_node().friendly_name == "A"
-    assert function.output().get_node().get_type_name() == "Result"
-    assert function.get_output_op(0).get_type_name() == "Result"
-    assert function.get_output_element_type(0) == parameter_a.get_element_type()
-    assert list(function.get_output_shape(0)) == [2, 2]
-    assert (function.get_parameters()[1].get_partial_shape()) == PartialShape([3, 4, 5])
-    assert len(function.get_parameters()) == 3
-    results = function.get_results()
+    assert len(model.get_ops()) == 6
+    assert model.get_output_size() == 1
+    assert ["A", "B", "C"] == [input.get_node().friendly_name for input in model.inputs]
+    assert ["Result"] == [output.get_node().get_type_name() for output in model.outputs]
+    assert model.input(0).get_node().friendly_name == "A"
+    assert model.output(0).get_node().get_type_name() == "Result"
+    assert model.input(tensor_name="A").get_node().friendly_name == "A"
+    assert model.output().get_node().get_type_name() == "Result"
+    assert model.get_output_op(0).get_type_name() == "Result"
+    assert model.get_output_element_type(0) == parameter_a.get_element_type()
+    assert list(model.get_output_shape(0)) == [2, 2]
+    assert (model.get_parameters()[1].get_partial_shape()) == PartialShape([3, 4, 5])
+    assert len(model.get_parameters()) == 3
+    results = model.get_results()
     assert len(results) == 1
     assert results[0].get_output_element_type(0) == Type.f32
     assert results[0].get_output_partial_shape(0) == PartialShape([2, 2])
     results[0].layout = Layout("NC")
     assert results[0].layout.to_string() == Layout("NC")
-    assert function.get_friendly_name() == "TestModel"
+    assert model.get_friendly_name() == "TestModel"


 @pytest.mark.parametrize(
@@ -340,18 +340,18 @@ def test_repr_dynamic_shape():
     shape = PartialShape([-1, 2])
     parameter_a = ov.parameter(shape, dtype=np.float32, name="A")
     parameter_b = ov.parameter(shape, dtype=np.float32, name="B")
-    model = parameter_a + parameter_b
-    function = Model(model, [parameter_a, parameter_b], "simple_dyn_shapes_graph")
+    param_sum = parameter_a + parameter_b
+    model = Model(param_sum, [parameter_a, parameter_b], "simple_dyn_shapes_graph")

     assert (
-        repr(function)
+        repr(model)
         == "<Model: 'simple_dyn_shapes_graph'\ninputs["
         + "\n<ConstOutput: names[A] shape[?,2] type: f32>,"
         + "\n<ConstOutput: names[B] shape[?,2] type: f32>\n]"
         + "\noutputs[\n<ConstOutput: names[] shape[?,2] type: f32>\n]>"
     )

-    ops = function.get_ordered_ops()
+    ops = model.get_ordered_ops()
     for op in ops:
         assert "[?,2]" in repr(op)

@@ -20,15 +20,15 @@ def create_model():
     parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
     parameter_b = ops.parameter(shape, dtype=np.float32, name="B")
     parameter_c = ops.parameter(shape, dtype=np.float32, name="C")
-    model = ops.floor(ops.minimum(ops.abs(parameter_a), ops.multiply(parameter_b, parameter_c)))
-    func = Model(model, [parameter_a, parameter_b, parameter_c], "Model")
-    return func
+    floor_op = ops.floor(ops.minimum(ops.abs(parameter_a), ops.multiply(parameter_b, parameter_c)))
+    model = Model(floor_op, [parameter_a, parameter_b, parameter_c], "Model")
+    return model


 def test_constant_folding():
     node_constant = ops.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32))
     node_ceil = ops.ceiling(node_constant)
-    model = Model(node_ceil, [], "TestFunction")
+    model = Model(node_ceil, [], "TestModel")

     assert count_ops_of_type(model, node_ceil) == 1
     assert count_ops_of_type(model, node_constant) == 1
@@ -17,17 +17,17 @@ def test_graph_preprocess_mean():
     shape = [2, 2]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
     model = parameter_a
-    function = Model(model, [parameter_a], "TestFunction")
-    ppp = PrePostProcessor(function)
+    model = Model(model, [parameter_a], "TestModel")
+    ppp = PrePostProcessor(model)
     inp = ppp.input()
     prep = inp.preprocess()
     prep.mean(1.0)
-    function = ppp.build()
-    model_operators = [op.get_name().split("_")[0] for op in function.get_ordered_ops()]
+    model = ppp.build()
+    model_operators = [op.get_name().split("_")[0] for op in model.get_ordered_ops()]
     assert len(model_operators) == 4
-    assert function.get_output_size() == 1
-    assert list(function.get_output_shape(0)) == [2, 2]
-    assert function.get_output_element_type(0) == Type.f32
+    assert model.get_output_size() == 1
+    assert list(model.get_output_shape(0)) == [2, 2]
+    assert model.get_output_element_type(0) == Type.f32
     assert "Constant" in model_operators
     assert "Subtract" in model_operators

@ -36,19 +36,19 @@ def test_graph_preprocess_mean_vector():
|
||||
shape = [2, 2]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout = Layout("NC")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
ppp.input().tensor().set_layout(layout)
|
||||
ppp.input().preprocess().mean([1., 2.])
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ordered_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ordered_ops()]
|
||||
assert len(model_operators) == 4
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [2, 2]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [2, 2]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
assert "Constant" in model_operators
|
||||
assert "Subtract" in model_operators
|
||||
|
||||
@ -57,20 +57,20 @@ def test_graph_preprocess_scale_vector():
|
||||
shape = [2, 2]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout = Layout("NC")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_layout(layout)
|
||||
inp.preprocess().scale([0.5, 2.0])
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ordered_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ordered_ops()]
|
||||
assert len(model_operators) == 4
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [2, 2]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [2, 2]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
assert "Constant" in model_operators
|
||||
assert "Divide" in model_operators
|
||||
|
||||
@ -79,22 +79,22 @@ def test_graph_preprocess_mean_scale_convert():
|
||||
shape = [2, 2]
|
||||
param1 = ops.parameter(shape, dtype=np.int32, name="A")
|
||||
param2 = ops.parameter(shape, dtype=np.int32, name="B")
|
||||
function = Model([param1, param2], [param1, param2], "TestFunction")
|
||||
model = Model([param1, param2], [param1, param2], "TestModel")
|
||||
|
||||
@custom_preprocess_function
|
||||
def custom_preprocess(output: Output):
|
||||
return ops.abs(output)
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp2 = ppp.input(1)
|
||||
inp2.tensor().set_element_type(Type.i32)
|
||||
inp2.preprocess().convert_element_type(Type.f32).mean(1.).scale(2.)
|
||||
inp2.preprocess().convert_element_type()
|
||||
inp1 = ppp.input(0)
|
||||
inp1.preprocess().convert_element_type(Type.f32).mean(1.).custom(custom_preprocess)
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Convert",
|
||||
@ -105,11 +105,11 @@ def test_graph_preprocess_mean_scale_convert():
|
||||
"Abs",
|
||||
]
|
||||
assert len(model_operators) == 15
|
||||
assert function.get_output_size() == 2
|
||||
assert list(function.get_output_shape(0)) == [2, 2]
|
||||
assert list(function.get_output_shape(1)) == [2, 2]
|
||||
assert function.get_output_element_type(0) == Type.i32
|
||||
assert function.get_output_element_type(1) == Type.i32
|
||||
assert model.get_output_size() == 2
|
||||
assert list(model.get_output_shape(0)) == [2, 2]
|
||||
assert list(model.get_output_shape(1)) == [2, 2]
|
||||
assert model.get_output_element_type(0) == Type.i32
|
||||
assert model.get_output_element_type(1) == Type.i32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -118,13 +118,13 @@ def test_graph_preprocess_input_output_by_name():
|
||||
shape = [2, 2]
|
||||
param1 = ops.parameter(shape, dtype=np.int32, name="A")
|
||||
param2 = ops.parameter(shape, dtype=np.int32, name="B")
|
||||
function = Model([param1, param2], [param1, param2], "TestFunction")
|
||||
model = Model([param1, param2], [param1, param2], "TestModel")
|
||||
|
||||
@custom_preprocess_function
|
||||
def custom_preprocess(output: Output):
|
||||
return ops.abs(output)
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp2 = ppp.input("B")
|
||||
inp2.tensor().set_element_type(Type.i32)
|
||||
inp2.preprocess().convert_element_type(Type.f32).mean(1.).scale(2.)
|
||||
@ -134,9 +134,9 @@ def test_graph_preprocess_input_output_by_name():
|
||||
out1.postprocess().custom(custom_preprocess)
|
||||
out2 = ppp.output("B")
|
||||
out2.postprocess().custom(custom_preprocess)
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Convert",
|
||||
@ -147,11 +147,11 @@ def test_graph_preprocess_input_output_by_name():
|
||||
"Abs",
|
||||
]
|
||||
assert len(model_operators) == 16
|
||||
assert function.get_output_size() == 2
|
||||
assert list(function.get_output_shape(0)) == [2, 2]
|
||||
assert list(function.get_output_shape(1)) == [2, 2]
|
||||
assert function.get_output_element_type(0) == Type.i32
|
||||
assert function.get_output_element_type(1) == Type.i32
|
||||
assert model.get_output_size() == 2
|
||||
assert list(model.get_output_shape(0)) == [2, 2]
|
||||
assert list(model.get_output_shape(1)) == [2, 2]
|
||||
assert model.get_output_element_type(0) == Type.i32
|
||||
assert model.get_output_element_type(1) == Type.i32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -160,7 +160,7 @@ def test_graph_preprocess_output_postprocess():
|
||||
shape = [2, 3]
|
||||
parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout1 = Layout("NC")
|
||||
layout2 = Layout("CN")
|
||||
layout3 = [1, 0]
|
||||
@ -168,7 +168,7 @@ def test_graph_preprocess_output_postprocess():
|
||||
@custom_preprocess_function
|
||||
def custom_postprocess(output: Output):
|
||||
return ops.abs(output)
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_layout(layout1)
|
||||
inp.preprocess().convert_element_type(Type.f32).mean([1.0, 2.0, 3.0])
|
||||
@ -178,9 +178,9 @@ def test_graph_preprocess_output_postprocess():
|
||||
out.postprocess().convert_element_type(Type.f32)
|
||||
out.postprocess().convert_layout(layout2).convert_layout(layout3)
|
||||
out.postprocess().custom(custom_postprocess).convert_element_type(Type.f16).convert_element_type()
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Convert",
|
||||
@ -191,9 +191,9 @@ def test_graph_preprocess_output_postprocess():
|
||||
"Abs",
|
||||
]
|
||||
assert len(model_operators) == 14
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [2, 3]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [2, 3]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -202,12 +202,12 @@ def test_graph_preprocess_spatial_static_shape():
|
||||
shape = [3, 2, 2]
|
||||
parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout = Layout("CHW")
|
||||
|
||||
color_format = ColorFormat.RGB
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_layout(layout).set_spatial_static_shape(2, 2).set_color_format(color_format)
|
||||
inp.preprocess().convert_element_type(Type.f32).mean([1., 2., 3.])
|
||||
@ -215,9 +215,9 @@ def test_graph_preprocess_spatial_static_shape():
|
||||
out = ppp.output()
|
||||
out.tensor().set_layout(layout).set_element_type(Type.f32)
|
||||
out.model().set_layout(layout)
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Convert",
|
||||
@ -226,9 +226,9 @@ def test_graph_preprocess_spatial_static_shape():
|
||||
"Result",
|
||||
]
|
||||
assert len(model_operators) == 7
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [3, 2, 2]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [3, 2, 2]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -237,7 +237,7 @@ def test_graph_preprocess_set_shape():
|
||||
shape = [1, 1, 1]
|
||||
parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
@custom_preprocess_function
|
||||
def custom_crop(out_node: Output):
|
||||
@ -247,13 +247,13 @@ def test_graph_preprocess_set_shape():
|
||||
axis = ops.constant(np.array([0, 1, 2]), dtype=np.int32)
|
||||
return ops.slice(out_node, start, stop, step, axis)
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_shape([3, 3, 3])
|
||||
inp.preprocess().custom(custom_crop)
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -261,9 +261,9 @@ def test_graph_preprocess_set_shape():
|
||||
"Slice",
|
||||
]
|
||||
assert len(model_operators) == 7
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 1, 1]
|
||||
assert function.get_output_element_type(0) == Type.i32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 1, 1]
|
||||
assert model.get_output_element_type(0) == Type.i32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -274,25 +274,25 @@ def test_graph_preprocess_set_from_tensor():
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
parameter_a.set_layout(Layout("NHWC"))
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
input_data = Tensor(Type.i32, inp_shape)
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_from(input_data)
|
||||
inp.preprocess().resize(ResizeAlgorithm.RESIZE_LINEAR)
|
||||
function = ppp.build()
|
||||
assert function.input().shape == Shape(inp_shape)
|
||||
assert function.input().element_type == Type.i32
|
||||
assert function.output().shape == Shape(shape)
|
||||
assert function.output().element_type == Type.f32
|
||||
model = ppp.build()
|
||||
assert model.input().shape == Shape(inp_shape)
|
||||
assert model.input().element_type == Type.i32
|
||||
assert model.output().shape == Shape(shape)
|
||||
assert model.output().element_type == Type.f32
|
||||
|
||||
|
||||
def test_graph_preprocess_set_from_np_infer():
|
||||
shape = [1, 1, 1]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
@custom_preprocess_function
|
||||
def custom_crop(out_node: Output):
|
||||
@ -306,15 +306,15 @@ def test_graph_preprocess_set_from_np_infer():
|
||||
[[9, 10, 11], [12, 13, 14], [15, 16, 17]],
|
||||
[[18, 19, 20], [21, 22, 23], [24, 25, 26]]]).astype(np.int32)
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_from(input_data)
|
||||
inp.preprocess().convert_element_type().custom(custom_crop)
|
||||
function = ppp.build()
|
||||
assert function.input().shape == Shape([3, 3, 3])
|
||||
assert function.input().element_type == Type.i32
|
||||
model = ppp.build()
|
||||
assert model.input().shape == Shape([3, 3, 3])
|
||||
assert model.input().element_type == Type.i32
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Convert",
|
||||
@ -323,9 +323,9 @@ def test_graph_preprocess_set_from_np_infer():
|
||||
"Slice",
|
||||
]
|
||||
assert len(model_operators) == 8
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 1, 1]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 1, 1]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -335,13 +335,13 @@ def test_graph_preprocess_set_memory_type():
|
||||
parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
|
||||
op = ops.relu(parameter_a)
|
||||
model = op
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
ppp.input().tensor().set_memory_type("some_memory_type")
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
assert any(key for key in function.input().rt_info if "memory_type" in key)
|
||||
assert any(key for key in model.input().rt_info if "memory_type" in key)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -380,11 +380,11 @@ def test_graph_preprocess_steps(algorithm, color_format1, color_format2, is_fail
|
||||
shape = [1, 3, 3, 3]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout1 = Layout("NCWH")
|
||||
layout2 = Layout("NCHW")
|
||||
|
||||
custom_processor = PrePostProcessor(function)
|
||||
custom_processor = PrePostProcessor(model)
|
||||
inp = custom_processor.input()
|
||||
inp.tensor().set_layout(layout1).set_color_format(color_format1, [])
|
||||
inp.preprocess().mean(1.).resize(algorithm, 3, 3)
|
||||
@ -392,11 +392,11 @@ def test_graph_preprocess_steps(algorithm, color_format1, color_format2, is_fail
|
||||
|
||||
if is_failing:
|
||||
with pytest.raises(RuntimeError) as e:
|
||||
function = custom_processor.build()
|
||||
model = custom_processor.build()
|
||||
assert "is not convertible to" in str(e.value)
|
||||
else:
|
||||
function = custom_processor.build()
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model = custom_processor.build()
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -405,9 +405,9 @@ def test_graph_preprocess_steps(algorithm, color_format1, color_format2, is_fail
|
||||
"Interpolate",
|
||||
]
|
||||
assert len(model_operators) == 15
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 3, 3, 3]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 3, 3, 3]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -420,37 +420,37 @@ def test_graph_preprocess_steps(algorithm, color_format1, color_format2, is_fail
|
||||
def test_graph_preprocess_convert_color(color_format1, color_format2, tensor_in_shape, model_in_shape):
|
||||
parameter_a = ops.parameter(model_in_shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
custom_processor = PrePostProcessor(function)
|
||||
custom_processor = PrePostProcessor(model)
|
||||
inp = custom_processor.input()
|
||||
inp.tensor().set_color_format(color_format1)
|
||||
inp.preprocess().convert_color(color_format2)
|
||||
function = custom_processor.build()
|
||||
model = custom_processor.build()
|
||||
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.inputs[0].shape) == tensor_in_shape
|
||||
assert list(function.get_output_shape(0)) == model_in_shape
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.inputs[0].shape) == tensor_in_shape
|
||||
assert list(model.get_output_shape(0)) == model_in_shape
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
|
||||
|
||||
def test_graph_preprocess_postprocess_layout():
|
||||
shape = [1, 1, 3, 3]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout1 = Layout("NCWH")
|
||||
layout2 = Layout("NCHW")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_layout(layout1)
|
||||
inp.preprocess().mean(1.).convert_layout(layout2).reverse_channels()
|
||||
out = ppp.output()
|
||||
out.postprocess().convert_layout([0, 1, 2, 3])
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -460,9 +460,9 @@ def test_graph_preprocess_postprocess_layout():
|
||||
"Transpose",
|
||||
]
|
||||
assert len(model_operators) == 14
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 1, 3, 3]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 1, 3, 3]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -471,16 +471,16 @@ def test_graph_preprocess_reverse_channels():
|
||||
shape = [1, 2, 2, 2]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
layout1 = Layout("NCWH")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_layout(layout1)
|
||||
inp.preprocess().mean(1.).reverse_channels()
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -489,9 +489,9 @@ def test_graph_preprocess_reverse_channels():
|
||||
"Range",
|
||||
]
|
||||
assert len(model_operators) == 10
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 2, 2, 2]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 2, 2, 2]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -501,14 +501,14 @@ def test_graph_preprocess_crop():
|
||||
tensor_shape = [1, 2, 3, 3]
|
||||
parameter_a = ops.parameter(orig_shape, dtype=np.float32, name="A")
|
||||
model = ops.relu(parameter_a)
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
ppp.input().tensor().set_shape(tensor_shape)
|
||||
ppp.input().preprocess().crop([0, 0, 1, 1], [1, 2, -1, -1])
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -517,9 +517,9 @@ def test_graph_preprocess_crop():
|
||||
"Slice",
|
||||
]
|
||||
assert len(model_operators) == 8
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 2, 1, 1]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 2, 1, 1]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -528,17 +528,17 @@ def test_graph_preprocess_resize_algorithm():
|
||||
shape = [1, 1, 3, 3]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
resize_alg = ResizeAlgorithm.RESIZE_CUBIC
|
||||
layout1 = Layout("NCWH")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
inp = ppp.input()
|
||||
inp.tensor().set_layout(layout1)
|
||||
inp.preprocess().mean(1.).resize(resize_alg, 3, 3)
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -547,9 +547,9 @@ def test_graph_preprocess_resize_algorithm():
|
||||
"Interpolate",
|
||||
]
|
||||
assert len(model_operators) == 7
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [1, 1, 3, 3]
|
||||
assert function.get_output_element_type(0) == Type.f32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [1, 1, 3, 3]
|
||||
assert model.get_output_element_type(0) == Type.f32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -615,19 +615,19 @@ def test_graph_preprocess_model():
|
||||
</edges>
|
||||
</net>""")
|
||||
core = Core()
|
||||
function = core.read_model(model=model)
|
||||
model = core.read_model(model=model)
|
||||
|
||||
@custom_preprocess_function
|
||||
def custom_preprocess(output: Output):
|
||||
return ops.abs(output)
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
ppp.input(1).preprocess().convert_element_type(Type.f32).scale(0.5)
|
||||
ppp.input(0).preprocess().convert_element_type(Type.f32).mean(5.)
|
||||
ppp.output(0).postprocess().custom(custom_preprocess)
|
||||
function = ppp.build()
|
||||
model = ppp.build()
|
||||
|
||||
model_operators = [op.get_name().split("_")[0] for op in function.get_ops()]
|
||||
model_operators = [op.get_name().split("_")[0] for op in model.get_ops()]
|
||||
expected_ops = [
|
||||
"Parameter",
|
||||
"Constant",
|
||||
@ -639,9 +639,9 @@ def test_graph_preprocess_model():
|
||||
"Divide",
|
||||
]
|
||||
assert len(model_operators) == 13
|
||||
assert function.get_output_size() == 1
|
||||
assert list(function.get_output_shape(0)) == [2, 2, 2]
|
||||
assert function.get_output_element_type(0) == Type.i32
|
||||
assert model.get_output_size() == 1
|
||||
assert list(model.get_output_shape(0)) == [2, 2, 2]
|
||||
assert model.get_output_element_type(0) == Type.i32
|
||||
for op in expected_ops:
|
||||
assert op in model_operators
|
||||
|
||||
@ -650,9 +650,9 @@ def test_graph_preprocess_dump():
|
||||
shape = [1, 3, 224, 224]
|
||||
parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
|
||||
model = parameter_a
|
||||
function = Model(model, [parameter_a], "TestFunction")
|
||||
model = Model(model, [parameter_a], "TestModel")
|
||||
|
||||
ppp = PrePostProcessor(function)
|
||||
ppp = PrePostProcessor(model)
|
||||
ppp.input().tensor().set_layout(Layout("NHWC")).set_element_type(Type.u8)
|
||||
ppp.input().tensor().set_spatial_dynamic_shape()
|
||||
ppp.input().preprocess().convert_element_type(Type.f32).reverse_channels()
|
||||
@@ -673,13 +673,13 @@ def test_graph_preprocess_dump():
 @pytest.mark.parametrize(
     ("layout", "layout_str"),
     [("NHCW", "[N,H,C,W]"), ("NHWC", "[N,H,W,C]")])
-def test_ngraph_set_layout_by_string(layout, layout_str):
+def test_graph_set_layout_by_string(layout, layout_str):
     shape = [1, 3, 224, 224]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
     model = parameter_a
-    function = Model(model, [parameter_a], "TestFunction")
+    model = Model(model, [parameter_a], "TestModel")

-    ppp = PrePostProcessor(function)
+    ppp = PrePostProcessor(model)
     ppp.input().model().set_layout(layout)
     p_str = str(ppp)
     assert f"{layout_str}" in p_str
@@ -688,13 +688,13 @@ def test_ngraph_set_layout_by_string(layout, layout_str):
 @pytest.mark.parametrize(
     ("layout", "layout_str"),
     [(Layout("NHCW"), "[N,H,C,W]"), (Layout("NHWC"), "[N,H,W,C]")])
-def test_ngraph_set_layout_by_layout_class(layout, layout_str):
+def test_graph_set_layout_by_layout_class(layout, layout_str):
     shape = [1, 3, 224, 224]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
     model = parameter_a
-    function = Model(model, [parameter_a], "TestFunction")
+    model = Model(model, [parameter_a], "TestModel")

-    ppp = PrePostProcessor(function)
+    ppp = PrePostProcessor(model)
     ppp.input().model().set_layout(layout)
     p_str = str(ppp)
     assert f"{layout_str}" in p_str
@@ -703,26 +703,26 @@ def test_ngraph_set_layout_by_layout_class(layout, layout_str):
 @pytest.mark.parametrize(
     ("layout"),
     [("1-2-3D"), ("5-5")])
-def test_ngraph_set_layout_by_str_thow_exception(layout):
+def test_graph_set_layout_by_str_thow_exception(layout):
     shape = [1, 3, 224, 224]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
     model = parameter_a
-    function = Model(model, [parameter_a], "TestFunction")
+    model = Model(model, [parameter_a], "TestModel")

-    ppp = PrePostProcessor(function)
+    ppp = PrePostProcessor(model)

     with pytest.raises(RuntimeError) as e:
         ppp.input().model().set_layout(layout)
     assert "Layout name is invalid" in str(e.value)


-def test_ngraph_set_layout_by_layout_class_thow_exception():
+def test_graph_set_layout_by_layout_class_thow_exception():
     shape = [1, 3, 224, 224]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
     model = parameter_a
-    function = Model(model, [parameter_a], "TestFunction")
+    model = Model(model, [parameter_a], "TestModel")

-    ppp = PrePostProcessor(function)
+    ppp = PrePostProcessor(model)

     with pytest.raises(RuntimeError) as e:
         layout = Layout("1-2-3D")
@@ -3,9 +3,9 @@
 # SPDX-License-Identifier: Apache-2.0


-def count_ops_of_type(func, op_type):
+def count_ops_of_type(model, op_type):
     count = 0
-    for op in func.get_ops():
+    for op in model.get_ops():
         if (type(op) is type(op_type)):
             count += 1
     return count
@@ -300,9 +300,9 @@ def test_query_model(device):
     model = get_relu_model()
     query_model = core.query_model(model=model, device_name=device)
     ops_model = model.get_ordered_ops()
-    ops_func_names = [op.friendly_name for op in ops_model]
+    ops_model_names = [op.friendly_name for op in ops_model]
     assert [
-        key for key in query_model.keys() if key not in ops_func_names
+        key for key in query_model.keys() if key not in ops_model_names
     ] == [], "Not all network layers present in query_model results"
     assert device in next(iter(set(query_model.values()))), "Wrong device for some layers"

@@ -6,6 +6,7 @@ import os
 import numpy as np
 import pytest
 import math
+from contextlib import nullcontext as does_not_raise

 import openvino.runtime.opset13 as ops
 from openvino import (
@@ -26,7 +27,7 @@ from openvino.runtime import Output
 from tests.utils.helpers import generate_add_model, create_filename_for_test


-def test_test_descriptor_tensor():
+def test_descriptor_tensor():
     input_shape = PartialShape([1])
     param = ops.parameter(input_shape, dtype=np.float32, name="data")
     relu1 = ops.relu(param, name="relu1")
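The next hunk collapses several single-purpose `add_outputs` tests (valid tensor name, valid port, bad name, bad index, wrong type) into one parametrized test. A minimal sketch of the success/failure expectation pattern it relies on, with hypothetical names and a toy check rather than the real API:

# Sketch of the parametrized expectation pattern used in the hunk below
# (names and the toy check are hypothetical, not taken from the test suite).
from contextlib import nullcontext as does_not_raise

import pytest


@pytest.mark.parametrize(("value", "expectation", "raise_msg"), [
    (1, does_not_raise(), ""),
    (-1, pytest.raises(ValueError), "negative"),
])
def test_expectation_pattern(value, expectation, raise_msg):
    with expectation as e:
        if value < 0:
            raise ValueError("negative value")  # stand-in for the call under test
    if e is not None:
        assert raise_msg in str(e.value)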
@ -40,104 +41,54 @@ def test_test_descriptor_tensor():
|
||||
assert td.any_name == "relu_t1"
|
||||
|
||||
|
||||
def test_function_add_outputs_tensor_name():
|
||||
@pytest.mark.parametrize(("output", "expectation", "raise_msg"), [
|
||||
("relu_t1", does_not_raise(), ""),
|
||||
(("relu1", 0), does_not_raise(), ""),
|
||||
("relu_t", pytest.raises(RuntimeError), "relu_t"),
|
||||
(("relu1", 1234), pytest.raises(RuntimeError), "1234"),
|
||||
(("relu_1", 0), pytest.raises(RuntimeError), "relu_1"),
|
||||
(0, pytest.raises(TypeError), "Incorrect type of a value to add as output."),
|
||||
([0, 0], pytest.raises(TypeError), "Incorrect type of a value to add as output at index 0"),
|
||||
])
|
||||
def test_add_outputs(output, expectation, raise_msg):
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
assert "relu_t1" in relu1.get_output_tensor(0).names
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
new_outs = function.add_outputs("relu_t1")
|
||||
assert len(function.get_results()) == 2
|
||||
assert len(function.results) == 2
|
||||
assert "relu_t1" in function.outputs[1].get_tensor().names
|
||||
model = Model(relu2, [param], "TestModel")
|
||||
assert len(model.get_results()) == 1
|
||||
assert len(model.results) == 1
|
||||
with expectation as e:
|
||||
new_outs = model.add_outputs(output)
|
||||
assert len(model.get_results()) == 2
|
||||
assert len(model.results) == 2
|
||||
assert "relu_t1" in model.outputs[1].get_tensor().names
|
||||
assert len(new_outs) == 1
|
||||
assert new_outs[0].get_node() == model.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == model.outputs[1].get_index()
|
||||
if e is not None:
|
||||
assert raise_msg in str(e.value)
|
||||
|
||||
|
||||
def test_add_output_port():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
model = Model(relu2, [param], "TestModel")
|
||||
assert len(model.results) == 1
|
||||
new_outs = model.add_outputs(relu1.output(0))
|
||||
assert len(model.results) == 2
|
||||
assert len(new_outs) == 1
|
||||
assert new_outs[0].get_node() == function.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == function.outputs[1].get_index()
|
||||
assert new_outs[0].get_node() == model.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == model.outputs[1].get_index()
|
||||
|
||||
|
||||
def test_function_add_outputs_op_name():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
new_outs = function.add_outputs(("relu1", 0))
|
||||
assert len(function.get_results()) == 2
|
||||
assert len(function.results) == 2
|
||||
assert len(new_outs) == 1
|
||||
assert new_outs[0].get_node() == function.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == function.outputs[1].get_index()
|
||||
|
||||
|
||||
def test_function_add_output_port():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.results) == 1
|
||||
new_outs = function.add_outputs(relu1.output(0))
|
||||
assert len(function.results) == 2
|
||||
assert len(new_outs) == 1
|
||||
assert new_outs[0].get_node() == function.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == function.outputs[1].get_index()
|
||||
|
||||
|
||||
def test_function_add_output_incorrect_tensor_name():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
with pytest.raises(RuntimeError) as e:
|
||||
function.add_outputs("relu_t")
|
||||
# Verify that absent output name is present in error message
|
||||
assert "relu_t" in str(e.value)
|
||||
|
||||
|
||||
def test_function_add_output_incorrect_idx():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
with pytest.raises(RuntimeError) as e:
|
||||
function.add_outputs(("relu1", 1234))
|
||||
# Verify that op name and port number are present in error message
|
||||
assert "relu1" in str(e.value)
|
||||
assert "1234" in str(e.value)
|
||||
|
||||
|
||||
def test_function_add_output_incorrect_name():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
with pytest.raises(RuntimeError) as e:
|
||||
function.add_outputs(("relu_1", 0))
|
||||
# Verify that absent op name is present in error message
|
||||
assert "relu_1" in str(e.value)
|
||||
|
||||
|
||||
def test_add_outputs_several_tensors():
|
||||
@pytest.mark.parametrize("args", [["relu_t1", "relu_t2"], [("relu1", 0), ("relu2", 0)]])
|
||||
def test_add_outputs_several_outputs(args):
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
@ -145,65 +96,17 @@ def test_add_outputs_several_tensors():
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
relu2.get_output_tensor(0).set_names({"relu_t2"})
|
||||
relu3 = ops.relu(relu2, name="relu3")
|
||||
function = Model(relu3, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
new_outs = function.add_outputs(["relu_t1", "relu_t2"])
|
||||
assert len(function.get_results()) == 3
|
||||
assert len(function.results) == 3
|
||||
model = Model(relu3, [param], "TestModel")
|
||||
assert len(model.get_results()) == 1
|
||||
assert len(model.results) == 1
|
||||
new_outs = model.add_outputs(args)
|
||||
assert len(model.get_results()) == 3
|
||||
assert len(model.results) == 3
|
||||
assert len(new_outs) == 2
|
||||
assert new_outs[0].get_node() == function.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == function.outputs[1].get_index()
|
||||
assert new_outs[1].get_node() == function.outputs[2].get_node()
|
||||
assert new_outs[1].get_index() == function.outputs[2].get_index()
|
||||
|
||||
|
||||
def test_add_outputs_several_ports():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
relu2.get_output_tensor(0).set_names({"relu_t2"})
|
||||
relu3 = ops.relu(relu2, name="relu3")
|
||||
function = Model(relu3, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
new_outs = function.add_outputs([("relu1", 0), ("relu2", 0)])
|
||||
assert len(function.get_results()) == 3
|
||||
assert len(function.results) == 3
|
||||
assert len(new_outs) == 2
|
||||
assert new_outs[0].get_node() == function.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == function.outputs[1].get_index()
|
||||
assert new_outs[1].get_node() == function.outputs[2].get_node()
|
||||
assert new_outs[1].get_index() == function.outputs[2].get_index()
|
||||
|
||||
|
||||
def test_add_outputs_incorrect_value():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
function = Model(relu2, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
with pytest.raises(TypeError) as e:
|
||||
function.add_outputs(0)
|
||||
assert "Incorrect type of a value to add as output." in str(e.value)
|
||||
|
||||
|
||||
def test_add_outputs_incorrect_outputs_list():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu1 = ops.relu(param, name="relu1")
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
function = Model(relu1, [param], "TestFunction")
|
||||
assert len(function.get_results()) == 1
|
||||
assert len(function.results) == 1
|
||||
with pytest.raises(TypeError) as e:
|
||||
function.add_outputs([0, 0])
|
||||
assert "Incorrect type of a value to add as output at index 0" in str(e.value)
|
||||
assert new_outs[0].get_node() == model.outputs[1].get_node()
|
||||
assert new_outs[0].get_index() == model.outputs[1].get_index()
|
||||
assert new_outs[1].get_node() == model.outputs[2].get_node()
|
||||
assert new_outs[1].get_index() == model.outputs[2].get_index()
|
||||
|
||||
|
||||
def test_validate_nodes_and_infer_types():
|
||||
@ -221,41 +124,41 @@ def test_get_result_index():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu = ops.relu(param, name="relu")
|
||||
function = Model(relu, [param], "TestFunction")
|
||||
assert len(function.outputs) == 1
|
||||
assert function.get_result_index(function.outputs[0]) == 0
|
||||
model = Model(relu, [param], "TestModel")
|
||||
assert len(model.outputs) == 1
|
||||
assert model.get_result_index(model.outputs[0]) == 0
|
||||
|
||||
|
||||
def test_get_result_index_invalid():
|
||||
shape1 = PartialShape([1])
|
||||
param1 = ops.parameter(shape1, dtype=np.float32, name="data1")
|
||||
relu1 = ops.relu(param1, name="relu1")
|
||||
function = Model(relu1, [param1], "TestFunction")
|
||||
model = Model(relu1, [param1], "TestModel")
|
||||
|
||||
shape2 = PartialShape([2])
|
||||
param2 = ops.parameter(shape2, dtype=np.float32, name="data2")
|
||||
relu2 = ops.relu(param2, name="relu2")
|
||||
invalid_output = relu2.outputs()[0]
|
||||
assert len(function.outputs) == 1
|
||||
assert function.get_result_index(invalid_output) == -1
|
||||
assert len(model.outputs) == 1
|
||||
assert model.get_result_index(invalid_output) == -1
|
||||
|
||||
|
||||
def test_parameter_index():
|
||||
input_shape = PartialShape([1])
|
||||
param = ops.parameter(input_shape, dtype=np.float32, name="data")
|
||||
relu = ops.relu(param, name="relu")
|
||||
function = Model(relu, [param], "TestFunction")
|
||||
assert function.get_parameter_index(param) == 0
|
||||
model = Model(relu, [param], "TestModel")
|
||||
assert model.get_parameter_index(param) == 0
|
||||
|
||||
|
||||
def test_parameter_index_invalid():
|
||||
shape1 = PartialShape([1])
|
||||
param1 = ops.parameter(shape1, dtype=np.float32, name="data1")
|
||||
relu = ops.relu(param1, name="relu")
|
||||
function = Model(relu, [param1], "TestFunction")
|
||||
model = Model(relu, [param1], "TestModel")
|
||||
shape2 = PartialShape([2])
|
||||
param2 = ops.parameter(shape2, dtype=np.float32, name="data2")
|
||||
assert function.get_parameter_index(param2) == -1
|
||||
assert model.get_parameter_index(param2) == -1
|
||||
|
||||
|
||||
def test_replace_parameter():
|
||||
@ -265,31 +168,29 @@ def test_replace_parameter():
|
||||
param2 = ops.parameter(shape2, dtype=np.float32, name="data")
|
||||
relu = ops.relu(param1, name="relu")
|
||||
|
||||
function = Model(relu, [param1], "TestFunction")
|
||||
param_index = function.get_parameter_index(param1)
|
||||
function.replace_parameter(param_index, param2)
|
||||
assert function.get_parameter_index(param2) == param_index
|
||||
assert function.get_parameter_index(param1) == -1
|
||||
model = Model(relu, [param1], "TestModel")
|
||||
param_index = model.get_parameter_index(param1)
|
||||
model.replace_parameter(param_index, param2)
|
||||
assert model.get_parameter_index(param2) == param_index
|
||||
assert model.get_parameter_index(param1) == -1
|
||||
|
||||
|
||||
def test_evaluate():
|
||||
@pytest.mark.parametrize(("args1", "args2", "expectation", "raise_msg"), [
|
||||
(Tensor("float32", Shape([2, 1])),
|
||||
[Tensor(np.array([2, 1], dtype=np.float32).reshape(2, 1)),
|
||||
Tensor(np.array([3, 7], dtype=np.float32).reshape(2, 1))], does_not_raise(), ""),
|
||||
(Tensor("float32", Shape([2, 1])),
|
||||
[Tensor("float32", Shape([3, 1])),
|
||||
Tensor("float32", Shape([3, 1]))], pytest.raises(RuntimeError), "Cannot evaluate model!"),
|
||||
])
|
||||
def test_evaluate(args1, args2, expectation, raise_msg):
|
||||
model = generate_add_model()
|
||||
input1 = np.array([2, 1], dtype=np.float32).reshape(2, 1)
|
||||
input2 = np.array([3, 7], dtype=np.float32).reshape(2, 1)
|
||||
out_tensor = Tensor("float32", Shape([2, 1]))
|
||||
|
||||
assert model.evaluate([out_tensor], [Tensor(input1), Tensor(input2)])
|
||||
assert np.allclose(out_tensor.data, np.array([5, 8]).reshape(2, 1))
|
||||
|
||||
|
||||
def test_evaluate_invalid_input_shape():
|
||||
model = generate_add_model()
|
||||
with pytest.raises(RuntimeError) as e:
|
||||
assert model.evaluate(
|
||||
[Tensor("float32", Shape([2, 1]))],
|
||||
[Tensor("float32", Shape([3, 1])), Tensor("float32", Shape([3, 1]))],
|
||||
)
|
||||
assert "Cannot evaluate model!" in str(e.value)
|
||||
with expectation as e:
|
||||
out_tensor = args1
|
||||
assert model.evaluate([out_tensor], args2)
|
||||
assert np.allclose(out_tensor.data, np.array([5, 8]).reshape(2, 1))
|
||||
if e is not None:
|
||||
assert raise_msg in str(e.value)
|
||||
|
||||
|
||||
def test_get_batch():
|
||||
@ -308,7 +209,7 @@ def test_get_batch_chwn():
|
||||
param3 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data3")
|
||||
add = ops.add(param1, param2)
|
||||
add2 = ops.add(add, param3)
|
||||
model = Model(add2, [param1, param2, param3], "TestFunction")
|
||||
model = Model(add2, [param1, param2, param3], "TestModel")
|
||||
param_method = model.get_parameters()[0]
|
||||
param_attr = model.parameters[0]
|
||||
param_method.set_layout(Layout("CHWN"))
|
||||
@ -316,7 +217,8 @@ def test_get_batch_chwn():
|
||||
assert get_batch(model) == 4
|
||||
|
||||
|
||||
def test_set_batch_dimension():
|
||||
@pytest.mark.parametrize("batch_arg", [Dimension(1), 1])
|
||||
def test_set_batch(batch_arg):
|
||||
model = generate_add_model()
|
||||
model_param1_method = model.get_parameters()[0]
|
||||
model_param2_method = model.get_parameters()[1]
|
||||
@ -327,28 +229,7 @@ def test_set_batch_dimension():
|
||||
model_param1_attr.set_layout(Layout("NC"))
|
||||
assert get_batch(model) == 2
|
||||
# set batch to 1
|
||||
set_batch(model, Dimension(1))
|
||||
assert get_batch(model) == 1
|
||||
# check if shape of param 1 has changed
|
||||
assert model_param1_method.get_output_shape(0) == PartialShape([1, 1])
|
||||
assert model_param1_attr.get_output_shape(0) == PartialShape([1, 1])
|
||||
# check if shape of param 2 has not changed
|
||||
assert model_param2_method.get_output_shape(0) == PartialShape([2, 1])
|
||||
assert model_param2_attr.get_output_shape(0) == PartialShape([2, 1])
|
||||
|
||||
|
||||
def test_set_batch_int():
|
||||
model = generate_add_model()
|
||||
model_param1_method = model.get_parameters()[0]
|
||||
model_param2_method = model.get_parameters()[1]
|
||||
model_param1_attr = model.parameters[0]
|
||||
model_param2_attr = model.parameters[1]
|
||||
# check batch == 2
|
||||
model_param1_method.set_layout(Layout("NC"))
|
||||
model_param1_attr.set_layout(Layout("NC"))
|
||||
assert get_batch(model) == 2
|
||||
# set batch to 1
|
||||
set_batch(model, 1)
|
||||
set_batch(model, batch_arg)
|
||||
assert get_batch(model) == 1
|
||||
# check if shape of param 1 has changed
|
||||
assert model_param1_method.get_output_shape(0) == PartialShape([1, 1])
|
||||
@ -511,7 +392,7 @@ def test_serialize_rt_info(request, tmp_path):
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
assert "relu_t1" in relu1.get_output_tensor(0).names
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
model = Model(relu2, [param], "TestFunction")
|
||||
model = Model(relu2, [param], "TestModel")
|
||||
|
||||
assert model is not None
|
||||
|
||||
@ -594,7 +475,7 @@ def test_serialize_complex_rt_info(request, tmp_path):
|
||||
relu1.get_output_tensor(0).set_names({"relu_t1"})
|
||||
assert "relu_t1" in relu1.get_output_tensor(0).names
|
||||
relu2 = ops.relu(relu1, name="relu2")
|
||||
model = Model(relu2, [param], "TestFunction")
|
||||
model = Model(relu2, [param], "TestModel")
|
||||
|
||||
assert model is not None
|
||||
|
||||
|
@@ -38,7 +38,7 @@ from tests.utils.helpers import generate_image, generate_relu_compiled_model
     (ov.Type.i4, np.int8),
     ],
 )
-def test_init_with_ngraph(ov_type, numpy_dtype):
+def test_init_with_ov_type(ov_type, numpy_dtype):
     ov_tensors = []
     ov_tensors.append(ov.Tensor(type=ov_type, shape=ov.Shape([1, 3, 32, 32])))
     ov_tensors.append(ov.Tensor(type=ov_type, shape=[1, 3, 32, 32]))
@@ -206,7 +206,7 @@ def generate_add_model() -> openvino._pyopenvino.Model:
     param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
     param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
     add = ops.add(param1, param2)
-    return Model(add, [param1, param2], "TestFunction")
+    return Model(add, [param1, param2], "TestModel")


 def create_filename_for_test(test_name, tmp_path, is_xml_path=False, is_bin_path=False):