[PyOV] Test refactoring (#12248)
commit d7cf585485 (parent 4df5a104f3)
@@ -24,17 +24,17 @@ pytest tests/
 The *pytest* test framework allows filtering tests with the `-k` flag.
 ```shell
-pytest tests/test_inference_engine/test_core.py -k "test_available_devices"
+pytest tests/test_runtime/test_core.py -k "test_available_devices"
 ```

 Alternatively, the full path to a test case can be passed.
 ```shell
-pytest tests/test_inference_engine/test_core.py::test_available_devices
+pytest tests/test_runtime/test_core.py::test_available_devices
 ```

 To print test names and increase verbosity, use the `-v` flag.
 ```shell
-pytest tests/test_inference_engine/test_core.py -v
+pytest tests/test_runtime/test_core.py -v
 ```
 *Tip: see pytest's documentation for more useful tricks: https://docs.pytest.org/en/latest/*
@@ -52,7 +52,7 @@ Let's add a new test for OpenVINO™ Python API.

 First, the test should confirm that the new pybind11-based `MyTensor` class behaves correctly. Navigate to the tests folder and create a new file that describes the tests within it. It should be along the lines of:

-tests/test_inference_engine/test_mytensor.py
+tests/test_runtime/test_mytensor.py

 **Don't forget to include the license at the top of each new file!**
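For Python files in this repository, that license is the SPDX header visible on other files later in this diff:

```python
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
```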
@@ -60,7 +60,7 @@ First, the test should confirm that the new pybind11-based class of `MyTensor` i
 Note that the name of the file is connected to the class/module to be tested. This is exactly why tests are structured in folders that describe what tests are supposed to be there. Always add tests in the correct places; new folders and files should be created only when necessary. A quick overview of the structure:

 tests/test_frontend          <-- frontend manager and extensions
-tests/test_inference_engine  <-- runtime classes such as Core and Tensor
+tests/test_runtime           <-- runtime classes such as Core and Tensor
 tests/test_graph             <-- operators and their implementation
 tests/test_onnx              <-- ONNX Frontend tests and validation
 tests/test_transformations   <-- optimization passes for OV Models
@@ -80,7 +80,7 @@ def test_mytensor_creation():

 A rebuild is not necessary here as long as there are no updates to the codebase itself. Run the test with:
 ```shell
-pytest tests/test_inference_engine/test_mytensor.py -v
+pytest tests/test_runtime/test_mytensor.py -v
 ```

 In actual tests it is good practice to parametrize them, thus making tests compact and reducing the number of handwritten test cases. Additionally, adding checks for shared functions to the basic tests is a common technique. Let's replace the test with:
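The replacement test body itself is elided from this diff; a parametrized test matching the `[source0]`/`[source1]` output shown below might look roughly like this (the `MyTensor` import path and constructor are assumptions for illustration, not code from this commit):

```python
import numpy as np
import pytest

# Hypothetical import -- the real module path depends on where the
# pybind11-based MyTensor class is exposed.
from openvino.pyopenvino import MyTensor


@pytest.mark.parametrize("source", [
    np.zeros(shape=(2, 2)),        # -> test id [source0]
    [[0.0, 0.0], [0.0, 0.0]],      # -> test id [source1]
])
def test_mytensor_creation(source):
    tensor = MyTensor(source)
    assert tensor is not None
```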
@@ -98,8 +98,8 @@ def test_mytensor_creation(source):

 Run the tests; the output should be similar to:
 ```shell
-tests/test_inference_engine/test_mytensor.py::test_mytensor_creation[source0] PASSED [ 50%]
-tests/test_inference_engine/test_mytensor.py::test_mytensor_creation[source1] PASSED [100%]
+tests/test_runtime/test_mytensor.py::test_mytensor_creation[source0] PASSED [ 50%]
+tests/test_runtime/test_mytensor.py::test_mytensor_creation[source1] PASSED [100%]
 ```

 Notice that the test name is shared between cases. In a real-life pull request, all of the functionality should be tested to ensure the quality of the solution. Always focus on general usage and edge-case scenarios. On the other hand, remember that excessive testing is not advised, as it may result in duplicate test cases and impact validation pipelines. A good rule-of-thumb list of practices for adding tests to the project is:
@@ -17,7 +17,7 @@ passenv =
 commands=
     {envbindir}/python setup.py bdist_wheel
     {envbindir}/pip install --no-index --pre --find-links=dist/ openvino
-    pytest --backend={env:OV_BACKEND} tests -v -k 'not _cuda' --ignore=tests/test_onnx/test_zoo_models.py --ignore=tests/test_utils --ignore=tests/test_inference_engine
+    pytest --backend={env:OV_BACKEND} tests -v -k 'not _cuda' --ignore=tests/test_onnx/test_zoo_models.py --ignore=tests/test_utils --ignore=tests/test_runtime
     pytest --backend={env:OV_BACKEND} tests_compatibility/test_ngraph -v -k 'not _cuda' --ignore=tests_compatibility/test_onnx/test_zoo_models.py

 [testenv:zoo_models]
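For a local run outside tox, the updated command with a concrete backend substituted for `{env:OV_BACKEND}` would look like (CPU is just an example value):

```shell
pytest --backend=CPU tests -v -k 'not _cuda' \
    --ignore=tests/test_onnx/test_zoo_models.py \
    --ignore=tests/test_utils \
    --ignore=tests/test_runtime
```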
@@ -5,7 +5,7 @@
 import pytest

 # test.BACKEND_NAME is a configuration variable determining which
-# nGraph backend tests will use. It's set during pytest configuration time.
+# OV backend tests will use. It's set during pytest configuration time.
 # See `pytest_configure` hook in `conftest.py` for more details.
 BACKEND_NAME = None
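The `pytest_configure` hook mentioned in that comment is not part of this diff; a minimal sketch of how such a hook could fill `BACKEND_NAME` (the `--backend` option name is taken from the tox commands above, everything else is assumed):

```python
# conftest.py (sketch, not code from this commit)
import tests


def pytest_addoption(parser):
    # matches the `pytest --backend={env:OV_BACKEND} ...` invocation in tox.ini
    parser.addoption("--backend", default="CPU", help="Backend to run tests on.")


def pytest_configure(config):
    tests.BACKEND_NAME = config.getoption("--backend")
```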
@@ -28,36 +28,36 @@ xfail_accuracy = xfail_test(reason="Accuracy")
 xfail_issue_69444 = xfail_test(reason="ONNX Resize - AssertionError: Mismatched elements.")
-skip_issue_67415 = pytest.mark.skip(reason="RuntimeError: Unsupported data type for when filling blob!")
+xfail_issue_67415 = xfail_test(reason="RuntimeError: Unsupported data type for when filling blob!")
-xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_33488 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "MaxUnpool")
 skip_issue_38084 = pytest.mark.skip(reason="Aborted (core dumped) Assertion "
                                    "`(layer->get_output_partial_shape(i).is_static())' failed.")
-xfail_issue_33589 = xfail_test(reason="nGraph does not support the following ONNX operations: "
+xfail_issue_33589 = xfail_test(reason="OV does not support the following ONNX operations: "
                                "IsNaN and isInf")
-xfail_issue_33595 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_33595 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "Unique")
-xfail_issue_33596 = xfail_test(reason="RuntimeError: nGraph does not support different sequence operations: "
+xfail_issue_33596 = xfail_test(reason="RuntimeError: OV does not support different sequence operations: "
                                "ConcatFromSequence, SequenceConstruct, SequenceAt, SplitToSequence, "
                                "SequenceEmpty, SequenceInsert, SequenceErase, SequenceLength ")
-xfail_issue_33606 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_33606 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "Det")
-xfail_issue_33651 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_33651 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "TfIdfVectorizer")
-xfail_issue_33581 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_33581 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "GatherElements")
 xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported")
 xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable")
 xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted "
                                "to HardSigmoid_IE")
 xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements")
-xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_38699 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "ai.onnx.preview.training.Gradient")
 xfail_issue_38701 = xfail_test(reason="RuntimeError: unsupported element type: STRING")
 xfail_issue_38706 = xfail_test(reason="RuntimeError: output_3.0 has zero dimension which is not allowed")
 xfail_issue_38708 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Slice): y>': "
                                "Axes input must be constant")
 xfail_issue_38710 = xfail_test(reason="RuntimeError: data has zero dimension which is not allowed")
-xfail_issue_38713 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_38713 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "ai.onnx.preview.training.Momentum")
 xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Resize): Y>': "
                                "tf_crop_and_resize - this type of coordinate transformation mode "
@@ -66,9 +66,9 @@ xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node
                                "half_pixel")
 xfail_issue_38725 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Loop): "
                                "value info has no element type specified")
-xfail_issue_38734 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_38734 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "ai.onnx.preview.training.Adam")
-xfail_issue_38735 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_38735 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "ai.onnx.preview.training.Adagrad")
 xfail_issue_48052 = xfail_test(reason="Dropout op is not supported in training mode")
 xfail_issue_45180 = xfail_test(reason="RuntimeError: Unsupported dynamic op: ReduceSum")
@@ -82,7 +82,7 @@ xfail_issue_47323 = xfail_test(reason="RuntimeError: The plugin does not support
 xfail_issue_73538 = xfail_test(reason="OneHot: Unsupported negative indices, "
                                "AssertionError: Mismatched elements.")
 # Model MSFT issues:
-xfail_issue_37957 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
+xfail_issue_37957 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: "
                                "com.microsoft.CropAndResize, com.microsoft.GatherND, "
                                "com.microsoft.Pad, com.microsoft.Range")
 xfail_issue_39669 = xfail_test(reason="AssertionError: This model has no test data")
@@ -126,7 +126,7 @@ xfail_issue_68212 = xfail_test(reason="Unsupported reading model with bytes stre

 xfail_issue_78843 = xfail_test(reason="Missing reference output files for ssd mobilenet models")

-xfail_issue_81976 = xfail_test(reason="RuntimeError: z node not found in graph cache")
+xfail_issue_81976 = xfail_test(reason="RuntimeError: z node not found in OV cache")
 xfail_issue_82038 = xfail_test(reason="ScatterElements, ScatterND, AssertionError: Result mismatch")
 xfail_issue_82039 = xfail_test(reason="Unsupported data type Optional, RuntimeError: [ NOT_IMPLEMENTED ] "
                                "CPU plugin: Input image format UNSPECIFIED is not supported yet...")
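These markers are applied to individual ONNX backend tests; a hypothetical example of their use (the test name and body are illustrative only, not part of this commit):

```python
from tests import xfail_issue_38701  # marker defined above


@xfail_issue_38701  # STRING element type is expected to fail for now
def test_cast_to_string_tensor():
    ...
```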
@@ -34,31 +34,31 @@ def get_runtime():


 class Runtime(object):
-    """Represents an nGraph runtime environment."""
+    """Represents a graph runtime environment."""

     def __init__(self, backend_name: str) -> None:
         self.backend_name = backend_name
-        log.debug(f"Creating Inference Engine for {backend_name}")
+        log.debug(f"Creating runtime for {backend_name}")
         self.backend = Core()
         assert backend_name in self.backend.available_devices, 'The requested device "' + backend_name + '" is not supported!'

     def set_config(self, config: Dict[str, str]) -> None:
-        """Set the inference engine configuration."""
+        """Set the runtime configuration."""
         self.backend.set_property(device_name=self.backend_name, properties=config)

-    def computation(self, node_or_function: Union[Node, Model], *inputs: Node) -> "Computation":
+    def computation(self, node_or_model: Union[Node, Model], *inputs: Node) -> "Computation":
         """Return a callable Computation object."""
-        if isinstance(node_or_function, Node):
-            ng_function = Model(node_or_function, inputs, node_or_function.name)
-            return Computation(self, ng_function)
-        elif isinstance(node_or_function, Model):
-            return Computation(self, node_or_function)
+        if isinstance(node_or_model, Node):
+            model = Model(node_or_model, inputs, node_or_model.name)
+            return Computation(self, model)
+        elif isinstance(node_or_model, Model):
+            return Computation(self, node_or_model)
         else:
             raise TypeError(
                 "Runtime.computation must be called with an OpenVINO Model object "
                 "or an OpenVINO node object and optionally Parameter node objects. "
                 "Called with: %s",
-                node_or_function,
+                node_or_model,
             )

     def __repr__(self) -> str:
@@ -66,13 +66,13 @@ class Runtime(object):


 class Computation(object):
-    """nGraph callable computation object."""
+    """Graph callable computation object."""

-    def __init__(self, runtime: Runtime, ng_function: Model) -> None:
+    def __init__(self, runtime: Runtime, model: Model) -> None:
         self.runtime = runtime
-        self.function = ng_function
-        self.parameters = ng_function.get_parameters()
-        self.results = ng_function.get_results()
+        self.model = model
+        self.parameters = model.get_parameters()
+        self.results = model.get_results()
         self.network_cache = {}

     def convert_buffers(self, source_buffers, target_dtypes):
@@ -101,7 +101,7 @@ class Computation(object):

     def __repr__(self) -> str:
         params_string = ", ".join([param.name for param in self.parameters])
-        return f"<Computation: {self.function.get_name()}({params_string})>"
+        return f"<Computation: {self.model.get_name()}({params_string})>"

     def __call__(self, *input_values: NumericData) -> List[NumericData]:
         """Run computation on input values and return result."""
@@ -116,12 +116,12 @@ class Computation(object):
         param_names = [param.friendly_name for param in self.parameters]
         input_shapes = [get_shape(input_value) for input_value in input_values]
         if self.network_cache.get(str(input_shapes)) is None:
-            function = self.function
-            self.network_cache[str(input_shapes)] = function
+            model = self.model
+            self.network_cache[str(input_shapes)] = model
         else:
-            function = self.network_cache[str(input_shapes)]
+            model = self.network_cache[str(input_shapes)]

-        compiled_model = self.runtime.backend.compile_model(function, self.runtime.backend_name)
+        compiled_model = self.runtime.backend.compile_model(model, self.runtime.backend_name)
         is_bfloat16 = any(parameter.get_output_element_type(0) == Type.bf16 for parameter in self.parameters)
         if is_bfloat16:
             input_values = self.convert_to_tensors(input_values)
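A sketch of how these helpers are used elsewhere in the test suite (compare the binary-op tests later in this diff); the ReLU graph here is an arbitrary illustration:

```python
import numpy as np

import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime

runtime = get_runtime()
parameter_a = ov.parameter([2, 2], name="A", dtype=np.float32)
node = ov.relu(parameter_a)

# computation() accepts either a Node or a Model (the renamed `node_or_model`)
computation = runtime.computation(node, parameter_a)
result = computation(np.array([[-1.0, 2.0], [3.0, -4.0]], dtype=np.float32))
```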
@@ -142,9 +142,9 @@ def create_onnx_model_for_op_extension():
     return make_model(graph, producer_name="ONNX Frontend")


-def run_function(function, *inputs, expected):
+def run_model(model, *inputs, expected):
     runtime = get_runtime()
-    computation = runtime.computation(function)
+    computation = runtime.computation(model)
     actual = computation(*inputs)
     assert len(actual) == len(expected)
     for i in range(len(actual)):
@@ -194,13 +194,13 @@ def test_convert():
     model = fe.load(onnx_model_filename)
     assert model

-    function = fe.convert(model)
-    assert function
+    converted_model = fe.convert(model)
+    assert converted_model

     input_1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
     input_2 = np.array([[2, 3], [4, 5]], dtype=np.float32)
     expected = np.array([[1.5, 5], [10.5, 18]], dtype=np.float32)
-    run_function(function, input_1, input_2, expected=[expected])
+    run_model(converted_model, input_1, input_2, expected=[expected])


 @pytest.mark.parametrize(("model_filename", "inputs", "expected"), [
@@ -223,19 +223,19 @@ def test_decode_and_convert(model_filename, inputs, expected):
     model = fe.load(model_filename)
     assert model

-    decoded_function = fe.decode(model)
-    assert decoded_function
+    decoded_model = fe.decode(model)
+    assert decoded_model

-    for op in decoded_function.get_ordered_ops():
+    for op in decoded_model.get_ordered_ops():
         assert op.get_type_name() in ["Parameter", "Constant", "ONNXFrameworkNode",
                                       "ONNXSubgraphFrameworkNode", "Result"]

-    fe.convert(decoded_function)
-    assert decoded_function
-    for op in decoded_function.get_ordered_ops():
+    fe.convert(decoded_model)
+    assert decoded_model
+    for op in decoded_model.get_ordered_ops():
         assert op.get_type_name() not in ["ONNXFrameworkNode", "ONNXSubgraphFrameworkNode"]

-    run_function(decoded_function, *inputs, expected=[expected])
+    run_model(decoded_model, *inputs, expected=[expected])


 def test_load_by_model():
@@ -490,18 +490,18 @@ def test_op_extension_specify_opset(opset_prefix):
     from openvino.frontend.onnx import OpExtension
     from openvino.runtime import Core

-    ie = Core()
+    core = Core()

     # check the model is valid
-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

     # add extensions
     fw_operation = "Floor"
     ov_operation = opset_prefix + fw_operation
-    ie.add_extension(OpExtension(ov_operation, fw_operation))
+    core.add_extension(OpExtension(ov_operation, fw_operation))

-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

@@ -513,14 +513,14 @@ def test_op_extension_specify_wrong_opset(opset_prefix):
     from openvino.frontend.onnx import OpExtension
     from openvino.runtime import Core

-    ie = Core()
+    core = Core()

     # add extensions
     fw_operation = "Floor"
     ov_operation = opset_prefix + fw_operation
-    ie.add_extension(OpExtension(ov_operation, fw_operation))
+    core.add_extension(OpExtension(ov_operation, fw_operation))
     with pytest.raises(RuntimeError):
-        ie.read_model(onnx_model_for_op_extension_test)
+        core.read_model(onnx_model_for_op_extension_test)


 def test_op_extension_via_onnx_extension_set_attrs_values():
@@ -530,19 +530,19 @@ def test_op_extension_via_onnx_extension_set_attrs_values():
     from openvino.frontend.onnx import OpExtension
     from openvino.runtime import Core

-    ie = Core()
+    core = Core()

     # check the model is valid
-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

     # add extensions
-    ie.add_extension(OpExtension("Multiply", "Mul", {}, {"auto_broadcast": "numpy"}))
-    ie.add_extension(OpExtension("Elu", {}, {"alpha": 1.}))
-    ie.add_extension(OpExtension("Floor"))
-    ie.add_extension(OpExtension("Concat", {}, {"axis": 0}))
-    ie.add_extension(OpExtension("Convert", "Cast", {}, {"destination_type": "i64"}))
-    ie.add_extension(OpExtension("AvgPool", "AveragePool", {}, {"kernel": [2, 2],
+    core.add_extension(OpExtension("Multiply", "Mul", {}, {"auto_broadcast": "numpy"}))
+    core.add_extension(OpExtension("Elu", {}, {"alpha": 1.}))
+    core.add_extension(OpExtension("Floor"))
+    core.add_extension(OpExtension("Concat", {}, {"axis": 0}))
+    core.add_extension(OpExtension("Convert", "Cast", {}, {"destination_type": "i64"}))
+    core.add_extension(OpExtension("AvgPool", "AveragePool", {}, {"kernel": [2, 2],
                                                                   "strides": [2, 2],
                                                                   "pads_begin": [0, 0],
                                                                   "pads_end": [1, 1],
@@ -550,7 +550,7 @@ def test_op_extension_via_onnx_extension_set_attrs_values():
                                                                   "auto_pad": "same_upper",
                                                                   "rounding_type": "floor"}))

-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

@@ -561,18 +561,18 @@ def test_op_extension_via_frontend_extension_set_attrs_values():
     from openvino.frontend import OpExtension
     from openvino.runtime import Core

-    ie = Core()
+    core = Core()
     # check the model is valid
-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

     # add extensions
-    ie.add_extension(OpExtension("Multiply", "Mul", {}, {"auto_broadcast": "numpy"}))
-    ie.add_extension(OpExtension("Elu", "Elu", {}, {"alpha": 1.}))
-    ie.add_extension(OpExtension("Floor"))
-    ie.add_extension(OpExtension("Concat", {}, {"axis": 0}))
-    ie.add_extension(OpExtension("Convert", "Cast", {}, {"destination_type": "i64"}))
-    ie.add_extension(OpExtension("AvgPool", "AveragePool", {}, {"kernel": [2, 2],
+    core.add_extension(OpExtension("Multiply", "Mul", {}, {"auto_broadcast": "numpy"}))
+    core.add_extension(OpExtension("Elu", "Elu", {}, {"alpha": 1.}))
+    core.add_extension(OpExtension("Floor"))
+    core.add_extension(OpExtension("Concat", {}, {"axis": 0}))
+    core.add_extension(OpExtension("Convert", "Cast", {}, {"destination_type": "i64"}))
+    core.add_extension(OpExtension("AvgPool", "AveragePool", {}, {"kernel": [2, 2],
                                                                   "strides": [2, 2],
                                                                   "pads_begin": [0, 0],
                                                                   "pads_end": [1, 1],
@@ -580,7 +580,7 @@ def test_op_extension_via_frontend_extension_set_attrs_values():
                                                                   "auto_pad": "same_upper",
                                                                   "rounding_type": "floor"}))

-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

@@ -591,16 +591,16 @@ def test_op_extension_via_frontend_extension_map_attributes():
     from openvino.frontend import OpExtension
     from openvino.runtime import Core

-    ie = Core()
+    core = Core()
     # check the model is valid
-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model

     # add extensions
-    ie.add_extension(OpExtension("Elu", "Elu", {"alpha": "alpha"}))
-    ie.add_extension(OpExtension("Concat", {"axis": "axis"}, {"axis": 0}))
+    core.add_extension(OpExtension("Elu", "Elu", {"alpha": "alpha"}))
+    core.add_extension(OpExtension("Concat", {"axis": "axis"}, {"axis": 0}))

-    ie.add_extension(OpExtension("AvgPool", "AveragePool", {"kernel": "kernel_shape",
+    core.add_extension(OpExtension("AvgPool", "AveragePool", {"kernel": "kernel_shape",
                                                             "strides": "strides",
                                                             "auto_pad": "auto_pad"},
                                                            {"pads_begin": [0, 0],
@@ -608,7 +608,7 @@ def test_op_extension_via_frontend_extension_map_attributes():
                                                             "exclude-pad": True,
                                                             "rounding_type": "floor"}))

-    model = ie.read_model(onnx_model_for_op_extension_test)
+    model = core.read_model(onnx_model_for_op_extension_test)
     assert model
@@ -372,7 +372,7 @@ def skip_if_onnx_frontend_is_disabled():


 # Function to compare ng Functions (ops names, types and shapes).
 # Note that the function uses get_ordered_ops, so the topological order of ops should also be preserved.
-def compare_functions(current, expected):  # noqa: C901 the function is too complex
+def compare_models(current, expected):  # noqa: C901 the function is too complex
     result = True
     msg = ""
     if current.get_friendly_name() != expected.get_friendly_name():
@@ -428,12 +428,11 @@ def test_extract_subgraph():
     place2 = model.get_place_by_tensor_name(tensor_name="add_out").get_input_port(input_port_index=1)  # in2
     place3 = model.get_place_by_tensor_name(tensor_name="add_out")
     model.extract_subgraph(inputs=[place1, place2], outputs=[place3])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("extract_subgraph.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -448,12 +447,11 @@ def test_extract_subgraph_2():
     place1 = model.get_place_by_tensor_name(tensor_name="add_out")
     place2 = model.get_place_by_tensor_name(tensor_name="out3")
     model.extract_subgraph(inputs=[], outputs=[place1, place2])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("extract_subgraph_2.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph_2.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -469,12 +467,11 @@ def test_extract_subgraph_3():
     place2 = model.get_place_by_tensor_name(tensor_name="out1")
     place3 = model.get_place_by_tensor_name(tensor_name="out2")
     model.extract_subgraph(inputs=[place1], outputs=[place2, place3])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("extract_subgraph_3.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph_3.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -496,10 +493,9 @@ def test_extract_subgraph_4():
     model.extract_subgraph(inputs=[place1, place2, place3], outputs=[place4, place5, place6])
     result_func = fe.convert(model)

-    expected_model = fe.load("extract_subgraph_4.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph_4.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_func, expected_model)
     assert res

@@ -518,12 +514,11 @@ def test_extract_subgraph_by_op_place_as_input():
     out2 = model.get_place_by_tensor_name(tensor_name="out2")

     model.extract_subgraph(inputs=[split_op, mul_op], outputs=[out1, out2, out4])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("extract_subgraph_4.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph_4.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -541,12 +536,11 @@ def test_extract_subgraph_by_op_place_as_output():
     add_op = add_out_tensor.get_producing_operation()

     model.extract_subgraph(inputs=[in1_tensor, in2_tensor], outputs=[add_op])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("extract_subgraph_5.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph_5.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -563,12 +557,11 @@ def test_extract_subgraph_by_op_place_as_output_2():
     mul_op = out4.get_producing_operation()

     model.extract_subgraph(inputs=[split_op, mul_op], outputs=[])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_override_all_inputs.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_override_all_inputs.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -587,12 +580,11 @@ def test_extract_subgraph_by_port_place_as_output():
     in2_tensor = model.get_place_by_tensor_name(tensor_name="in2")

     model.extract_subgraph(inputs=[in1_tensor, in2_tensor], outputs=[add_op_out_port])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("extract_subgraph.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("extract_subgraph.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -607,12 +599,11 @@ def test_override_all_outputs():
     place1 = model.get_place_by_tensor_name(tensor_name="add_out")
     place2 = model.get_place_by_tensor_name(tensor_name="out3")
     model.override_all_outputs(outputs=[place1, place2])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_override_all_outputs.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_override_all_outputs.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -626,12 +617,11 @@ def test_override_all_outputs_2():

     place1 = model.get_place_by_tensor_name(tensor_name="out4")
     model.override_all_outputs(outputs=[place1])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_override_all_outputs_2.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_override_all_outputs_2.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -646,12 +636,11 @@ def test_override_all_outputs_3():
     place1 = model.get_place_by_tensor_name(tensor_name="out1")
     place2 = model.get_place_by_tensor_name(tensor_name="out1")
     model.override_all_outputs(outputs=[place1, place2])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_override_all_outputs_3.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_override_all_outputs_3.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -670,12 +659,11 @@ def test_override_all_outputs_invalid_place():
     place1 = model.get_place_by_tensor_name(tensor_name="out1")
     place2 = model.get_place_by_tensor_name(tensor_name="out1")
     model.override_all_outputs(outputs=[place1, place2, invalid_place])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_override_all_outputs_3.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_override_all_outputs_3.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -694,12 +682,11 @@ def test_override_all_inputs():
     place3 = out4_tensor.get_producing_operation().get_input_port(input_port_index=1)
     place4 = model.get_place_by_tensor_name(tensor_name="in3")
     model.override_all_inputs(inputs=[place1, place2, place3, place4])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_override_all_inputs.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_override_all_inputs.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -721,12 +708,11 @@ def test_override_all_inputs_invalid_place():
     place1 = out1_tensor.get_producing_operation().get_input_port(input_port_index=0)
     place2 = out1_tensor.get_producing_operation().get_input_port(input_port_index=1)
     model.override_all_inputs(inputs=[place1, place2, invalid_place])
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("input_model_3.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("input_model_3.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res

@@ -774,12 +760,11 @@ def test_set_partial_shape():
     model.set_partial_shape(place2, PartialShape([8, 16]))
     place3 = model.get_place_by_tensor_name(tensor_name="in3")
     model.set_partial_shape(place3, PartialShape([4, 6]))
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    expected_model = fe.load("test_partial_shape.onnx")
-    expected_func = fe.convert(expected_model)
+    expected_model = fe.convert(fe.load("test_partial_shape.onnx"))

-    res = compare_functions(result_func, expected_func)
+    res = compare_models(result_model, expected_model)
     assert res
@@ -1002,14 +987,14 @@ def test_add_output_place_is_output():
     model = fe.load("input_model.onnx")
     assert model

-    orig_func = fe.convert(model)
+    orig_model = fe.convert(model)

     place = model.get_place_by_tensor_name(tensor_name="out1")
     model.add_output(place)

-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    res = compare_functions(orig_func, result_func)
+    res = compare_models(orig_model, result_model)
     assert res

@@ -1021,14 +1006,13 @@ def test_add_output_place_is_input():
     model = fe.load("input_model.onnx")
     assert model

+    orig_model = fe.convert(model)
+
     place = model.get_place_by_tensor_name(tensor_name="in1")
     model.add_output(place)
-    result_func = fe.convert(model)
+    result_model = fe.convert(model)

-    orig_model = fe.load("input_model.onnx")
-    orig_func = fe.convert(orig_model)
-
-    res = compare_functions(orig_func, result_func)
+    res = compare_models(orig_model, result_model)
     assert res
@@ -1238,11 +1222,10 @@ def test_remove_output():
     place = model.get_place_by_tensor_name(tensor_name="out4")
     model.remove_output(place)

-    expected_model = fe.load("remove_output.onnx")
-    expected_func = fe.convert(expected_model)
-    model_func = fe.convert(model)
+    expected_model = fe.convert(fe.load("remove_output.onnx"))
+    model_converted = fe.convert(model)

-    res = compare_functions(model_func, expected_func)
+    res = compare_models(model_converted, expected_model)
     assert res

@@ -1257,11 +1240,10 @@ def test_remove_output_when_place_is_input():
     place = model.get_place_by_tensor_name(tensor_name="in1")
     model.remove_output(place)

-    expected_model = fe.load("input_model.onnx")
-    expected_func = fe.convert(expected_model)
-    model_func = fe.convert(model)
+    expected_model = fe.convert(fe.load("input_model.onnx"))
+    model_converted = fe.convert(model)

-    res = compare_functions(model_func, expected_func)
+    res = compare_models(model_converted, expected_model)
     assert res
@@ -1307,11 +1289,10 @@ def test_cut_and_add_new_input_place():

     model.cut_and_add_new_input(place, "new_input")

-    expected_model = fe.load("cut_and_add_new_input_place.onnx")
-    expected_func = fe.convert(expected_model)
-    model_func = fe.convert(model)
+    expected_model = fe.convert(fe.load("cut_and_add_new_input_place.onnx"))
+    model_converted = fe.convert(model)

-    res = compare_functions(model_func, expected_func)
+    res = compare_models(model_converted, expected_model)
     assert res

@@ -1329,11 +1310,10 @@ def test_cut_and_add_new_input_edge():

     model.cut_and_add_new_input(edge_mul0, "new_input")

-    expected_model = fe.load("cut_and_add_new_input_edge.onnx")
-    expected_func = fe.convert(expected_model)
-    model_func = fe.convert(model)
+    expected_model = fe.convert(fe.load("cut_and_add_new_input_edge.onnx"))
+    model_converted = fe.convert(model)

-    res = compare_functions(model_func, expected_func)
+    res = compare_models(model_converted, expected_model)
     assert res

@@ -1350,10 +1330,10 @@ def test_set_tensor_value():
     place1 = model.get_place_by_tensor_name(tensor_name="in1")
     model.set_tensor_value(place1, new_values)

-    model_func = fe.convert(model)
+    model_converted = fe.convert(model)

     iteration = None
-    current_ops = model_func.get_ordered_ops()
+    current_ops = model_converted.get_ordered_ops()

     for i in range(len(current_ops)):
         if (current_ops[i].get_friendly_name() == "in1"):
@@ -18,10 +18,10 @@ from openvino.pyopenvino import DescriptorTensor
 from openvino.runtime.op import Parameter
 from tests.runtime import get_runtime
 from openvino.runtime.utils.types import get_dtype
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node


-def test_ngraph_function_api():
+def test_graph_function_api():
     shape = [2, 2]
     parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
     parameter_b = ops.parameter(shape, dtype=Type.f32, name="B")
@@ -6,8 +6,8 @@ import numpy as np

 import openvino.runtime.opset8 as ov
 from tests.runtime import get_runtime
-from tests.test_ngraph.test_ops import convolution2d
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.test_ops import convolution2d
+from tests.test_graph.util import run_op_node


 def test_convolution_2d():
@@ -7,7 +7,7 @@ import numpy as np
 import openvino.runtime.opset8 as ov
 from openvino.runtime import Type, Shape
 from tests.runtime import get_runtime
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node


 def test_reverse_sequence():
@@ -33,15 +33,15 @@ def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,
     atol = 0.0 if np.issubdtype(data_type, np.integer) else 1e-04

     # generate input tensors
-    ng_inputs = []
+    graph_inputs = []
     np_inputs = []
     for i in range(num_inputs):
         input_i = np.random.randint(1, 10 + 1, size=input_shapes[i]).astype(data_type)
         np_inputs.append(input_i)
-        ng_inputs.append(ov.parameter(input_i.shape, dtype=data_type))
+        graph_inputs.append(ov.parameter(input_i.shape, dtype=data_type))

     expected_result = np.einsum(equation, *np_inputs)
-    einsum_model = ov.einsum(ng_inputs, equation)
+    einsum_model = ov.einsum(graph_inputs, equation)

     # check the output shape and type
     assert einsum_model.get_type_name() == "Einsum"
@@ -51,7 +51,7 @@ def einsum_op_exec(input_shapes: list, equation: str, data_type: np.dtype,

     # check inference result
     if with_value:
-        computation = runtime.computation(einsum_model, *ng_inputs)
+        computation = runtime.computation(einsum_model, *graph_inputs)
         actual_result = computation(*np_inputs)
         np.allclose(actual_result, expected_result, atol=atol)
@@ -5,7 +5,7 @@
 import openvino.runtime.opset8 as ov
 import numpy as np

-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node


 def test_gather():
@@ -12,26 +12,26 @@ import pytest
 import openvino.runtime.opset8 as ov
 from openvino.runtime import Model, PartialShape, Shape
 from openvino.runtime.passes import Manager
-from tests.test_ngraph.util import count_ops_of_type
+from tests.test_graph.util import count_ops_of_type
 from openvino.runtime import Core


 def test_constant_folding():
     node_constant = ov.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32))
     node_ceil = ov.ceiling(node_constant)
-    func = Model(node_ceil, [], "TestFunction")
+    model = Model(node_ceil, [], "TestFunction")

-    assert count_ops_of_type(func, node_ceil) == 1
-    assert count_ops_of_type(func, node_constant) == 1
+    assert count_ops_of_type(model, node_ceil) == 1
+    assert count_ops_of_type(model, node_constant) == 1

     pass_manager = Manager()
     pass_manager.register_pass("ConstantFolding")
-    pass_manager.run_passes(func)
+    pass_manager.run_passes(model)

-    assert count_ops_of_type(func, node_ceil) == 0
-    assert count_ops_of_type(func, node_constant) == 1
+    assert count_ops_of_type(model, node_ceil) == 0
+    assert count_ops_of_type(model, node_constant) == 1

-    new_const = func.get_results()[0].input(0).get_source_output().get_node()
+    new_const = model.get_results()[0].input(0).get_source_output().get_node()
     values_out = new_const.get_vector()
     values_expected = [0.0, 1.0, 0.0, -2.0, 3.0, 3.0]
@@ -53,10 +53,10 @@ def test_serialize_seperate_paths_kwargs():
     pass_manager.register_pass(pass_name="Serialize", xml_path=xml_path, bin_path=bin_path)
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -77,10 +77,10 @@ def test_serialize_seperate_paths_args():
     pass_manager.register_pass("Serialize", xml_path, bin_path)
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -100,10 +100,10 @@ def test_serialize_pass_mixed_args_kwargs():
     pass_manager.register_pass("Serialize", xml_path, bin_path=bin_path)
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -124,10 +124,10 @@ def test_serialize_pass_mixed_args_kwargs_v2():
     pass_manager.register_pass("Serialize", xml_path=xml_path, bin_path=bin_path)
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -155,9 +155,9 @@ def test_serialize_results():
     pass_manager.register_pass("Serialize", xml_path=xml_path, bin_path=bin_path)
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)
     const = func.get_results()[0].input(0).get_source_output().get_node()
-    new_const = res_func.get_results()[0].input(0).get_source_output().get_node()
+    new_const = res_model.get_results()[0].input(0).get_source_output().get_node()

     assert const == new_const

@@ -180,10 +180,10 @@ def test_serialize_pass_tuple():
     pass_manager.register_pass("Serialize", output_files=(xml_path, bin_path))
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -204,10 +204,10 @@ def test_default_version():
     pass_manager.register_pass("Serialize", output_files=(xml_path, bin_path))
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -228,10 +228,10 @@ def test_default_version_IR_V11_tuple():
     pass_manager.register_pass("Serialize", output_files=(xml_path, bin_path), version="IR_V11")
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -252,10 +252,10 @@ def test_default_version_IR_V11_seperate_paths():
     pass_manager.register_pass("Serialize", xml_path=xml_path, bin_path=bin_path, version="IR_V11")
     pass_manager.run_passes(func)

-    res_func = core.read_model(model=xml_path, weights=bin_path)
+    res_model = core.read_model(model=xml_path, weights=bin_path)

-    assert func.get_parameters() == res_func.get_parameters()
-    assert func.get_ordered_ops() == res_func.get_ordered_ops()
+    assert func.get_parameters() == res_model.get_parameters()
+    assert func.get_ordered_ops() == res_model.get_ordered_ops()

     os.remove(xml_path)
     os.remove(bin_path)
@@ -6,7 +6,7 @@ import numpy as np

 import openvino.runtime.opset8 as ov
 from tests.runtime import get_runtime
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node


 def test_lrn():
@@ -9,11 +9,11 @@ import pytest

 import openvino.runtime.opset8 as ov
 from tests.runtime import get_runtime
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node


 @pytest.mark.parametrize(
-    ("ng_api_helper", "numpy_function"),
+    ("graph_api_helper", "numpy_function"),
     [
         (ov.add, np.add),
         (ov.divide, np.divide),
@@ -30,14 +30,14 @@ from tests.test_ngraph.util import run_op_node
         (ov.less_equal, np.less_equal),
     ],
 )
-def test_binary_op(ng_api_helper, numpy_function):
+def test_binary_op(graph_api_helper, numpy_function):
     runtime = get_runtime()

     shape = [2, 2]
     parameter_a = ov.parameter(shape, name="A", dtype=np.float32)
     parameter_b = ov.parameter(shape, name="B", dtype=np.float32)

-    model = ng_api_helper(parameter_a, parameter_b)
+    model = graph_api_helper(parameter_a, parameter_b)
     computation = runtime.computation(model, parameter_a, parameter_b)

     value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
@@ -49,7 +49,7 @@ def test_binary_op(ng_api_helper, numpy_function):


 @pytest.mark.parametrize(
-    ("ng_api_helper", "numpy_function"),
+    ("graph_api_helper", "numpy_function"),
     [
         (ov.add, np.add),
         (ov.divide, np.divide),
@@ -66,7 +66,7 @@ def test_binary_op(ng_api_helper, numpy_function):
         (ov.less_equal, np.less_equal),
     ],
 )
-def test_binary_op_with_scalar(ng_api_helper, numpy_function):
+def test_binary_op_with_scalar(graph_api_helper, numpy_function):
     runtime = get_runtime()

     value_a = np.array([[1, 2], [3, 4]], dtype=np.float32)
@@ -75,7 +75,7 @@ def test_binary_op_with_scalar(ng_api_helper, numpy_function):
     shape = [2, 2]
     parameter_a = ov.parameter(shape, name="A", dtype=np.float32)

-    model = ng_api_helper(parameter_a, value_b)
+    model = graph_api_helper(parameter_a, value_b)
     computation = runtime.computation(model, parameter_a)

     result = computation(value_a)
@@ -84,17 +84,17 @@ def test_binary_op_with_scalar(ng_api_helper, numpy_function):


 @pytest.mark.parametrize(
-    ("ng_api_helper", "numpy_function"),
+    ("graph_api_helper", "numpy_function"),
     [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
 )
-def test_binary_logical_op(ng_api_helper, numpy_function):
+def test_binary_logical_op(graph_api_helper, numpy_function):
     runtime = get_runtime()

     shape = [2, 2]
     parameter_a = ov.parameter(shape, name="A", dtype=np.bool)
     parameter_b = ov.parameter(shape, name="B", dtype=np.bool)

-    model = ng_api_helper(parameter_a, parameter_b)
+    model = graph_api_helper(parameter_a, parameter_b)
     computation = runtime.computation(model, parameter_a, parameter_b)

     value_a = np.array([[True, False], [False, True]], dtype=np.bool)
@@ -106,10 +106,10 @@ def test_binary_logical_op(ng_api_helper, numpy_function):


 @pytest.mark.parametrize(
-    ("ng_api_helper", "numpy_function"),
+    ("graph_api_helper", "numpy_function"),
     [(ov.logical_and, np.logical_and), (ov.logical_or, np.logical_or), (ov.logical_xor, np.logical_xor)],
 )
-def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function):
+def test_binary_logical_op_with_scalar(graph_api_helper, numpy_function):
     runtime = get_runtime()

     value_a = np.array([[True, False], [False, True]], dtype=np.bool)
@@ -118,7 +118,7 @@ def test_binary_logical_op_with_scalar(ng_api_helper, numpy_function):
     shape = [2, 2]
     parameter_a = ov.parameter(shape, name="A", dtype=np.bool)

-    model = ng_api_helper(parameter_a, value_b)
+    model = graph_api_helper(parameter_a, value_b)
     computation = runtime.computation(model, parameter_a)

     result = computation(value_a)
@@ -6,7 +6,7 @@ import numpy as np
 import pytest

 import openvino.runtime.opset8 as ov
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node


 @pytest.mark.parametrize(
@@ -1,4 +1,4 @@
-# -*- coding: utf-8 -*-/home/bszmelcz/openvino/src/bindings/python/tests/test_ngraph/test_ops_util_variable.py
+# -*- coding: utf-8 -*-
 # Copyright (C) 2018-2022 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
@@ -7,7 +7,7 @@ import numpy as np
 import pytest

 from tests.runtime import get_runtime
-from tests.test_ngraph.util import run_op_node, run_op_numeric_data
+from tests.test_graph.util import run_op_node, run_op_numeric_data


 def test_concat():
@@ -8,13 +8,13 @@ import pytest
 import openvino.runtime.opset9 as ov
 from openvino.runtime import Shape, Type
 from tests.runtime import get_runtime
-from tests.test_ngraph.util import run_op_node
+from tests.test_graph.util import run_op_node

 R_TOLERANCE = 1e-6  # global relative tolerance


 @pytest.mark.parametrize(
-    ("ng_api_fn", "numpy_fn", "range_start", "range_end"),
+    ("graph_api_fn", "numpy_fn", "range_start", "range_end"),
     [
         (ov.absolute, np.abs, -1, 1),
         (ov.abs, np.abs, -1, 1),
@@ -40,17 +40,17 @@ R_TOLERANCE = 1e-6  # global relative tolerance
         (ov.tanh, np.tanh, -100.0, 100.0),
     ],
 )
-def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
+def test_unary_op_array(graph_api_fn, numpy_fn, range_start, range_end):
     np.random.seed(133391)
     input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32)
     expected = numpy_fn(input_data)

-    result = run_op_node([input_data], ng_api_fn)
+    result = run_op_node([input_data], graph_api_fn)
     assert np.allclose(result, expected, rtol=0.001)


 @pytest.mark.parametrize(
-    ("ng_api_fn", "numpy_fn", "input_data"),
+    ("graph_api_fn", "numpy_fn", "input_data"),
     [
         pytest.param(ov.absolute, np.abs, np.float32(-3)),
         pytest.param(ov.abs, np.abs, np.float32(-3)),
@@ -73,10 +73,10 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
         pytest.param(ov.tanh, np.tanh, np.float32(0.1234)),
     ],
 )
-def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
+def test_unary_op_scalar(graph_api_fn, numpy_fn, input_data):
     expected = numpy_fn(input_data)

-    result = run_op_node([input_data], ng_api_fn)
+    result = run_op_node([input_data], graph_api_fn)
     assert np.allclose(result, expected)
@@ -14,7 +14,7 @@ from tests.runtime import get_runtime
from openvino.preprocess import PrePostProcessor, ColorFormat, ResizeAlgorithm


def test_ngraph_preprocess_mean():
def test_graph_preprocess_mean():
    shape = [2, 2]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -35,7 +35,7 @@ def test_ngraph_preprocess_mean():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_mean_vector():
def test_graph_preprocess_mean_vector():
    shape = [2, 2]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -56,7 +56,7 @@ def test_ngraph_preprocess_mean_vector():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_scale_vector():
def test_graph_preprocess_scale_vector():
    shape = [2, 2]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -78,7 +78,7 @@ def test_ngraph_preprocess_scale_vector():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_mean_scale_convert():
def test_graph_preprocess_mean_scale_convert():
    shape = [2, 2]
    param1 = ops.parameter(shape, dtype=np.int32, name="A")
    param2 = ops.parameter(shape, dtype=np.int32, name="B")

@@ -109,7 +109,7 @@ def test_ngraph_preprocess_mean_scale_convert():
    assert np.equal(output2, expected_output2).all()


def test_ngraph_preprocess_input_output_by_name():
def test_graph_preprocess_input_output_by_name():
    shape = [2, 2]
    param1 = ops.parameter(shape, dtype=np.int32, name="A")
    param2 = ops.parameter(shape, dtype=np.int32, name="B")

@@ -143,7 +143,7 @@ def test_ngraph_preprocess_input_output_by_name():
    assert np.equal(output2, expected_output2).all()


def test_ngraph_preprocess_output_postprocess():
def test_graph_preprocess_output_postprocess():
    shape = [2, 3]
    parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
    model = parameter_a

@@ -177,7 +177,7 @@ def test_ngraph_preprocess_output_postprocess():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_spatial_static_shape():
def test_graph_preprocess_spatial_static_shape():
    shape = [2, 2, 2]
    parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
    model = parameter_a

@@ -205,7 +205,7 @@ def test_ngraph_preprocess_spatial_static_shape():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_set_shape():
def test_graph_preprocess_set_shape():
    shape = [1, 1, 1]
    parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
    model = parameter_a

@@ -236,7 +236,7 @@ def test_ngraph_preprocess_set_shape():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_set_from_tensor():
def test_graph_preprocess_set_from_tensor():
    shape = [1, 224, 224, 3]
    inp_shape = [1, 480, 640, 3]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")

@@ -256,7 +256,7 @@ def test_ngraph_preprocess_set_from_tensor():
    assert function.output().element_type == Type.f32


def test_ngraph_preprocess_set_from_np_infer():
def test_graph_preprocess_set_from_np_infer():
    shape = [1, 1, 1]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -290,7 +290,7 @@ def test_ngraph_preprocess_set_from_np_infer():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_set_memory_type():
def test_graph_preprocess_set_memory_type():
    shape = [1, 1, 1]
    parameter_a = ops.parameter(shape, dtype=np.int32, name="A")
    op = ops.relu(parameter_a)

@@ -324,7 +324,7 @@ def test_ngraph_preprocess_set_memory_type():
    (ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.NV12_SINGLE_PLANE, True),
    (ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.NV12_TWO_PLANES, True),
    (ResizeAlgorithm.RESIZE_NEAREST, ColorFormat.BGR, ColorFormat.UNDEFINED, True)])
def test_ngraph_preprocess_steps(algorithm, color_format1, color_format2, is_failing):
def test_graph_preprocess_steps(algorithm, color_format1, color_format2, is_failing):
    shape = [1, 1, 3, 3]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -353,7 +353,7 @@ def test_ngraph_preprocess_steps(algorithm, color_format1, color_format2, is_fai
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_postprocess_layout():
def test_graph_preprocess_postprocess_layout():
    shape = [1, 1, 3, 3]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -378,7 +378,7 @@ def test_ngraph_preprocess_postprocess_layout():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_reverse_channels():
def test_graph_preprocess_reverse_channels():
    shape = [1, 2, 2, 2]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -400,7 +400,7 @@ def test_ngraph_preprocess_reverse_channels():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_crop():
def test_graph_preprocess_crop():
    orig_shape = [1, 2, 1, 1]
    tensor_shape = [1, 2, 3, 3]
    parameter_a = ops.parameter(orig_shape, dtype=np.float32, name="A")

@@ -421,7 +421,7 @@ def test_ngraph_preprocess_crop():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_resize_algorithm():
def test_graph_preprocess_resize_algorithm():
    shape = [1, 1, 3, 3]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="A")
    model = parameter_a

@@ -444,7 +444,7 @@ def test_ngraph_preprocess_resize_algorithm():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_model():
def test_graph_preprocess_model():
    model = bytes(b"""<net name="add_model" version="10">
    <layers>
    <layer id="0" name="x" type="Parameter" version="opset1">

@@ -527,7 +527,7 @@ def test_ngraph_preprocess_model():
    assert np.equal(output, expected_output).all()


def test_ngraph_preprocess_dump():
def test_graph_preprocess_dump():
    shape = [1, 3, 224, 224]
    parameter_a = ops.parameter(shape, dtype=np.float32, name="RGB_input")
    model = parameter_a

@@ -541,7 +541,6 @@ def test_ngraph_preprocess_dump():
    ppp.input().preprocess().resize(ResizeAlgorithm.RESIZE_LINEAR)
    ppp.input().model().set_layout(ov.Layout("NCHW"))
    p_str = str(ppp)
    print(ppp)
    assert "Pre-processing steps (5):" in p_str
    assert "convert type (f32):" in p_str
    assert "reverse channels:" in p_str

@@ -9,11 +9,11 @@ from openvino.runtime import PartialShape, Dimension
import openvino.runtime.opset9 as ov
from openvino.runtime.utils.types import make_constant_node
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests.test_graph.util import run_op_node


@pytest.mark.parametrize(
    ("ng_api_helper", "numpy_function", "reduction_axes"),
    ("graph_api_helper", "numpy_function", "reduction_axes"),
    [
        (ov.reduce_max, np.max, np.array([0, 1, 2, 3])),
        (ov.reduce_min, np.min, np.array([0, 1, 2, 3])),

@@ -29,18 +29,18 @@ from tests.test_ngraph.util import run_op_node
        (ov.reduce_prod, np.prod, np.array([0, 2])),
    ],
)
def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
def test_reduction_ops(graph_api_helper, numpy_function, reduction_axes):
    shape = [2, 4, 3, 2]
    np.random.seed(133391)
    input_data = np.random.randn(*shape).astype(np.float32)

    expected = numpy_function(input_data, axis=tuple(reduction_axes))
    result = run_op_node([input_data], ng_api_helper, reduction_axes)
    result = run_op_node([input_data], graph_api_helper, reduction_axes)
    assert np.allclose(result, expected)


@pytest.mark.parametrize(
    ("ng_api_helper", "numpy_function", "reduction_axes"),
    ("graph_api_helper", "numpy_function", "reduction_axes"),
    [
        (ov.reduce_logical_and, np.logical_and.reduce, np.array([0])),
        (ov.reduce_logical_or, np.logical_or.reduce, np.array([0])),

@@ -50,13 +50,13 @@ def test_reduction_ops(ng_api_helper, numpy_function, reduction_axes):
        (ov.reduce_logical_or, np.logical_or.reduce, np.array([0, 1, 2, 3])),
    ],
)
def test_reduction_logical_ops(ng_api_helper, numpy_function, reduction_axes):
def test_reduction_logical_ops(graph_api_helper, numpy_function, reduction_axes):
    shape = [2, 4, 3, 2]
    np.random.seed(133391)
    input_data = np.random.randn(*shape).astype(np.bool)

    expected = numpy_function(input_data, axis=tuple(reduction_axes))
    result = run_op_node([input_data], ng_api_helper, reduction_axes)
    result = run_op_node([input_data], graph_api_helper, reduction_axes)
    assert np.allclose(result, expected)


@@ -73,20 +73,20 @@ def test_topk():


@pytest.mark.parametrize(
    ("ng_api_helper", "numpy_function", "reduction_axes"),
    ("graph_api_helper", "numpy_function", "reduction_axes"),
    [
        (ov.reduce_mean, np.mean, np.array([0, 1, 2, 3])),
        (ov.reduce_mean, np.mean, np.array([0])),
        (ov.reduce_mean, np.mean, np.array([0, 2])),
    ],
)
def test_reduce_mean_op(ng_api_helper, numpy_function, reduction_axes):
def test_reduce_mean_op(graph_api_helper, numpy_function, reduction_axes):
    shape = [2, 4, 3, 2]
    np.random.seed(133391)
    input_data = np.random.randn(*shape).astype(np.float32)

    expected = numpy_function(input_data, axis=tuple(reduction_axes))
    result = run_op_node([input_data], ng_api_helper, reduction_axes)
    result = run_op_node([input_data], graph_api_helper, reduction_axes)
    assert np.allclose(result, expected)

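Note how every reduction test feeds the same `reduction_axes` into both the graph helper and the NumPy reference, so one parametrized body covers many ops. A condensed sketch of that idea (names assume the refactored `tests/test_graph` layout; `reduce_sum` is just one representative reduction):

```python
import numpy as np
import pytest

import openvino.runtime.opset9 as ov
from tests.test_graph.util import run_op_node


@pytest.mark.parametrize("reduction_axes", [[0], [0, 2], [0, 1, 2, 3]])
def test_reduce_sum_sketch(reduction_axes):
    np.random.seed(133391)
    input_data = np.random.randn(2, 4, 3, 2).astype(np.float32)

    # NumPy takes the axes as a tuple; the graph helper receives them as an extra argument.
    expected = np.sum(input_data, axis=tuple(reduction_axes))
    result = run_op_node([input_data], ov.reduce_sum, np.array(reduction_axes))
    assert np.allclose(result, expected)
```
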
@@ -6,7 +6,7 @@ import numpy as np

import openvino.runtime.opset8 as ov
from tests.runtime import get_runtime
from tests.test_ngraph.util import run_op_node
from tests.test_graph.util import run_op_node


def test_onehot():

@@ -22,7 +22,7 @@ def run_op_node(input_data, op_fun, *args):

    `op_fun` has to accept a node as an argument.

    This function converts passed raw input data to nGraph Constant Node and that form is passed
    This function converts passed raw input data to graph Constant Node and that form is passed
    to `op_fun`.

    :param input_data: The input data for performed computation.

@@ -59,7 +59,7 @@ def run_op_numeric_data(input_data, op_fun, *args):

    This function passes input data AS IS. This means that in case they're a scalar (integral,
    or floating point value) or a NumPy's ndarray object they will be automatically converted
    to nGraph's Constant Nodes.
    to graph's Constant Nodes.

    :param input_data: The input data for performed computation.
    :param op_fun: The function handler for operation we want to carry out.

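The contract these two docstrings describe is small enough to sketch. The snippet below is an illustration only, not the repository's actual `run_op_node` (which lives in `tests/test_graph/util.py` after the refactor): it shows raw inputs being wrapped in Constant nodes before `op_fun` is applied, reusing the node-accepting `computation` form visible in the hunks above.

```python
import numpy as np

import openvino.runtime.opset9 as ov
from tests.runtime import get_runtime


def run_op_node_sketch(input_data, op_fun, *args):
    """Hypothetical stand-in: wrap raw inputs in Constant nodes, then evaluate op_fun."""
    # Each raw ndarray becomes a Constant node, as the docstring above states.
    constant_inputs = [ov.constant(np.array(data)) for data in input_data]
    node = op_fun(*constant_inputs, *args)
    # get_runtime().computation also accepts a bare node, as the diffs suggest.
    return get_runtime().computation(node)()
```
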
@@ -12,8 +12,8 @@ from tests.runtime import get_runtime

def test_import_onnx_with_external_data():
    model_path = os.path.join(os.path.dirname(__file__), "models/external_data.onnx")
    ie = Core()
    func = ie.read_model(model=model_path)
    core = Core()
    model = core.read_model(model=model_path)

    dtype = np.float32
    value_a = np.array([1.0, 3.0, 5.0], dtype=dtype)

@@ -21,6 +21,6 @@ def test_import_onnx_with_external_data():
    # third input [5.0, 1.0, 3.0] read from external file

    runtime = get_runtime()
    computation = runtime.computation(func)
    computation = runtime.computation(model)
    result = computation(value_a, value_b)
    assert np.allclose(result, np.array([3.0, 3.0, 3.0], dtype=dtype))

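Beyond renaming `ie`/`func` to `core`/`model`, these hunks show the canonical 2022-API entry point for ONNX files. A hedged usage sketch outside the test harness (the path and `"CPU"` device are placeholders, and an installed CPU plugin is assumed):

```python
import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model(model="models/add_abc.onnx")  # illustrative path
compiled_model = core.compile_model(model, "CPU")

# add_abc sums its three scalar inputs; feed them by input index.
results = compiled_model.infer_new_request({
    0: np.array([1.0], dtype=np.float32),
    1: np.array([2.0], dtype=np.float32),
    2: np.array([3.0], dtype=np.float32),
})
assert np.allclose(list(results.values())[0], [6.0])
```
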
@@ -15,8 +15,8 @@ from tests.test_onnx.utils.onnx_helpers import import_onnx_model

def test_import_onnx_function():
    model_path = os.path.join(os.path.dirname(__file__), "models/add_abc.onnx")
    ie = Core()
    func = ie.read_model(model=model_path)
    core = Core()
    model = core.read_model(model=model_path)

    dtype = np.float32
    value_a = np.array([1.0], dtype=dtype)

@@ -24,7 +24,7 @@ def test_import_onnx_function():
    value_c = np.array([3.0], dtype=dtype)

    runtime = get_runtime()
    computation = runtime.computation(func)
    computation = runtime.computation(model)
    result = computation(value_a, value_b, value_c)
    assert np.allclose(result, np.array([6], dtype=dtype))

@@ -44,10 +44,10 @@ def test_simple_graph():
    )
    model = make_model(graph, producer_name="ngraph ONNX Importer")

    ng_model_function = import_onnx_model(model)
    graph_model_function = import_onnx_model(model)

    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    computation = runtime.computation(graph_model_function)
    assert np.array_equal(
        computation(
            np.array([1], dtype=np.float32),

@@ -44,8 +44,8 @@ def make_onnx_model_for_conv_op(x_shape, weights_shape, transpose=False, **attri
def import_and_compute_conv(inputs, weights, transpose=False, **attributes):
    inputs, weights = np.array(inputs), np.array(weights)
    onnx_model = make_onnx_model_for_conv_op(inputs.shape, weights.shape, transpose=transpose, **attributes)
    ng_model_function = import_onnx_model(onnx_model)
    computation = get_runtime().computation(ng_model_function)
    model = import_onnx_model(onnx_model)
    computation = get_runtime().computation(model)
    return computation(inputs, weights)[0]

@@ -263,15 +263,15 @@ def test_pad_opset_1():
    outputs = np.pad(inputs, pad_width=1, mode="constant")

    model = get_node_model("Pad", inputs, paddings=[1, 1, 1, 1])
    ng_results = run_model(model, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_model(model, [inputs])
    assert np.array_equal(graph_results, [outputs])

    inputs = np.random.randn(1, 3, 4, 5).astype(np.float32)
    outputs = np.pad(inputs, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode="constant")

    model = get_node_model("Pad", inputs, mode="constant", paddings=[0, 0, 1, 3, 0, 0, 2, 4])
    ng_results = run_model(model, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_model(model, [inputs])
    assert np.array_equal(graph_results, [outputs])

    # incorrect paddings rank
    inputs = np.ones((2, 2), dtype=np.float32)

@@ -290,15 +290,15 @@ def test_pad_opset_2():
    outputs = np.pad(inputs, pad_width=1, mode="constant")

    model = get_node_model("Pad", inputs, opset=2, pads=[1, 1, 1, 1])
    ng_results = run_model(model, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_model(model, [inputs])
    assert np.array_equal(graph_results, [outputs])

    inputs = np.random.randn(1, 3, 4, 5).astype(np.float32)
    outputs = np.pad(inputs, pad_width=((0, 0), (0, 0), (1, 2), (3, 4)), mode="constant")

    model = get_node_model("Pad", inputs, opset=2, mode="constant", pads=[0, 0, 1, 3, 0, 0, 2, 4])
    ng_results = run_model(model, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_model(model, [inputs])
    assert np.array_equal(graph_results, [outputs])

    # incorrect pads rank
    inputs = np.ones((2, 2), dtype=np.float32)

@@ -312,13 +312,13 @@ def test_pad_negative_values_begin():

    # Axis 1 begin
    model = get_node_model("Pad", inputs, opset=2, pads=[-1, 0, 0, 0])
    ng_result = run_model(model, [inputs])[0]
    assert np.array_equal(ng_result, np.array([[1, 1]]))
    graph_result = run_model(model, [inputs])[0]
    assert np.array_equal(graph_result, np.array([[1, 1]]))

    # Axis 2 begin
    model = get_node_model("Pad", inputs, opset=2, pads=[0, -1, 0, 0])
    ng_result = run_model(model, [inputs])[0]
    assert np.array_equal(ng_result, np.array([[1], [1]]))
    graph_result = run_model(model, [inputs])[0]
    assert np.array_equal(graph_result, np.array([[1], [1]]))


def test_pad_negative_values_end():

@@ -326,28 +326,28 @@ def test_pad_negative_values_end():

    # Axis 1 end
    model = get_node_model("Pad", inputs, opset=2, pads=[0, 0, -1, 0])
    ng_result = run_model(model, [inputs])[0]
    assert np.array_equal(ng_result, np.array([[1.0, 1.0]]))
    graph_result = run_model(model, [inputs])[0]
    assert np.array_equal(graph_result, np.array([[1.0, 1.0]]))

    # Axis 2 end
    model = get_node_model("Pad", inputs, opset=2, pads=[0, 0, 0, -1])
    ng_result = run_model(model, [inputs])[0]
    assert np.array_equal(ng_result, np.array([[1], [1]]))
    graph_result = run_model(model, [inputs])[0]
    assert np.array_equal(graph_result, np.array([[1], [1]]))


def test_pool_average(ndarray_1x1x4x4):
    inputs = ndarray_1x1x4x4
    node = onnx.helper.make_node("AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2))
    outputs = np.array([[13.5, 15.5], [21.5, 23.5]], dtype=np.float32).reshape([1, 1, 2, 2])
    ng_results = run_node(node, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs])
    assert np.array_equal(graph_results, [outputs])

    node = onnx.helper.make_node(
        "AveragePool", inputs=["x"], outputs=["y"], kernel_shape=(2, 2), strides=(2, 2), pads=(1, 1, 1, 1),
    )
    outputs = np.array([[11, 12.5, 14], [17, 18.5, 20], [23, 24.5, 26]], dtype=np.float32).reshape([1, 1, 3, 3])
    ng_results = run_node(node, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs])
    assert np.array_equal(graph_results, [outputs])


def test_pool_average_3d(ndarray_1x1x4x4):

@@ -356,8 +356,8 @@ def test_pool_average_3d(ndarray_1x1x4x4):
    outputs = np.array([[[13.5, 15.5], [21.5, 23.5]], [[13.5, 15.5], [21.5, 23.5]]], dtype=np.float32).reshape(
        [1, 1, 2, 2, 2],
    )
    ng_results = run_node(node, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs])
    assert np.array_equal(graph_results, [outputs])


def test_pool_max(ndarray_1x1x4x4):

@@ -366,8 +366,8 @@ def test_pool_max(ndarray_1x1x4x4):
    inputs = ndarray_1x1x4x4
    outputs = np.array([[16, 18], [24, 26]], dtype=np.float32).reshape([1, 1, 2, 2])

    ng_results = run_node(node, [inputs], opset_version=7)
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs], opset_version=7)
    assert np.array_equal(graph_results, [outputs])


def test_pool_global_max(ndarray_1x1x4x4):

@@ -376,8 +376,8 @@ def test_pool_global_max(ndarray_1x1x4x4):
    inputs = ndarray_1x1x4x4
    outputs = np.array([26], dtype=np.float32).reshape([1, 1, 1, 1])

    ng_results = run_node(node, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs])
    assert np.array_equal(graph_results, [outputs])


def test_pool_global_average(ndarray_1x1x4x4):

@@ -386,8 +386,8 @@ def test_pool_global_average(ndarray_1x1x4x4):
    inputs = ndarray_1x1x4x4
    outputs = np.array([18.5], dtype=np.float32).reshape([1, 1, 1, 1])

    ng_results = run_node(node, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs])
    assert np.array_equal(graph_results, [outputs])


def test_pool_global_average_3d(ndarray_1x1x4x4):

@@ -395,5 +395,5 @@ def test_pool_global_average_3d(ndarray_1x1x4x4):

    node = onnx.helper.make_node("GlobalAveragePool", inputs=["x"], outputs=["y"])
    outputs = np.array([18.5], dtype=np.float32).reshape([1, 1, 1, 1, 1])
    ng_results = run_node(node, [inputs])
    assert np.array_equal(ng_results, [outputs])
    graph_results = run_node(node, [inputs])
    assert np.array_equal(graph_results, [outputs])

@@ -26,14 +26,14 @@ def test_logical(onnx_op, numpy_func, data_type):
    input_a = np.array([[0, 1, -1], [0, 1, -1], [0, 1, -1]]).astype(data_type)
    input_b = np.array([[0, 0, 0], [1, 1, 1], [-1, -1, -1]]).astype(data_type)
    expected_output = numpy_func(input_a, input_b)
    ng_results = run_node(node, [input_a, input_b], opset_version=4)
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [input_a, input_b], opset_version=4)
    assert np.array_equal(graph_results, [expected_output])

    input_a = np.array([[0, 1, -1], [0, 1, -1], [0, 1, -1]]).astype(data_type)
    input_b = np.array(1).astype(data_type)
    expected_output = numpy_func(input_a, input_b)
    ng_results = run_node(node, [input_a, input_b], opset_version=4)
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [input_a, input_b], opset_version=4)
    assert np.array_equal(graph_results, [expected_output])


def test_logical_not():

@@ -41,5 +41,5 @@ def test_logical_not():
    expected_output = np.logical_not(input_data)

    node = onnx.helper.make_node("Not", inputs=["X"], outputs=["Y"])
    ng_results = run_node(node, [input_data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [input_data])
    assert np.array_equal(graph_results, [expected_output])

@@ -32,8 +32,8 @@ def import_and_compute_matmul(input_left, input_right):
    input_data_right = np.array(input_right).astype(np.float32)
    onnx_model = make_onnx_model_for_matmul_op(input_data_left, input_data_right)
    transformer = get_runtime()
    ng_model_function = import_onnx_model(onnx_model)
    computation = transformer.computation(ng_model_function)
    model = import_onnx_model(onnx_model)
    computation = transformer.computation(model)
    return computation(input_data_left, input_data_right)[0]


@@ -89,8 +89,8 @@ def import_and_compute_gemm(input_a, input_b, input_c, **kwargs):

    onnx_model = make_onnx_model_for_gemm_op(input_a, input_b, input_c, **kwargs)
    transformer = get_runtime()
    ng_model_function = import_onnx_model(onnx_model)
    computation = transformer.computation(ng_model_function)
    model = import_onnx_model(onnx_model)
    computation = transformer.computation(model)
    return computation(input_a, input_b, input_c)[0]

||||
|
@ -65,9 +65,9 @@ def import_and_compute_with_axes_as_const(op_type, data, axes, **node_attrs):
|
||||
|
||||
model = onnx.helper.make_model(graph, producer_name="ngraph ONNX Importer")
|
||||
model.opset_import[0].version = 13
|
||||
ng_model_function = import_onnx_model(model)
|
||||
graph_model = import_onnx_model(model)
|
||||
runtime = get_runtime()
|
||||
computation = runtime.computation(ng_model_function)
|
||||
computation = runtime.computation(graph_model)
|
||||
return computation(data_input)[0]
|
||||
|
||||
|
||||
@ -141,15 +141,15 @@ def test_reduce_l1(reduction_axes):
|
||||
|
||||
expected = np.sum(np.abs(input_data), keepdims=True, axis=reduction_axes)
|
||||
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], axes=reduction_axes)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.sum(np.abs(input_data), keepdims=False, axis=reduction_axes)
|
||||
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
def test_reduce_l1_default_axes():
|
||||
@ -159,15 +159,15 @@ def test_reduce_l1_default_axes():
|
||||
|
||||
expected = np.sum(np.abs(input_data), keepdims=True)
|
||||
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"])
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.array(np.sum(np.abs(input_data), keepdims=False))
|
||||
node = onnx.helper.make_node("ReduceL1", inputs=["x"], outputs=["y"], keepdims=0)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
|
||||
@ -179,15 +179,15 @@ def test_reduce_l2(reduction_axes):
|
||||
expected = np.sqrt(np.sum(np.square(input_data), keepdims=True, axis=reduction_axes))
|
||||
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], axes=reduction_axes)
|
||||
raw_result = run_node(node, [input_data])
|
||||
ng_result = np.array(raw_result.pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(raw_result.pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.sqrt(np.sum(np.square(input_data), keepdims=False, axis=reduction_axes))
|
||||
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
def test_reduce_l2_default_axes():
|
||||
@ -197,15 +197,15 @@ def test_reduce_l2_default_axes():
|
||||
|
||||
expected = np.sqrt(np.sum(np.square(input_data), keepdims=True))
|
||||
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"])
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.array(np.sqrt(np.sum(np.square(input_data), keepdims=False)))
|
||||
node = onnx.helper.make_node("ReduceL2", inputs=["x"], outputs=["y"], keepdims=0)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("reduction_axes", [(0,), (0, 2), (0, 1, 2)])
|
||||
@ -216,15 +216,15 @@ def test_reduce_log_sum(reduction_axes):
|
||||
|
||||
expected = np.log(np.sum(input_data, keepdims=True, axis=reduction_axes))
|
||||
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], axes=reduction_axes)
|
||||
ng_result = run_node(node, [input_data]).pop()
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = run_node(node, [input_data]).pop()
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.log(np.sum(input_data, keepdims=False, axis=reduction_axes))
|
||||
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
|
||||
ng_result = run_node(node, [input_data]).pop()
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = run_node(node, [input_data]).pop()
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
def test_reduce_log_sum_default_axes():
|
||||
@ -234,15 +234,15 @@ def test_reduce_log_sum_default_axes():
|
||||
|
||||
expected = np.log(np.sum(input_data, keepdims=True))
|
||||
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"])
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.log(np.sum(input_data, keepdims=False))
|
||||
node = onnx.helper.make_node("ReduceLogSum", inputs=["x"], outputs=["y"], keepdims=0)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
def test_reduce_log_sum_exp():
|
||||
@ -284,15 +284,15 @@ def test_reduce_sum_square(reduction_axes):
|
||||
|
||||
expected = np.sum(np.square(input_data), keepdims=True, axis=reduction_axes)
|
||||
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], axes=reduction_axes)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.sum(np.square(input_data), keepdims=False, axis=reduction_axes)
|
||||
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0, axes=reduction_axes)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
def test_reduce_sum_square_default_axes():
|
||||
@ -302,15 +302,15 @@ def test_reduce_sum_square_default_axes():
|
||||
|
||||
expected = np.sum(np.square(input_data), keepdims=True)
|
||||
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"])
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
expected = np.sum(np.square(input_data), keepdims=False)
|
||||
node = onnx.helper.make_node("ReduceSumSquare", inputs=["x"], outputs=["y"], keepdims=0)
|
||||
ng_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, ng_result.shape)
|
||||
assert np.allclose(expected, ng_result)
|
||||
graph_result = np.array(run_node(node, [input_data]).pop())
|
||||
assert np.array_equal(expected.shape, graph_result.shape)
|
||||
assert np.allclose(expected, graph_result)
|
||||
|
||||
|
||||
def test_reduce_argmin():
|
||||
|
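The reduce tests above all follow the same two-phase shape check: once with the ONNX default `keepdims=1` and once with `keepdims=0`, asserting shape before values. A compact sketch of that pattern as a reusable checker (a hypothetical helper; `run_node` is the test_onnx utility used throughout these hunks):

```python
import numpy as np
import onnx

from tests.test_onnx.utils import run_node


def check_reduce_sketch(op_type, np_reduce, input_data, axes):
    # Phase 1: reduced dimensions kept (ONNX default keepdims=1).
    expected = np_reduce(input_data, keepdims=True, axis=axes)
    node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], axes=axes)
    graph_result = np.array(run_node(node, [input_data]).pop())
    assert np.array_equal(expected.shape, graph_result.shape)
    assert np.allclose(expected, graph_result)

    # Phase 2: reduced dimensions dropped (keepdims=0).
    expected = np_reduce(input_data, keepdims=False, axis=axes)
    node = onnx.helper.make_node(op_type, inputs=["x"], outputs=["y"], keepdims=0, axes=axes)
    graph_result = np.array(run_node(node, [input_data]).pop())
    assert np.array_equal(expected.shape, graph_result.shape)
    assert np.allclose(expected, graph_result)
```

Called as, for instance, `check_reduce_sketch("ReduceSumSquare", lambda d, **kw: np.sum(np.square(d), **kw), data, (0, 2))`.
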
@@ -27,8 +27,8 @@ def test_reshape():
    )
    expected_output = input_data.reshape([256, 10])

    ng_results = run_node(reshape_node, [input_data], opset_version=4)
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(reshape_node, [input_data], opset_version=4)
    assert np.array_equal(graph_results, [expected_output])


def test_reshape_opset5():

@@ -67,12 +67,12 @@ def test_reshape_opset5():

    model = make_model(graph, producer_name="ngraph ONNX Importer")
    model.opset_import[0].version = 5
    ng_model_function = import_onnx_model(model)
    graph_model_function = import_onnx_model(model)
    runtime = get_runtime()
    computation = runtime.computation(ng_model_function)
    ng_results = computation(input_data)
    computation = runtime.computation(graph_model_function)
    graph_results = computation(input_data)
    expected_output = np.reshape(input_data, shape)
    assert np.array_equal(ng_results[0], expected_output)
    assert np.array_equal(graph_results[0], expected_output)


@pytest.mark.xfail(reason="RuntimeError: Reshape z has dynamic second input!")

@@ -81,8 +81,8 @@ def test_reshape_opset5_param_err():
    output_shape = np.array([4, 2, 3], dtype=np.int32)
    input_data = np.random.random_sample(original_shape).astype(np.float32)
    reshape_node = onnx.helper.make_node("Reshape", inputs=["x", "y"], outputs=["z"])
    ng_result = run_node(reshape_node, [input_data, output_shape], opset_version=5)
    assert ng_result[0].shape == output_shape
    graph_result = run_node(reshape_node, [input_data, output_shape], opset_version=5)
    assert graph_result[0].shape == output_shape


@pytest.mark.parametrize(

@@ -98,8 +98,8 @@ def test_reshape_opset5_param_err():
def test_flatten(axis, expected_output):
    data = np.arange(120, dtype=np.int32).reshape([2, 3, 4, 5])
    node = onnx.helper.make_node("Flatten", inputs=["x"], outputs=["y"], axis=axis)
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data])
    assert np.array_equal(graph_results, [expected_output])


def test_flatten_exception():

@@ -115,15 +115,15 @@ def test_transpose():

    node = onnx.helper.make_node("Transpose", inputs=["x"], outputs=["y"])
    expected_output = data.T
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data])
    assert np.array_equal(graph_results, [expected_output])

    node = onnx.helper.make_node(
        "Transpose", inputs=["x"], outputs=["y"], perm=(3, 1, 0, 2),
    )
    expected_output = np.transpose(data, axes=(3, 1, 0, 2))
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data])
    assert np.array_equal(graph_results, [expected_output])


@xfail_issue_35927

@@ -132,47 +132,47 @@ def test_slice_opset1():

    expected_output = np.array([[5, 6, 7]])
    model = get_node_model("Slice", data, axes=[0, 1], starts=[1, 0], ends=[2, 3])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])

    expected_output = np.array([[2, 3, 4]])
    model = get_node_model("Slice", data, starts=[0, 1], ends=[-1, 1000])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])

    data = np.random.randn(20, 10, 5).astype(np.float32)
    expected_output = data[0:3, 0:10]
    model = get_node_model("Slice", data, axes=[0, 1], starts=[0, 0], ends=[3, 10])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])

    # default axes
    data = np.random.randn(20, 10, 5).astype(np.float32)
    expected_output = data[:, :, 3:4]
    model = get_node_model("Slice", data, starts=[0, 0, 3], ends=[20, 10, 4])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])

    # end out of bounds
    data = np.random.randn(20, 10, 5).astype(np.float32)
    expected_output = data[:, 1:1000]
    model = get_node_model("Slice", data, axes=[1], starts=[1], ends=[1000])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])

    # negative value
    data = np.random.randn(20, 10, 5).astype(np.float32)
    expected_output = data[:, 0:-1]
    model = get_node_model("Slice", data, axes=[1], starts=[0], ends=[-1])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])

    # start out of bounds
    data = np.random.randn(20, 10, 5).astype(np.float32)
    expected_output = data[:, 1000:1000]
    model = get_node_model("Slice", data, axes=[1], starts=[1000], ends=[1000])
    ng_results = run_model(model, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_model(model, [data])
    assert np.array_equal(graph_results, [expected_output])


def test_concat():

@@ -180,20 +180,20 @@ def test_concat():
    input_2 = np.array([[5, 6]], dtype=np.int32)

    node = onnx.helper.make_node("Concat", inputs=["x"], outputs=["z"], axis=0)
    ng_results = run_node(node, [input_1])
    assert np.array_equal(ng_results, [input_1])
    graph_results = run_node(node, [input_1])
    assert np.array_equal(graph_results, [input_1])

    expected_output = np.concatenate((input_1, input_2), axis=0)
    node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=0)
    ng_results = run_node(node, [input_1, input_2])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [input_1, input_2])
    assert np.array_equal(graph_results, [expected_output])

    input_1 = np.array([[1, 2], [3, 4]], dtype=np.int32)
    input_2 = np.array([[5, 6]], dtype=np.int32).T
    expected_output = np.concatenate((input_1, input_2), axis=1)
    node = onnx.helper.make_node("Concat", inputs=["x", "y"], outputs=["z"], axis=1)
    ng_results = run_node(node, [input_1, input_2])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [input_1, input_2])
    assert np.array_equal(graph_results, [expected_output])

    test_cases = {
        "1d": ([1, 2], [3, 4]),

@@ -215,8 +215,8 @@ def test_concat():
            axis=i,
        )
        expected_output = np.concatenate(values, i)
        ng_results = run_node(node, np.array(values, dtype=np.int32))
        assert np.array_equal(ng_results, [expected_output])
        graph_results = run_node(node, np.array(values, dtype=np.int32))
        assert np.array_equal(graph_results, [expected_output])


@xfail_issue_44968

@@ -226,15 +226,15 @@ def test_squeeze():

    axes = np.array([0, 3]).astype(np.int64)
    node = onnx.helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"])
    ng_results = run_node(node, [data, axes])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data, axes])
    assert np.array_equal(graph_results, [expected_output])

    data = np.random.randn(1, 3, 4, 5).astype(np.float32)
    expected_output = np.squeeze(data, axis=0)
    axes = np.array([0]).astype(np.int64)
    node = onnx.helper.make_node("Squeeze", inputs=["x", "axes"], outputs=["y"])
    ng_results = run_node(node, [data, axes])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data, axes])
    assert np.array_equal(graph_results, [expected_output])


@xfail_issue_44858

@@ -243,20 +243,20 @@ def test_unsqueeze():
    expected_output = np.expand_dims(data, axis=0)
    axes = np.array([0]).astype(np.int64)
    node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"])
    ng_results = run_node(node, [data, axes])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data, axes])
    assert np.array_equal(graph_results, [expected_output])

    expected_output = np.reshape(data, [1, 3, 4, 5, 1])
    axes = np.array([0, 4]).astype(np.int64)
    node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"])
    ng_results = run_node(node, [data, axes])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data, axes])
    assert np.array_equal(graph_results, [expected_output])

    expected_output = np.reshape(data, [1, 3, 1, 4, 5])
    axes = np.array([0, 2]).astype(np.int64)
    node = onnx.helper.make_node("Unsqueeze", inputs=["x", "axes"], outputs=["y"])
    ng_results = run_node(node, [data, axes])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data, axes])
    assert np.array_equal(graph_results, [expected_output])


@pytest.mark.parametrize(

@@ -302,8 +302,8 @@ def test_unsqueeze():
)
def test_split_2d(node, expected_output):
    data = np.arange(8, dtype=np.int32).reshape(2, 4)
    ng_results = run_node(node, [data])
    assert all_arrays_equal(ng_results, expected_output)
    graph_results = run_node(node, [data])
    assert all_arrays_equal(graph_results, expected_output)


def test_split_2d_splits_input():

@@ -316,8 +316,8 @@ def test_split_2d_splits_input():
        np.array([[0, 1, 2], [4, 5, 6]], dtype=np.int32),
        np.array([[3], [7]], dtype=np.int32),
    ]
    ng_results = run_node(node, [data, splits])
    assert all_arrays_equal(ng_results, expected_outputs)
    graph_results = run_node(node, [data, splits])
    assert all_arrays_equal(graph_results, expected_outputs)


def test_split_1d():

@@ -329,8 +329,8 @@ def test_split_1d():
        np.array([1.0, 2.0, 3.0]).astype(np.float32),
        np.array([4.0, 5.0, 6.0]).astype(np.float32),
    ]
    ng_results = run_node(node, [data])
    assert all_arrays_equal(ng_results, expected_outputs)
    graph_results = run_node(node, [data])
    assert all_arrays_equal(graph_results, expected_outputs)

    splits = np.array([2, 3, 1]).astype(np.int64)
    node = onnx.helper.make_node(

@@ -341,8 +341,8 @@ def test_split_1d():
        np.array([3.0, 4.0, 5.0]).astype(np.float32),
        np.array([6.0]).astype(np.float32),
    ]
    ng_results = run_node(node, [data, splits])
    assert all_arrays_equal(ng_results, expected_outputs)
    graph_results = run_node(node, [data, splits])
    assert all_arrays_equal(graph_results, expected_outputs)

    # Default values
    data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32)

@@ -353,8 +353,8 @@ def test_split_1d():
        np.array([3.0, 4.0]).astype(np.float32),
        np.array([5.0, 6.0]).astype(np.float32),
    ]
    ng_results = run_node(node, [data])
    assert all_arrays_equal(ng_results, expected_outputs)
    graph_results = run_node(node, [data])
    assert all_arrays_equal(graph_results, expected_outputs)

    splits = np.array([2, 4]).astype(np.int64)
    node = onnx.helper.make_node(

@@ -364,8 +364,8 @@ def test_split_1d():
        np.array([1.0, 2.0]).astype(np.float32),
        np.array([3.0, 4.0, 5.0, 6.0]).astype(np.float32),
    ]
    ng_results = run_node(node, [data, splits])
    assert all_arrays_equal(ng_results, expected_outputs)
    graph_results = run_node(node, [data, splits])
    assert all_arrays_equal(graph_results, expected_outputs)


def test_depth_to_space():

@@ -379,8 +379,8 @@ def test_depth_to_space():
    )

    node = onnx.helper.make_node("DepthToSpace", inputs=["x"], outputs=["y"], blocksize=blocksize)
    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data])
    assert np.array_equal(graph_results, [expected_output])

    # (1, 4, 2, 3) input tensor
    data = np.array(

@@ -407,5 +407,5 @@ def test_depth_to_space():
        ],
    ).astype(np.float32)

    ng_results = run_node(node, [data])
    assert np.array_equal(ng_results, [expected_output])
    graph_results = run_node(node, [data])
    assert np.array_equal(graph_results, [expected_output])

@ -24,8 +24,8 @@ from tests.test_onnx.utils import get_node_model, import_onnx_model, run_model,
|
||||
def test_abs(input_data):
|
||||
expected_output = np.abs(input_data)
|
||||
node = onnx.helper.make_node("Abs", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.array_equal(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.array_equal(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -40,8 +40,8 @@ def test_sqrt(input_data):
|
||||
input_data = input_data.astype(np.float32)
|
||||
expected_output = np.sqrt(input_data)
|
||||
node = onnx.helper.make_node("Sqrt", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.allclose(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.allclose(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -56,8 +56,8 @@ def test_exp(input_data):
|
||||
input_data = input_data.astype(np.float32)
|
||||
expected_output = np.exp(input_data)
|
||||
node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.allclose(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.allclose(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -72,8 +72,8 @@ def test_log(input_data):
|
||||
input_data = input_data.astype(np.float32)
|
||||
expected_output = np.log(input_data)
|
||||
node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.allclose(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.allclose(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -87,8 +87,8 @@ def test_log(input_data):
|
||||
def test_neg(input_data):
|
||||
expected_output = np.negative(input_data)
|
||||
node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.array_equal(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.array_equal(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -103,8 +103,8 @@ def test_floor(input_data):
|
||||
input_data = input_data.astype(np.float32)
|
||||
expected_output = np.floor(input_data)
|
||||
node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.array_equal(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.array_equal(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -119,8 +119,8 @@ def test_ceil(input_data):
|
||||
input_data = input_data.astype(np.float32)
|
||||
expected_output = np.ceil(input_data)
|
||||
node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.array_equal(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.array_equal(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
@ -163,8 +163,8 @@ def test_reciprocal(input_data):
|
||||
input_data = input_data.astype(np.float32)
|
||||
expected_output = np.reciprocal(input_data)
|
||||
node = onnx.helper.make_node("Reciprocal", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [input_data])
|
||||
assert np.allclose(ng_results, [expected_output])
|
||||
graph_results = run_node(node, [input_data])
|
||||
assert np.allclose(graph_results, [expected_output])
|
||||
|
||||
|
||||
@pytest.mark.parametrize(("axis", "dim1", "dim2"), [(0, 1, 60), (1, 3, 20), (2, 12, 5)])
|
||||
@ -176,8 +176,8 @@ def test_hardmax(axis, dim1, dim2):
|
||||
data = np.random.rand(3, 4, 5).astype(np.float32)
|
||||
expected = hardmax_2d(data.reshape(dim1, dim2)).reshape(3, 4, 5)
|
||||
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=axis)
|
||||
ng_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(ng_results, [expected])
|
||||
graph_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(graph_results, [expected])
|
||||
|
||||
|
||||
def test_hardmax_special_cases():
|
||||
@ -190,25 +190,25 @@ def test_hardmax_special_cases():
|
||||
# default axis=1
|
||||
expected = hardmax_2d(data.reshape(3, 20)).reshape(3, 4, 5)
|
||||
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(ng_results, [expected])
|
||||
graph_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(graph_results, [expected])
|
||||
|
||||
expected = hardmax_2d(data.reshape(12, 5)).reshape(3, 4, 5)
|
||||
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=-1)
|
||||
ng_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(ng_results, [expected])
|
||||
graph_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(graph_results, [expected])
|
||||
|
||||
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], axis=3)
|
||||
with pytest.raises(RuntimeError):
|
||||
ng_results = run_node(node, [data], opset_version=12)
|
||||
graph_results = run_node(node, [data], opset_version=12)
|
||||
|
||||
# For multiple occurrences of the maximal values, the first occurrence is selected
|
||||
# for one-hot output
|
||||
data = np.array([[3, 3, 3, 1]]).astype(np.float32)
|
||||
expected = np.array([[1, 0, 0, 0]]).astype(np.float32)
|
||||
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"])
|
||||
ng_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(ng_results, [expected])
|
||||
graph_results = run_node(node, [data], opset_version=12)
|
||||
assert np.allclose(graph_results, [expected])


def test_hardsigmoid():
@ -222,13 +222,13 @@ def test_hardsigmoid():

expected = hardsigmoid(data, alpha, beta)
node = onnx.helper.make_node("HardSigmoid", inputs=["x"], outputs=["y"], alpha=alpha, beta=beta)
ng_results = run_node(node, [data])
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data])
assert np.allclose(graph_results, [expected])

expected = hardsigmoid(data)
node = onnx.helper.make_node("HardSigmoid", inputs=["x"], outputs=["y"])
ng_results = run_node(node, [data])
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data])
assert np.allclose(graph_results, [expected])


def test_logsoftmax():
@ -242,27 +242,27 @@ def test_logsoftmax():

node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=0)
expected = logsoftmax_2d(data.reshape(1, 60)).reshape(3, 4, 5)
ng_results = run_node(node, [data], opset_version=12)
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data], opset_version=12)
assert np.allclose(graph_results, [expected])

node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=1)
expected = logsoftmax_2d(data.reshape(3, 20)).reshape(3, 4, 5)
ng_results = run_node(node, [data], opset_version=12)
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data], opset_version=12)
assert np.allclose(graph_results, [expected])

# default axis is 1
node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"])
ng_results = run_node(node, [data], opset_version=12)
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data], opset_version=12)
assert np.allclose(graph_results, [expected])

node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=2)
expected = logsoftmax_2d(data.reshape(12, 5)).reshape(3, 4, 5)
ng_results = run_node(node, [data], opset_version=12)
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data], opset_version=12)
assert np.allclose(graph_results, [expected])

node = onnx.helper.make_node("LogSoftmax", inputs=["x"], outputs=["y"], axis=3)
with pytest.raises(RuntimeError):
ng_results = run_node(node, [data], opset_version=12)
graph_results = run_node(node, [data], opset_version=12)


def test_softplus():
@ -274,8 +274,8 @@ def test_softplus():

node = onnx.helper.make_node("Softplus", inputs=["x"], outputs=["y"])
expected = softplus(data)
ng_results = run_node(node, [data])
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data])
assert np.allclose(graph_results, [expected])


def test_softsign():
@ -287,8 +287,8 @@ def test_softsign():

node = onnx.helper.make_node("Softsign", inputs=["x"], outputs=["y"])
expected = softsign(data)
ng_results = run_node(node, [data])
assert np.allclose(ng_results, [expected])
graph_results = run_node(node, [data])
assert np.allclose(graph_results, [expected])


def test_identity():
@ -297,8 +297,8 @@ def test_identity():
input_data = np.random.randn(*shape).astype(np.float32)

identity_node = make_node("Identity", inputs=["x"], outputs=["y"])
ng_results = run_node(identity_node, [input_data])
assert np.array_equal(ng_results, [input_data])
graph_results = run_node(identity_node, [input_data])
assert np.array_equal(graph_results, [input_data])

node1 = make_node("Add", inputs=["A", "B"], outputs=["add1"], name="add_node1")
node2 = make_node("Identity", inputs=["add1"], outputs=["identity1"], name="identity_node1")
@ -314,13 +314,13 @@ def test_identity():
[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, shape)],
)
model = make_model(graph, producer_name="ngraph ONNX Importer")
ng_model_function = import_onnx_model(model)
graph_model = import_onnx_model(model)
runtime = get_runtime()
computation = runtime.computation(ng_model_function)
ng_results = computation(input_data, input_data)
computation = runtime.computation(graph_model)
graph_results = computation(input_data, input_data)
expected_result = np.abs(input_data + input_data)

assert np.array_equal(ng_results[0], expected_result)
assert np.array_equal(graph_results[0], expected_result)


@pytest.mark.parametrize(("val_type", "input_data"), [(np.dtype(bool), np.zeros((2, 2), dtype=int))])
@ -455,8 +455,8 @@ def test_constant(value_type):
),
)

ng_results = run_node(node, [])
assert np.allclose(ng_results, [values])
graph_results = run_node(node, [])
assert np.allclose(graph_results, [values])


def test_constant_err():
@ -473,8 +473,8 @@ def test_constant_err():
),
)

ng_results = run_node(node, [])
assert np.allclose(ng_results, [values])
graph_results = run_node(node, [])
assert np.allclose(graph_results, [values])


@pytest.mark.parametrize(
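
The hunks above rename `ng_results` to `graph_results` throughout the ONNX operator tests without changing behavior. A minimal sketch of the renamed pattern, assuming the repository's `run_node` helper (shown further below) is importable from `tests.test_onnx.utils`:

```python
import numpy as np
import onnx.helper
import pytest

from tests.test_onnx.utils import run_node  # assumed import path


@pytest.mark.parametrize("data", [np.array([-1.0, 0.0, 1.0], dtype=np.float32)])
def test_relu_example(data):
    node = onnx.helper.make_node("Relu", inputs=["x"], outputs=["y"])
    expected = np.maximum(data, 0)

    # "graph_results" is the new, backend-neutral name for "ng_results".
    graph_results = run_node(node, [data])
    assert np.allclose(graph_results, [expected])
```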

@ -25,8 +25,8 @@ def test_variadic(onnx_op, numpy_func):
)
expected_output = reduce(numpy_func, data)

ng_results = run_node(node, data)
assert np.array_equal(ng_results, [expected_output])
graph_results = run_node(node, data)
assert np.array_equal(graph_results, [expected_output])


def test_mean():
@ -40,5 +40,5 @@ def test_mean():
)
expected_output = reduce(np.add, data) / len(data)

ng_results = run_node(node, data)
assert np.array_equal(ng_results, [expected_output])
graph_results = run_node(node, data)
assert np.array_equal(graph_results, [expected_output])

@ -17,7 +17,7 @@ from tests.test_onnx.utils.onnx_helpers import import_onnx_model

def run_node(onnx_node, data_inputs, **kwargs):
# type: (onnx.NodeProto, List[np.ndarray], Dict[Text, Any]) -> List[np.ndarray]
"""Convert ONNX node to ngraph node and perform computation on input data.
"""Convert ONNX node to a graph node and perform computation on input data.

:param onnx_node: ONNX NodeProto describing a computation node
:param data_inputs: list of numpy ndarrays with input data
@ -29,15 +29,15 @@ def run_node(onnx_node, data_inputs, **kwargs):

def run_model(onnx_model, data_inputs):
# type: (onnx.ModelProto, List[np.ndarray]) -> List[np.ndarray]
"""Convert ONNX model to an ngraph model and perform computation on input data.
"""Convert ONNX model to a graph model and perform computation on input data.

:param onnx_model: ONNX ModelProto describing an ONNX model
:param data_inputs: list of numpy ndarrays with input data
:return: list of numpy ndarrays with computed output
"""
ng_model_function = import_onnx_model(onnx_model)
graph_model = import_onnx_model(onnx_model)
runtime = get_runtime()
computation = runtime.computation(ng_model_function)
computation = runtime.computation(graph_model)
return computation(*data_inputs)
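
For reference, a short usage sketch of the updated `run_model` helper with a hand-built ONNX `Add` model; everything besides `run_model` itself is illustrative, and the import path is assumed:

```python
import numpy as np
from onnx import TensorProto, helper

from tests.test_onnx.utils import run_model  # assumed import path

node = helper.make_node("Add", inputs=["A", "B"], outputs=["Y"])
graph = helper.make_graph(
    [node],
    "add_graph",
    [helper.make_tensor_value_info(name, TensorProto.FLOAT, [2]) for name in ("A", "B")],
    [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [2])],
)
onnx_model = helper.make_model(graph, producer_name="example")

a = np.array([1.0, 2.0], dtype=np.float32)
b = np.array([3.0, 4.0], dtype=np.float32)
graph_results = run_model(onnx_model, [a, b])  # import_onnx_model + runtime.computation
assert np.allclose(graph_results[0], a + b)
```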

@ -21,12 +21,12 @@ from tests.test_onnx.utils.onnx_helpers import import_onnx_model, np_dtype_to_te


class OpenVinoOnnxBackendRep(BackendRep):
def __init__(self, ng_model_function, device="CPU"): # type: (List[Model], str) -> None
def __init__(self, graph_model, device="CPU"): # type: (List[Model], str) -> None
super().__init__()
self.device = device
self.ng_model_function = ng_model_function
self.graph_model = graph_model
self.runtime = get_runtime()
self.computation = self.runtime.computation(ng_model_function)
self.computation = self.runtime.computation(graph_model)

def run(self, inputs, **kwargs): # type: (Any, **Any) -> Tuple[Any, ...]
"""Run computation on model."""
@ -56,8 +56,8 @@ class OpenVinoOnnxBackend(Backend):
**kwargs, # type: Any
): # type: (...) -> OpenVinoOnnxBackendRep
super().prepare(onnx_model, device, **kwargs)
ng_model_function = import_onnx_model(onnx_model)
return OpenVinoOnnxBackendRep(ng_model_function, device)
graph_model = import_onnx_model(onnx_model)
return OpenVinoOnnxBackendRep(graph_model, device)

@classmethod
def run_model(

@ -21,7 +21,7 @@ def np_dtype_to_tensor_type(data_type: np.dtype) -> int:
def import_onnx_model(model: onnx.ModelProto) -> Model:
onnx.checker.check_model(model)
model_byte_string = model.SerializeToString()
ie = Core()
func = ie.read_model(bytes(model_byte_string), Tensor(type=np.uint8, shape=[]))
core = Core()
model = core.read_model(bytes(model_byte_string), Tensor(type=np.uint8, shape=[]))

return func
return model
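
The helper above boils down to reading a serialized ONNX model straight from memory; sketched standalone (the empty `uint8` Tensor stands in for the absent external weights, exactly as in the helper):

```python
import numpy as np
import onnx
from openvino.runtime import Core, Model, Tensor


def model_from_onnx_bytes(onnx_model: onnx.ModelProto) -> Model:
    onnx.checker.check_model(onnx_model)
    core = Core()
    # read_model accepts an in-memory buffer plus a weights Tensor.
    return core.read_model(bytes(onnx_model.SerializeToString()), Tensor(type=np.uint8, shape=[]))
```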

@ -19,9 +19,9 @@ test_net_xml, test_net_bin = model_path(is_myriad)

def test_get_property_model_name(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
network_name = exec_net.get_property("NETWORK_NAME")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
network_name = compiled_model.get_property("NETWORK_NAME")
assert network_name == "test_model"


@ -30,26 +30,26 @@ def test_get_property(device):
core = Core()
if core.get_property(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
pytest.skip("Can't run on ARM plugin due-to CPU dependent test")
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
profiling_enabled = exec_net.get_property("PERF_COUNT")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
profiling_enabled = compiled_model.get_property("PERF_COUNT")
assert not profiling_enabled


def test_get_runtime_model(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
runtime_func = exec_net.get_runtime_model()
assert isinstance(runtime_func, Model)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
runtime_model = compiled_model.get_runtime_model()
assert isinstance(runtime_model, Model)


def test_export_import():
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled = core.compile_model(model, "CPU")
compiled_model = core.compile_model(model, "CPU")

user_stream = compiled.export_model()
user_stream = compiled_model.export_model()

new_compiled = core.import_model(user_stream, "CPU")

@ -64,11 +64,11 @@ def test_export_import_advanced():

core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled = core.compile_model(model, "CPU")
compiled_model = core.compile_model(model, "CPU")

user_stream = io.BytesIO()

compiled.export_model(user_stream)
compiled_model.export_model(user_stream)

new_compiled = core.import_model(user_stream, "CPU")

@ -80,9 +80,9 @@ def test_export_import_advanced():

def test_get_input_i(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.input(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.input(0)
input_node = net_input.get_node()
name = input_node.friendly_name
assert isinstance(net_input, ConstOutput)
@ -91,9 +91,9 @@ def test_get_input_i(device):

def test_get_input_tensor_name(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.input("data")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.input("data")
input_node = net_input.get_node()
name = input_node.friendly_name
assert isinstance(net_input, ConstOutput)
@ -102,9 +102,9 @@ def test_get_input(device):

def test_get_input(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.input()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.input()
input_node = net_input.get_node()
name = input_node.friendly_name
assert isinstance(net_input, ConstOutput)
@ -113,25 +113,25 @@ def test_get_input(device):

def test_get_output_i(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
output = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
output = compiled_model.output(0)
assert isinstance(output, ConstOutput)


def test_get_output(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
output = exec_net.output()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
output = compiled_model.output()
assert isinstance(output, ConstOutput)


def test_input_set_friendly_name(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.input("data")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.input("data")
input_node = net_input.get_node()
input_node.set_friendly_name("input_1")
name = input_node.friendly_name
@ -141,9 +141,9 @@ def test_input_set_friendly_name(device):

def test_output_set_friendly_name(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
output = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
output = compiled_model.output(0)
output_node = output.get_node()
output_node.set_friendly_name("output_1")
name = output_node.friendly_name
@ -153,70 +153,70 @@ def test_output_set_friendly_name(device):

def test_outputs(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
outputs = exec_net.outputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
outputs = compiled_model.outputs
assert isinstance(outputs, list)
assert len(outputs) == 1


def test_outputs_items(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
outputs = exec_net.outputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
outputs = compiled_model.outputs
assert isinstance(outputs[0], ConstOutput)


def test_output_type(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
output = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
output = compiled_model.output(0)
output_type = output.get_element_type().get_type_name()
assert output_type == "f32"


def test_output_shape(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
output = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
output = compiled_model.output(0)
expected_shape = Shape([1, 10])
assert str(output.get_shape()) == str(expected_shape)


def test_input_get_index(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.input(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.input(0)
expected_idx = 0
assert net_input.get_index() == expected_idx


def test_inputs(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
inputs = exec_net.inputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
inputs = compiled_model.inputs
assert isinstance(inputs, list)
assert len(inputs) == 1


def test_inputs_items(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
inputs = exec_net.inputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
inputs = compiled_model.inputs
assert isinstance(inputs[0], ConstOutput)


def test_inputs_get_friendly_name(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
inputs = exec_net.inputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
inputs = compiled_model.inputs
input_0 = inputs[0]
node = input_0.get_node()
name = node.friendly_name
@ -225,9 +225,9 @@ def test_inputs_get_friendly_name(device):

def test_inputs_set_friendly_name(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
inputs = exec_net.inputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
inputs = compiled_model.inputs
input_0 = inputs[0]
node = input_0.get_node()
node.set_friendly_name("input_0")
@ -237,68 +237,68 @@ def test_inputs_set_friendly_name(device):

def test_inputs_docs(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
inputs = exec_net.inputs
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
inputs = compiled_model.inputs
input_0 = inputs[0]
expected_string = "openvino.runtime.ConstOutput represents port/node output."
assert input_0.__doc__ == expected_string


def test_infer_new_request_numpy(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
img = generate_image()
exec_net = ie.compile_model(func, device)
res = exec_net.infer_new_request({"data": img})
compiled_model = core.compile_model(model, device)
res = compiled_model.infer_new_request({"data": img})
assert np.argmax(res[list(res)[0]]) == 9


def test_infer_new_request_tensor_numpy_copy(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
img = generate_image()
tensor = Tensor(img)
exec_net = ie.compile_model(func, device)
res_tensor = exec_net.infer_new_request({"data": tensor})
res_img = exec_net.infer_new_request({"data": tensor})
compiled_model = core.compile_model(model, device)
res_tensor = compiled_model.infer_new_request({"data": tensor})
res_img = compiled_model.infer_new_request({"data": img})
assert np.argmax(res_tensor[list(res_tensor)[0]]) == 9
assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(res_img[list(res_img)[0]])


def test_infer_tensor_numpy_shared_memory(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
img = generate_image()
img = np.ascontiguousarray(img)
tensor = Tensor(img, shared_memory=True)
exec_net = ie.compile_model(func, device)
res_tensor = exec_net.infer_new_request({"data": tensor})
res_img = exec_net.infer_new_request({"data": tensor})
compiled_model = core.compile_model(model, device)
res_tensor = compiled_model.infer_new_request({"data": tensor})
res_img = compiled_model.infer_new_request({"data": img})
assert np.argmax(res_tensor[list(res_tensor)[0]]) == 9
assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(res_img[list(res_img)[0]])


def test_infer_new_request_wrong_port_name(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
img = generate_image()
tensor = Tensor(img)
exec_net = ie.compile_model(func, device)
compiled_model = core.compile_model(model, device)
with pytest.raises(RuntimeError) as e:
exec_net.infer_new_request({"_data_": tensor})
compiled_model.infer_new_request({"_data_": tensor})
assert "Check" in str(e.value)


def test_infer_tensor_wrong_input_data(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
img = generate_image()
img = np.ascontiguousarray(img)
tensor = Tensor(img, shared_memory=True)
exec_net = ie.compile_model(func, device)
compiled_model = core.compile_model(model, device)
with pytest.raises(TypeError) as e:
exec_net.infer_new_request({0.: tensor})
compiled_model.infer_new_request({0.: tensor})
assert "Incompatible key type for input: 0.0" in str(e.value)


@ -308,10 +308,10 @@ def test_infer_numpy_model_from_buffer(device):
weights = f.read()
with open(test_net_xml, "rb") as f:
xml = f.read()
func = core.read_model(model=xml, weights=weights)
model = core.read_model(model=xml, weights=weights)
img = generate_image()
exec_net = core.compile_model(func, device)
res = exec_net.infer_new_request({"data": img})
compiled_model = core.compile_model(model, device)
res = compiled_model.infer_new_request({"data": img})
assert np.argmax(res[list(res)[0]]) == 9


@ -321,11 +321,11 @@ def test_infer_tensor_model_from_buffer(device):
weights = f.read()
with open(test_net_xml, "rb") as f:
xml = f.read()
func = core.read_model(model=xml, weights=weights)
model = core.read_model(model=xml, weights=weights)
img = generate_image()
tensor = Tensor(img)
exec_net = core.compile_model(func, device)
res = exec_net.infer_new_request({"data": tensor})
compiled_model = core.compile_model(model, device)
res = compiled_model.infer_new_request({"data": tensor})
assert np.argmax(res[list(res)[0]]) == 9
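
Across these tests the renames follow one convention: `ie` becomes `core`, `func` becomes `model`, and `exec_net` (or `compiled`) becomes `compiled_model`. A compact sketch of the resulting flow, assuming an IR model on disk and an input port named `data` as in these tests:

```python
from openvino.runtime import Core


def compile_and_infer(xml_path, bin_path, input_data, device="CPU"):
    core = Core()                                              # was: ie
    model = core.read_model(model=xml_path, weights=bin_path)  # was: func
    compiled_model = core.compile_model(model, device)         # was: exec_net
    # Assumes the model's input port is named "data".
    return compiled_model.infer_new_request({"data": input_data})
```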

@ -71,10 +71,10 @@ def test_core_class():


def test_compile_model(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = ie.compile_model(func, device)
assert isinstance(exec_net, CompiledModel)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
assert isinstance(compiled_model, CompiledModel)


def test_compile_model_without_device():
@ -86,43 +86,43 @@ def test_compile_model_without_device():

def test_read_model_from_ir():
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
assert isinstance(func, Model)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
assert isinstance(model, Model)

func = core.read_model(model=test_net_xml)
assert isinstance(func, Model)
model = core.read_model(model=test_net_xml)
assert isinstance(model, Model)


def test_read_model_from_tensor():
core = Core()
model = open(test_net_xml).read()
tensor = tensor_from_file(test_net_bin)
func = core.read_model(model=model, weights=tensor)
assert isinstance(func, Model)
model = core.read_model(model=model, weights=tensor)
assert isinstance(model, Model)


def test_read_model_as_path():
core = Core()
func = core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin))
assert isinstance(func, Model)
model = core.read_model(model=Path(test_net_xml), weights=Path(test_net_bin))
assert isinstance(model, Model)

func = core.read_model(model=test_net_xml, weights=Path(test_net_bin))
assert isinstance(func, Model)
model = core.read_model(model=test_net_xml, weights=Path(test_net_bin))
assert isinstance(model, Model)

func = core.read_model(model=Path(test_net_xml))
assert isinstance(func, Model)
model = core.read_model(model=Path(test_net_xml))
assert isinstance(model, Model)


def test_read_model_from_onnx():
core = Core()
func = core.read_model(model=test_net_onnx)
assert isinstance(func, Model)
model = core.read_model(model=test_net_onnx)
assert isinstance(model, Model)


def test_read_model_from_onnx_as_path():
core = Core()
func = core.read_model(model=Path(test_net_onnx))
assert isinstance(func, Model)
model = core.read_model(model=Path(test_net_onnx))
assert isinstance(model, Model)


def test_read_net_from_buffer():
@ -131,8 +131,8 @@ def test_read_net_from_buffer():
weights = f.read()
with open(model_path()[0], "rb") as f:
xml = f.read()
func = core.read_model(model=xml, weights=weights)
assert isinstance(func, Model)
model = core.read_model(model=xml, weights=weights)
assert isinstance(model, Model)


def test_net_from_buffer_valid():
@ -141,16 +141,16 @@ def test_net_from_buffer_valid():
weights = f.read()
with open(model_path()[0], "rb") as f:
xml = f.read()
func = core.read_model(model=xml, weights=weights)
ref_func = core.read_model(model=test_net_xml, weights=test_net_bin)
assert func.get_parameters() == ref_func.get_parameters()
assert func.get_results() == ref_func.get_results()
assert func.get_ordered_ops() == ref_func.get_ordered_ops()
model = core.read_model(model=xml, weights=weights)
ref_model = core.read_model(model=test_net_xml, weights=test_net_bin)
assert model.get_parameters() == ref_model.get_parameters()
assert model.get_results() == ref_model.get_results()
assert model.get_ordered_ops() == ref_model.get_ordered_ops()


def test_get_version(device):
ie = Core()
version = ie.get_versions(device)
core = Core()
version = core.get_versions(device)
assert isinstance(version, dict), "Returned version must be a dictionary"
assert device in version, f"{device} plugin version wasn't found in versions"
assert hasattr(version[device], "major"), "Returned version has no field 'major'"
@ -160,8 +160,8 @@ def test_get_version(device):


def test_available_devices(device):
ie = Core()
devices = ie.available_devices
core = Core()
devices = core.available_devices
assert device in devices, (
f"Current device '{device}' is not listed in "
f"available devices '{', '.join(devices)}'"
@ -169,8 +169,8 @@ def test_available_devices(device):


def test_get_property():
ie = Core()
conf = ie.get_property("CPU", "CPU_BIND_THREAD")
core = Core()
conf = core.get_property("CPU", "CPU_BIND_THREAD")
assert conf == "YES"


@ -179,8 +179,8 @@ def test_get_property():
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test",
)
def test_get_property_list_of_str():
ie = Core()
param = ie.get_property("CPU", "OPTIMIZATION_CAPABILITIES")
core = Core()
param = core.get_property("CPU", "OPTIMIZATION_CAPABILITIES")
assert isinstance(param, list), (
"Parameter value for 'OPTIMIZATION_CAPABILITIES' "
f"metric must be a list but {type(param)} is returned"
@ -195,8 +195,8 @@ def test_get_property_list_of_str():
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test",
)
def test_get_property_tuple_of_two_ints():
ie = Core()
param = ie.get_property("CPU", "RANGE_FOR_STREAMS")
core = Core()
param = core.get_property("CPU", "RANGE_FOR_STREAMS")
assert isinstance(param, tuple), (
"Parameter value for 'RANGE_FOR_STREAMS' "
f"metric must be tuple but {type(param)} is returned"
@ -211,8 +211,8 @@ def test_get_property_tuple_of_two_ints():
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test",
)
def test_get_property_tuple_of_three_ints():
ie = Core()
param = ie.get_property("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
core = Core()
param = core.get_property("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
assert isinstance(param, tuple), (
"Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' "
f"metric must be tuple but {type(param)} is returned"
@ -228,8 +228,8 @@ def test_get_property_tuple_of_three_ints():
reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test",
)
def test_get_property_str():
ie = Core()
param = ie.get_property("CPU", "FULL_DEVICE_NAME")
core = Core()
param = core.get_property("CPU", "FULL_DEVICE_NAME")
assert isinstance(param, str), (
"Parameter value for 'FULL_DEVICE_NAME' "
f"metric must be string but {type(param)} is returned"
@ -237,40 +237,40 @@ def test_get_property_str():


def test_query_model(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
query_res = ie.query_model(model=func, device_name=device)
ops_func = func.get_ordered_ops()
ops_func_names = [op.friendly_name for op in ops_func]
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
query_model = core.query_model(model=model, device_name=device)
ops_model = model.get_ordered_ops()
ops_func_names = [op.friendly_name for op in ops_model]
assert [
key for key in query_res.keys() if key not in ops_func_names
key for key in query_model.keys() if key not in ops_func_names
] == [], "Not all network layers present in query_model results"
assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"
assert next(iter(set(query_model.values()))) == device, "Wrong device for some layers"


@pytest.mark.dynamic_library()
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_register_plugin():
ie = Core()
ie.register_plugin("openvino_intel_cpu_plugin", "BLA")
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = ie.compile_model(func, "BLA")
core = Core()
core.register_plugin("openvino_intel_cpu_plugin", "BLA")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(model, "BLA")
assert isinstance(exec_net, CompiledModel), "Cannot load the network to the registered plugin with name 'BLA'"


@pytest.mark.dynamic_library()
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_register_plugins():
ie = Core()
core = Core()
if platform == "linux" or platform == "linux2":
ie.register_plugins(plugins_xml)
core.register_plugins(plugins_xml)
elif platform == "darwin":
ie.register_plugins(plugins_osx_xml)
core.register_plugins(plugins_osx_xml)
elif platform == "win32":
ie.register_plugins(plugins_win_xml)
core.register_plugins(plugins_win_xml)

func = ie.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = ie.compile_model(func, "CUSTOM")
model = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(model, "CUSTOM")
assert isinstance(exec_net, CompiledModel), (
"Cannot load the network to "
"the registered plugin with name 'CUSTOM' "
@ -280,11 +280,11 @@ def test_register_plugins():

@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well")
def test_unregister_plugin(device):
ie = Core()
ie.unload_plugin(device)
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
core.unload_plugin(device)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
with pytest.raises(RuntimeError) as e:
ie.load_network(func, device)
core.load_network(model, device)
assert (
f"Device with '{device}' name is not registered in the InferenceEngine"
in str(e.value)
@ -319,8 +319,8 @@ def test_add_extension():
assert isinstance(model, Model)


def test_read_model_from_buffer_no_weights(device):
model = bytes(
def test_read_model_from_buffer_no_weights():
bytes_model = bytes(
b"""<net name="add_model" version="10">
<layers>
<layer id="0" name="x" type="Parameter" version="opset1">
@ -381,16 +381,16 @@ def test_read_model_from_buffer_no_weights(device):
</edges>
</net>""")
core = Core()
func = core.read_model(model=model)
assert isinstance(func, Model)
model = core.read_model(model=bytes_model)
assert isinstance(model, Model)


def test_infer_new_request_return_type(device):
ie = Core()
func = ie.read_model(model=test_net_xml, weights=test_net_bin)
core = Core()
model = core.read_model(model=test_net_xml, weights=test_net_bin)
img = generate_image()
exec_net = ie.compile_model(func, device)
res = exec_net.infer_new_request({"data": img})
compiled_model = core.compile_model(model, device)
res = compiled_model.infer_new_request({"data": img})
arr = res[list(res)[0]][0]

assert isinstance(arr, np.ndarray)
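
The `read_model` overloads exercised above accept `str` paths, `pathlib.Path` objects, and raw buffers interchangeably; a sketch combining all three:

```python
from pathlib import Path

from openvino.runtime import Core


def read_model_three_ways(xml_path: str, bin_path: str):
    core = Core()
    from_str = core.read_model(model=xml_path, weights=bin_path)
    from_path = core.read_model(model=Path(xml_path), weights=Path(bin_path))
    with open(bin_path, "rb") as f:
        weights = f.read()
    with open(xml_path, "rb") as f:
        xml = f.read()
    from_buffer = core.read_model(model=xml, weights=weights)
    return from_str, from_path, from_buffer
```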

@ -289,10 +289,10 @@ def test_get_batch_chwn():
param3 = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data3")
add = ops.add(param1, param2)
add2 = ops.add(add, param3)
func = Model(add2, [param1, param2, param3], "TestFunction")
param = func.get_parameters()[0]
model = Model(add2, [param1, param2, param3], "TestFunction")
param = model.get_parameters()[0]
param.set_layout(Layout("CHWN"))
assert get_batch(func) == 4
assert get_batch(model) == 4


def test_set_batch_dimension():
@ -368,8 +368,8 @@ def test_reshape(device):
ref_shape[0] = 3
model.reshape(ref_shape)
core = Core()
compiled = core.compile_model(model, device)
assert compiled.input().partial_shape == ref_shape
compiled_model = core.compile_model(model, device)
assert compiled_model.input().partial_shape == ref_shape


def test_reshape_with_python_types(device):
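
A runnable sketch of the layout-aware batch query that `test_get_batch_chwn` relies on; the model here is a trivial stand-in:

```python
import numpy as np
import openvino.runtime.opset8 as ops
from openvino.runtime import Layout, Model, Shape, get_batch

param = ops.parameter(Shape([3, 1, 3, 4]), dtype=np.float32, name="data")
model = Model(ops.relu(param), [param], "TestModel")
# With a CHWN layout, the batch dimension is the last axis (size 4).
model.get_parameters()[0].set_layout(Layout("CHWN"))
assert get_batch(model) == 4
```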

@ -39,8 +39,8 @@ def create_simple_request_and_inputs(device):
model = Model(ops.add(param_a, param_b), [param_a, param_b])

core = Core()
compiled = core.compile_model(model, device)
request = compiled.create_infer_request()
compiled_model = core.compile_model(model, device)
request = compiled_model.create_infer_request()

arr_1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
arr_2 = np.array([[3, 4], [1, 2]], dtype=np.float32)
@ -60,8 +60,8 @@ def concat_model_with_data(device, ov_type, numpy_dtype):

model = Model(ops.concat(params, 0), params)
core = Core()
compiled = core.compile_model(model, device)
request = compiled.create_infer_request()
compiled_model = core.compile_model(model, device)
request = compiled_model.create_infer_request()
tensor1 = Tensor(ov_type, input_shape)
tensor1.data[:] = np.array([6, 7, 8, 9, 0])
array1 = np.array([1, 2, 3, 4, 5], dtype=numpy_dtype)
@ -90,10 +90,10 @@ def test_get_profiling_info(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
core.set_property(device, {"PERF_COUNT": "YES"})
compiled = core.compile_model(model, device)
compiled_model = core.compile_model(model, device)
img = generate_image()
request = compiled.create_infer_request()
tensor_name = compiled.input("data").any_name
request = compiled_model.create_infer_request()
tensor_name = compiled_model.input("data").any_name
request.infer({tensor_name: img})
assert request.latency > 0
prof_info = request.get_profiling_info()
@ -152,7 +152,7 @@ def test_tensor_setter(device):
def test_set_tensors(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
compiled_model = core.compile_model(model, device)

data1 = generate_image()
tensor1 = Tensor(data1)
@ -163,7 +163,7 @@ def test_set_tensors(device):
data4 = np.zeros(shape=(1, 10), dtype=np.float32)
tensor4 = Tensor(data4)

request = compiled.create_infer_request()
request = compiled_model.create_infer_request()
request.set_tensors({"data": tensor1, "fc_out": tensor2})
t1 = request.get_tensor("data")
t2 = request.get_tensor("fc_out")
@ -171,16 +171,16 @@ def test_set_tensors(device):
assert np.allclose(tensor2.data, t2.data, atol=1e-2, rtol=1e-2)

request.set_output_tensors({0: tensor2})
output_node = compiled.outputs[0]
output_node = compiled_model.outputs[0]
t3 = request.get_tensor(output_node)
assert np.allclose(tensor2.data, t3.data, atol=1e-2, rtol=1e-2)

request.set_input_tensors({0: tensor1})
output_node = compiled.inputs[0]
output_node = compiled_model.inputs[0]
t4 = request.get_tensor(output_node)
assert np.allclose(tensor1.data, t4.data, atol=1e-2, rtol=1e-2)

output_node = compiled.inputs[0]
output_node = compiled_model.inputs[0]
request.set_tensor(output_node, tensor3)
t5 = request.get_tensor(output_node)
assert np.allclose(tensor3.data, t5.data, atol=1e-2, rtol=1e-2)
@ -230,9 +230,9 @@ def test_batched_tensors(device):

model = Model([res1], [data1])

compiled = core.compile_model(model, "TEMPLATE")
compiled_model = core.compile_model(model, "TEMPLATE")

req = compiled.create_infer_request()
req = compiled_model.create_infer_request()

# Allocate 8 chunks, set 'user tensors' to 0, 2, 4, 6 chunks
buffer = np.zeros([batch * 2, *batch_shape[1:]], dtype=np.float32)
@ -269,8 +269,8 @@ def test_inputs_outputs_property(device):
params = [ops.parameter(input_shape, np.uint8) for _ in range(num_inputs)]
model = Model(ops.split(ops.concat(params, 0), 0, num_inputs), params)
core = Core()
compiled = core.compile_model(model, device)
request = compiled.create_infer_request()
compiled_model = core.compile_model(model, device)
request = compiled_model.create_infer_request()
data = [np.atleast_1d(i) for i in range(num_inputs)]
results = request.infer(data).values()
for result, output_tensor in zip(results, request.outputs):
@ -282,9 +282,9 @@ def test_inputs_outputs_property(device):
def test_cancel(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
compiled_model = core.compile_model(model, device)
img = generate_image()
request = compiled.create_infer_request()
request = compiled_model.create_infer_request()

request.start_async({0: img})
request.cancel()
@ -302,12 +302,12 @@ def test_cancel(device):
def test_start_async(device):
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
compiled_model = core.compile_model(model, device)
img = generate_image()
jobs = 3
requests = []
for _ in range(jobs):
requests.append(compiled.create_infer_request())
requests.append(compiled_model.create_infer_request())

def callback(callbacks_info):
time.sleep(0.01)
@ -387,7 +387,6 @@ def test_infer_mixed_values(device, ov_type, numpy_dtype):

request.infer([tensor1, array1])

print(request.outputs[0].data)
assert np.array_equal(request.outputs[0].data, np.concatenate((tensor1.data, array1)))


@ -412,7 +411,6 @@ def test_async_mixed_values(device, ov_type, numpy_dtype):
request.start_async([tensor1, array1])
request.wait()

print(request.outputs[0].data)
assert np.array_equal(request.outputs[0].data, np.concatenate((tensor1.data, array1)))


@ -465,8 +463,8 @@ def test_infer_queue(device):
num_request = 4
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled, num_request)
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)
jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)]

def callback(request, job_id):
@ -486,8 +484,8 @@ def test_infer_queue_is_ready(device):
core = Core()
param = ops.parameter([10])
model = Model(ops.relu(param), [param])
compiled = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled, 1)
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, 1)

def callback(request, _):
time.sleep(0.001)
@ -504,8 +502,8 @@ def test_infer_queue_fail_on_cpp_model(device):
num_request = 4
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled, num_request)
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)

def callback(request, _):
request.get_tensor("Unknown")
@ -526,8 +524,8 @@ def test_infer_queue_fail_on_py_model(device):
num_request = 1
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled, num_request)
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)

def callback(request, _):
request = request + 21
@ -547,8 +545,8 @@ def test_infer_queue_get_idle_handle(device):
param = ops.parameter([10])
model = Model(ops.relu(param), [param])
core = Core()
compiled = core.compile_model(model, device)
queue = AsyncInferQueue(compiled, 2)
compiled_model = core.compile_model(model, device)
queue = AsyncInferQueue(compiled_model, 2)
niter = 10

for _ in range(len(queue)):
@ -585,8 +583,8 @@ def test_query_state_write_buffer(device, input_shape, data_type, mode):
from openvino.runtime.utils.types import get_dtype

model = create_model_with_memory(input_shape, data_type)
compiled = core.compile_model(model=model, device_name=device)
request = compiled.create_infer_request()
compiled_model = core.compile_model(model=model, device_name=device)
request = compiled_model.create_infer_request()
mem_states = request.query_state()
mem_state = mem_states[0]

@ -622,11 +620,11 @@ def test_get_results(device):
core = Core()
data = ops.parameter([10], np.float64)
model = Model(ops.split(data, 0, 5), [data])
compiled = core.compile_model(model, device)
request = compiled.create_infer_request()
inputs = [np.random.normal(size=list(compiled.input().shape))]
compiled_model = core.compile_model(model, device)
request = compiled_model.create_infer_request()
inputs = [np.random.normal(size=list(compiled_model.input().shape))]
results = request.infer(inputs)
for output in compiled.outputs:
for output in compiled_model.outputs:
assert np.array_equal(results[output], request.results[output])


@ -635,8 +633,8 @@ def test_results_async_infer(device):
num_request = 4
core = Core()
model = core.read_model(test_net_xml, test_net_bin)
compiled = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled, num_request)
compiled_model = core.compile_model(model, device)
infer_queue = AsyncInferQueue(compiled_model, num_request)
jobs_done = [{"finished": False, "latency": 0} for _ in range(jobs)]

def callback(request, job_id):
@ -649,7 +647,7 @@ def test_results_async_infer(device):
infer_queue.start_async({"data": img}, i)
infer_queue.wait_all()

request = compiled.create_infer_request()
request = compiled_model.create_infer_request()
outputs = request.infer({0: img})

for i in range(num_request):
@ -732,9 +730,9 @@ def test_infer_float16(device):
ppp.output(0).postprocess().convert_element_type(Type.f16)

model = ppp.build()
compiled = core.compile_model(model, device)
compiled_model = core.compile_model(model, device)
input_data = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]).astype(np.float16)
request = compiled.create_infer_request()
request = compiled_model.create_infer_request()
outputs = request.infer({0: input_data, 1: input_data})
assert np.allclose(list(outputs.values()), list(request.results.values()))
assert np.allclose(list(outputs.values()), input_data + input_data)
@ -747,8 +745,8 @@ def test_ports_as_inputs(device):
model = Model(ops.add(param_a, param_b), [param_a, param_b])

core = Core()
compiled = core.compile_model(model, device)
request = compiled.create_infer_request()
compiled_model = core.compile_model(model, device)
request = compiled_model.create_infer_request()

arr_1 = np.array([[1, 2], [3, 4]], dtype=np.float32)
arr_2 = np.array([[3, 4], [1, 2]], dtype=np.float32)
@ -756,8 +754,8 @@ def test_ports_as_inputs(device):
tensor1 = Tensor(arr_1)
tensor2 = Tensor(arr_2)

res = request.infer({compiled.inputs[0]: tensor1, compiled.inputs[1]: tensor2})
assert np.array_equal(res[compiled.outputs[0]], tensor1.data + tensor2.data)
res = request.infer({compiled_model.inputs[0]: tensor1, compiled_model.inputs[1]: tensor2})
assert np.array_equal(res[compiled_model.outputs[0]], tensor1.data + tensor2.data)

res = request.infer({request.model_inputs[0]: tensor1, request.model_inputs[1]: tensor2})
assert np.array_equal(res[request.model_outputs[0]], tensor1.data + tensor2.data)
@ -813,9 +811,9 @@ def test_infer_dynamic_model(device):
core = Core()
param = ops.parameter(PartialShape([-1, -1]))
model = Model(ops.relu(param), [param])
compiled = core.compile_model(model, device)
assert compiled.input().partial_shape.is_dynamic
request = compiled.create_infer_request()
compiled_model = core.compile_model(model, device)
assert compiled_model.input().partial_shape.is_dynamic
request = compiled_model.create_infer_request()

shape1 = [1, 28]
request.infer([np.random.normal(size=shape1)])
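
The queue-based tests above all share the same shape; a condensed, self-contained sketch of `AsyncInferQueue` with the renamed `compiled_model` (the tiny ReLU model is a stand-in):

```python
import numpy as np
import openvino.runtime.opset8 as ops
from openvino.runtime import AsyncInferQueue, Core, Model

param = ops.parameter([10])
model = Model(ops.relu(param), [param])
compiled_model = Core().compile_model(model, "CPU")

infer_queue = AsyncInferQueue(compiled_model, 2)  # two parallel requests
results = {}


def callback(request, job_id):
    results[job_id] = request.outputs[0].data.copy()


infer_queue.set_callback(callback)
for i in range(4):
    infer_queue.start_async([np.random.rand(10).astype(np.float32)], i)
infer_queue.wait_all()
assert len(results) == 4
```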

@ -17,18 +17,18 @@ test_net_xml, test_net_bin = model_path(is_myriad)

def test_input_type(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
assert isinstance(input_node, Input)


def test_const_output_docs(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
exptected_string = "openvino.runtime.Input wraps ov::Input<Node>"
assert input_node.__doc__ == exptected_string
@ -36,36 +36,36 @@ def test_const_output_docs(device):

def test_input_get_index(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
assert input_node.get_index() == 0


def test_input_element_type(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
assert input_node.get_element_type() == Type.f32


def test_input_get_shape(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
assert str(input_node.get_shape()) == str(Shape([1, 10]))


def test_input_get_partial_shape(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
expected_partial_shape = PartialShape([1, 10])
assert input_node.get_partial_shape() == expected_partial_shape
@ -73,9 +73,9 @@ def test_input_get_partial_shape(device):

def test_input_get_source_output(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
name = input_node.get_source_output().get_node().get_friendly_name()
assert name == "fc_out"
@ -83,9 +83,9 @@ def test_input_get_source_output(device):

def test_input_get_tensor(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
tensor = input_node.get_tensor()
assert isinstance(tensor, DescriptorTensor)
@ -93,9 +93,9 @@ def test_input_get_tensor(device):

def test_input_get_rt_info(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
rt_info = input_node.get_rt_info()
assert isinstance(rt_info, RTMap)
@ -103,9 +103,9 @@ def test_input_get_rt_info(device):

def test_input_rt_info(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
rt_info = input_node.rt_info
assert isinstance(rt_info, RTMap)
@ -127,9 +127,9 @@ def test_input_replace_source_output(device):

def test_input_update_rt_info(device):
core = Core()
func = core.read_model(model=test_net_xml, weights=test_net_bin)
exec_net = core.compile_model(func, device)
net_input = exec_net.output(0)
model = core.read_model(model=test_net_xml, weights=test_net_bin)
compiled_model = core.compile_model(model, device)
net_input = compiled_model.output(0)
input_node = net_input.get_node().inputs()[0]
rt = input_node.get_rt_info()
rt["test12345"] = "test"
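
These tests all reach an `Input` port the same way; a sketch of that traversal, assuming the same IR test model used throughout:

```python
from openvino.runtime import Core


def first_input_of_result(xml_path, bin_path, device="CPU"):
    core = Core()
    model = core.read_model(model=xml_path, weights=bin_path)
    compiled_model = core.compile_model(model, device)
    # Walk from the output port back to the Input feeding the result node.
    input_node = compiled_model.output(0).get_node().inputs()[0]
    return (
        input_node.get_index(),
        input_node.get_element_type(),
        input_node.get_partial_shape(),
        input_node.get_source_output().get_node().get_friendly_name(),
    )
```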
@ -13,7 +13,7 @@ from openvino.runtime import Core, Model, AsyncInferQueue, PartialShape, Layout,
|
||||
from openvino.preprocess import PrePostProcessor
|
||||
|
||||
|
||||
# check if func releases the GIL and doens't increment reference counters of args while GIL is released
|
||||
# check if func releases the GIL and doesn't increment reference counters of args while GIL is released
|
||||
def check_gil_released_safe(func, args=[]): # noqa: B006
|
||||
global gil_released
|
||||
gil_released = False
|
||||
@ -38,8 +38,8 @@ core = Core()
|
||||
core.set_property({"PERF_COUNT": "YES"})
|
||||
param = ops.parameter([224, 224])
|
||||
model = Model(ops.relu(param), [param])
|
||||
compiled = core.compile_model(model, device)
|
||||
infer_queue = AsyncInferQueue(compiled, 1)
|
||||
compiled_model = core.compile_model(model, device)
|
||||
infer_queue = AsyncInferQueue(compiled_model, 1)
|
||||
user_stream = io.BytesIO()
|
||||
|
||||
|
||||
@ -68,23 +68,23 @@ def test_gil_released_async_infer_queue_get_idle_request_id():
# CompiledModel

def test_gil_released_create_infer_request():
    check_gil_released_safe(compiled.create_infer_request)
    check_gil_released_safe(compiled_model.create_infer_request)


def test_gil_released_infer_new_request():
    check_gil_released_safe(compiled)
    check_gil_released_safe(compiled_model)


def test_gil_released_export():
    check_gil_released_safe(compiled.export_model)
    check_gil_released_safe(compiled_model.export_model)


def test_gil_released_export_advanced():
    check_gil_released_safe(compiled.export_model, [user_stream])
    check_gil_released_safe(compiled_model.export_model, [user_stream])


def test_gil_released_get_runtime_model():
    check_gil_released_safe(compiled.get_runtime_model)
    check_gil_released_safe(compiled_model.get_runtime_model)


# Core
@ -94,7 +94,7 @@ def test_compile_model(device):


def test_read_model_from_bytes():
    ir = bytes(b"""<net name="relu_model" version="11">
    bytes_model = bytes(b"""<net name="relu_model" version="11">
    <layers>
        <layer id="0" name="x" type="Parameter" version="opset1">
            <data element_type="f32" shape="10"/>
@ -129,7 +129,7 @@ def test_read_model_from_bytes():
        <edge from-layer="1" from-port="1" to-layer="2" to-port="0"/>
    </edges>
</net>""")
    check_gil_released_safe(core.read_model, [ir])
    check_gil_released_safe(core.read_model, [bytes_model])


def test_read_model_from_path():
@ -156,28 +156,28 @@ def test_get_available_devices(device):

# InferRequest

request = compiled.create_infer_request()
request = compiled_model.create_infer_request()


def test_infer():
    data = [np.random.normal(size=list(compiled.input().shape))]
    data = [np.random.normal(size=list(compiled_model.input().shape))]
    check_gil_released_safe(request.infer, [data])


def test_start_async():
    data = [np.random.normal(size=list(compiled.input().shape))]
    data = [np.random.normal(size=list(compiled_model.input().shape))]
    check_gil_released_safe(request.start_async, [data])
    request.wait()


def test_wait():
    data = [np.random.normal(size=list(compiled.input().shape))]
    data = [np.random.normal(size=list(compiled_model.input().shape))]
    request.start_async(data)
    check_gil_released_safe(request.wait)


def test_wait_for():
    data = [np.random.normal(size=list(compiled.input().shape))]
    data = [np.random.normal(size=list(compiled_model.input().shape))]
    request.start_async(data)
    check_gil_released_safe(request.wait_for, [1])
    request.wait()
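The InferRequest calls probed above share one shape; condensed, and reusing the module-level `compiled_model` and `request` from this file:

```python
import numpy as np

data = [np.random.normal(size=list(compiled_model.input().shape))]

request.infer(data)        # synchronous: returns when inference is done
request.start_async(data)  # asynchronous: returns immediately
request.wait_for(1)        # wait at most 1 ms for completion
request.wait()             # block until the job finishes
```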
@ -24,44 +24,44 @@ test_net_xml, test_net_bin = model_path(is_myriad)

def test_const_output_type(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input(0)
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    node = compiled_model.input(0)
    assert isinstance(node, ConstOutput)


def test_const_output_docs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input(0)
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    node = compiled_model.input(0)
    expected_string = "openvino.runtime.ConstOutput represents port/node output."
    assert node.__doc__ == expected_string


def test_const_output_get_index(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input("data")
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    node = compiled_model.input("data")
    assert node.get_index() == 0
    assert node.index == 0


def test_const_output_get_element_type(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input("data")
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    node = compiled_model.input("data")
    assert node.get_element_type() == Type.f32
    assert node.element_type == Type.f32


def test_const_output_get_shape(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    node = exec_net.input("data")
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    node = compiled_model.input("data")
    expected_shape = Shape([1, 3, 32, 32])
    assert str(node.get_shape()) == str(expected_shape)
    assert str(node.shape) == str(expected_shape)
@ -69,8 +69,8 @@ def test_const_output_get_shape(device):

def test_const_output_get_partial_shape(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(model, device)
    node = exec_net.input("data")
    expected_partial_shape = PartialShape([1, 3, 32, 32])
    assert node.get_partial_shape() == expected_partial_shape
@ -79,9 +79,9 @@ def test_const_output_get_partial_shape(device):

def test_const_output_get_target_inputs(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    outputs = exec_net.outputs
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    outputs = compiled_model.outputs
    for node in outputs:
        assert isinstance(node.get_target_inputs(), set)
        assert isinstance(node.target_inputs, set)
@ -89,10 +89,10 @@ def test_const_output_get_target_inputs(device):

def test_const_output_get_names(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    input_name = "data"
    node = exec_net.input(input_name)
    node = compiled_model.input(input_name)
    expected_names = set()
    expected_names.add(input_name)
    assert node.get_names() == expected_names
@ -103,19 +103,19 @@ def test_const_output_get_names(device):

def test_const_get_rt_info(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    output_node = exec_net.output(0)
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    output_node = compiled_model.output(0)
    rt_info = output_node.get_rt_info()
    assert isinstance(rt_info, RTMap)


def test_const_output_runtime_info(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)
    exec_net = core.compile_model(func, device)
    model = core.read_model(model=test_net_xml, weights=test_net_bin)
    compiled_model = core.compile_model(model, device)
    input_name = "data"
    output_node = exec_net.input(input_name)
    output_node = compiled_model.input(input_name)
    rt_info = output_node.rt_info
    assert isinstance(rt_info, RTMap)
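Taken together, these tests pin down a convention worth calling out: every classic `get_*()` accessor on a port has a property twin. In miniature, assuming `compiled_model` was built from the same IR as above:

```python
port = compiled_model.input("data")

# getter and property forms are interchangeable
assert port.get_index() == port.index
assert port.get_element_type() == port.element_type
assert str(port.get_shape()) == str(port.shape)

for output in compiled_model.outputs:
    assert isinstance(output.get_target_inputs(), set)
    assert isinstance(output.target_inputs, set)
```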
@ -4,11 +4,11 @@
from openvino.runtime import opset8
from openvino.runtime.passes import Manager, GraphRewrite, MatcherPass, WrapType, Matcher

from utils.utils import count_ops, get_test_function, PatternReplacement
from utils.utils import count_ops, get_test_model, PatternReplacement


def test_graph_rewrite():
    model = get_test_function()
    model = get_test_model()

    manager = Manager()
    # check that register pass returns pass instance
@ -68,7 +68,7 @@ def test_register_new_node():
    manager = Manager()
    ins = manager.register_pass(InsertExp())
    rem = manager.register_pass(RemoveExp())
    manager.run_passes(get_test_function())
    manager.run_passes(get_test_model())

    assert ins.model_changed
    assert rem.model_changed
@ -5,7 +5,7 @@ from openvino.runtime import opset8
from openvino.runtime.passes import Manager, Matcher, MatcherPass, WrapType
from openvino.runtime.utils import replace_node

from utils.utils import count_ops, get_test_function, PatternReplacement
from utils.utils import count_ops, get_test_model, PatternReplacement


def test_simple_pattern_replacement():
@ -27,7 +27,7 @@ def test_simple_pattern_replacement():

        return Matcher(relu, "SimpleReplacement"), callback

    model = get_test_function()
    model = get_test_model()

    manager = Manager()
    manager.register_pass(MatcherPass(*pattern_replacement()))
@ -37,7 +37,7 @@ def test_simple_pattern_replacement():


def test_matcher_pass():
    model = get_test_function()
    model = get_test_model()

    manager = Manager()
    # check that register pass returns pass instance
@ -49,7 +49,7 @@ def test_matcher_pass():


def test_matcher_pass_apply():
    model = get_test_function()
    model = get_test_model()

    pattern_replacement = PatternReplacement()
    pattern_replacement.apply(model.get_result().input_value(0).get_node())
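Since `PatternReplacement` lives in shared utils and only its call sites appear in these hunks, here is a hedged sketch of what a matcher pass of this shape can look like. The `WrapType` string and `get_match_root()` call are assumptions based on the pattern-matching API these files import, not code from this commit:

```python
from openvino.runtime import opset8
from openvino.runtime.passes import Manager, Matcher, MatcherPass, WrapType
from openvino.runtime.utils import replace_node


def pattern_replacement():
    relu = WrapType("opset8.Relu")  # pattern: match any opset8 Relu node

    def callback(matcher: Matcher) -> bool:
        root = matcher.get_match_root()
        # illustrative rewrite: swap the matched Relu for Exp
        new_node = opset8.exp(root.input_value(0))
        replace_node(root, new_node)
        return True

    return Matcher(relu, "SimpleReplacement"), callback


manager = Manager()
manager.register_pass(MatcherPass(*pattern_replacement()))
# manager.run_passes(model)  # model from get_test_model(), as in the tests
```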
@ -3,12 +3,12 @@
# SPDX-License-Identifier: Apache-2.0
from openvino.runtime.passes import Manager

from utils.utils import get_test_function, MyModelPass
from utils.utils import get_test_model, MyModelPass


def test_model_pass():
    manager = Manager()
    model_pass = manager.register_pass(MyModelPass())
    manager.run_passes(get_test_function())
    manager.run_passes(get_test_model())

    assert model_pass.model_changed
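`MyModelPass` is likewise imported from utils; a pass of that shape can be sketched as follows. Treat the `run_on_model` override as an assumption about the helper, not its verbatim source:

```python
from openvino.runtime.passes import ModelPass


class MyModelPass(ModelPass):
    def __init__(self):
        super().__init__()
        self.model_changed = False

    def run_on_model(self, model):
        # inspect (or mutate) the whole model in one shot
        print(f"model has {len(model.get_ops())} ops")
        self.model_changed = True
```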
@ -18,7 +18,7 @@ from openvino.runtime import Model, PartialShape, Core
import openvino.runtime as ov


def get_test_function():
def get_test_model():
    param = ov.opset8.parameter(PartialShape([1, 3, 22, 22]), name="parameter")
    param.get_output_tensor(0).set_names({"parameter"})
    relu = ov.opset8.relu(param)
@ -28,55 +28,55 @@ def get_test_function():


def test_moc_transformations():
    function = get_test_function()
    model = get_test_model()

    apply_moc_transformations(function, False)
    apply_moc_transformations(model, False)

    assert function is not None
    assert len(function.get_ops()) == 3
    assert model is not None
    assert len(model.get_ops()) == 3


def test_pot_transformations():
    function = get_test_function()
    model = get_test_model()

    apply_pot_transformations(function, "GNA")
    apply_pot_transformations(model, "GNA")

    assert function is not None
    assert len(function.get_ops()) == 3
    assert model is not None
    assert len(model.get_ops()) == 3


def test_low_latency_transformation():
    function = get_test_function()
    model = get_test_model()

    apply_low_latency_transformation(function, True)
    apply_low_latency_transformation(model, True)

    assert function is not None
    assert len(function.get_ops()) == 3
    assert model is not None
    assert len(model.get_ops()) == 3


def test_pruning_transformation():
    function = get_test_function()
    model = get_test_model()

    apply_pruning_transformation(function)
    apply_pruning_transformation(model)

    assert function is not None
    assert len(function.get_ops()) == 3
    assert model is not None
    assert len(model.get_ops()) == 3


def test_make_stateful_transformations():
    function = get_test_function()
    model = get_test_model()

    apply_make_stateful_transformation(function, {"parameter": "result"})
    apply_make_stateful_transformation(model, {"parameter": "result"})

    assert function is not None
    assert len(function.get_parameters()) == 0
    assert len(function.get_results()) == 0
    assert model is not None
    assert len(model.get_parameters()) == 0
    assert len(model.get_results()) == 0

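Every test above follows the same three steps: build the three-op model, apply one offline transformation in place, then assert that the Parameter->Relu->Result chain survived. Condensed, assuming the same `openvino.offline_transformations` imports this file uses and the `get_test_model` helper defined above:

```python
from openvino.offline_transformations import (
    apply_moc_transformations,
    apply_pruning_transformation,
)

model = get_test_model()                 # Parameter -> Relu -> Result
apply_moc_transformations(model, False)  # boolean flag as passed in the tests above
apply_pruning_transformation(model)

assert model is not None
assert len(model.get_ops()) == 3
```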
def test_serialize_pass_v2():
    core = Core()
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    xml_path = "./serialized_model.xml"
    bin_path = "./serialized_model.bin"
    shape = [100, 100, 2]
    parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B")
@ -87,10 +87,10 @@ def test_serialize_pass_v2():

    assert func is not None

    res_func = core.read_model(model=xml_path, weights=bin_path)
    res_model = core.read_model(model=xml_path, weights=bin_path)

    assert func.get_parameters() == res_func.get_parameters()
    assert func.get_ordered_ops() == res_func.get_ordered_ops()
    assert func.get_parameters() == res_model.get_parameters()
    assert func.get_ordered_ops() == res_model.get_ordered_ops()

    os.remove(xml_path)
    os.remove(bin_path)
@ -99,20 +99,20 @@ def test_serialize_pass_v2():
def test_compress_model_transformation():
    node_constant = ov.opset8.constant(np.array([[0.0, 0.1, -0.1], [-2.5, 2.5, 3.0]], dtype=np.float32))
    node_ceil = ov.opset8.ceiling(node_constant)
    func = Model(node_ceil, [], "TestFunction")
    elem_type = func.get_ordered_ops()[0].get_element_type().get_type_name()
    model = Model(node_ceil, [], "TestModel")
    elem_type = model.get_ordered_ops()[0].get_element_type().get_type_name()
    assert elem_type == "f32"
    compress_model_transformation(func)
    compress_model_transformation(model)

    assert func is not None
    elem_type = func.get_ordered_ops()[0].get_element_type().get_type_name()
    assert model is not None
    elem_type = model.get_ordered_ops()[0].get_element_type().get_type_name()
    assert elem_type == "f16"


def test_version_default():
    core = Core()
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    xml_path = "./serialized_model.xml"
    bin_path = "./serialized_model.bin"
    shape = [100, 100, 2]
    parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B")
@ -120,19 +120,19 @@ def test_version_default():
    func = Model(model, [parameter_a, parameter_b], "Model")

    serialize(func, xml_path, bin_path)
    res_func = core.read_model(model=xml_path, weights=bin_path)
    res_model = core.read_model(model=xml_path, weights=bin_path)

    assert func.get_parameters() == res_func.get_parameters()
    assert func.get_ordered_ops() == res_func.get_ordered_ops()
    assert func.get_parameters() == res_model.get_parameters()
    assert func.get_ordered_ops() == res_model.get_ordered_ops()

    os.remove(xml_path)
    os.remove(bin_path)


def test_serialize_default_bin():
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    model = get_test_function()
    xml_path = "./serialized_model.xml"
    bin_path = "./serialized_model.bin"
    model = get_test_model()
    serialize(model, xml_path)
    assert os.path.exists(bin_path)
    os.remove(xml_path)
@ -141,8 +141,8 @@ def test_serialize_default_bin():

def test_version_ir_v10():
    core = Core()
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    xml_path = "./serialized_model.xml"
    bin_path = "./serialized_model.bin"
    shape = [100, 100, 2]
    parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B")
@ -150,10 +150,10 @@ def test_version_ir_v10():
    func = Model(model, [parameter_a, parameter_b], "Model")

    serialize(func, xml_path, bin_path, "IR_V10")
    res_func = core.read_model(model=xml_path, weights=bin_path)
    res_model = core.read_model(model=xml_path, weights=bin_path)

    assert func.get_parameters() == res_func.get_parameters()
    assert func.get_ordered_ops() == res_func.get_ordered_ops()
    assert func.get_parameters() == res_model.get_parameters()
    assert func.get_ordered_ops() == res_model.get_ordered_ops()

    os.remove(xml_path)
    os.remove(bin_path)
@ -161,8 +161,8 @@ def test_version_ir_v10():

def test_version_ir_v11():
    core = Core()
    xml_path = "./serialized_function.xml"
    bin_path = "./serialized_function.bin"
    xml_path = "./serialized_model.xml"
    bin_path = "./serialized_model.bin"
    shape = [100, 100, 2]
    parameter_a = ov.opset8.parameter(shape, dtype=np.float32, name="A")
    parameter_b = ov.opset8.parameter(shape, dtype=np.float32, name="B")
@ -170,10 +170,10 @@ def test_version_ir_v11():
    func = Model(model, [parameter_a, parameter_b], "Model")

    serialize(func, xml_path, bin_path, "IR_V11")
    res_func = core.read_model(model=xml_path, weights=bin_path)
    res_model = core.read_model(model=xml_path, weights=bin_path)

    assert func.get_parameters() == res_func.get_parameters()
    assert func.get_ordered_ops() == res_func.get_ordered_ops()
    assert func.get_parameters() == res_model.get_parameters()
    assert func.get_ordered_ops() == res_model.get_ordered_ops()

    os.remove(xml_path)
    os.remove(bin_path)
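All the serialize tests drive the same round trip. Stripped to its core, and reusing the `get_test_model` helper from this file:

```python
import os

from openvino.runtime import Core, serialize

model = get_test_model()
serialize(model, "./serialized_model.xml", "./serialized_model.bin")

core = Core()
res_model = core.read_model(model="./serialized_model.xml",
                            weights="./serialized_model.bin")

assert model.get_parameters() == res_model.get_parameters()
assert model.get_ordered_ops() == res_model.get_ordered_ops()

os.remove("./serialized_model.xml")
os.remove("./serialized_model.bin")
```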
@ -15,7 +15,7 @@ from openvino.runtime.passes import (
    LowLatency2,
    Serialize,
)
from utils.utils import count_ops, get_test_function
from utils.utils import count_ops, get_test_model


def get_model():
@ -107,7 +107,7 @@ def test_serialize_pass():
    xml_path = "serialized_function.xml"
    bin_path = "serialized_function.bin"

    func = get_test_function()
    func = get_test_model()

    manager = Manager()
    manager.register_pass(Serialize(xml_path, bin_path))
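As the hunk above shows, serialization can also run as a registered pass rather than a direct call; in miniature, reusing `get_test_model`:

```python
from openvino.runtime.passes import Manager, Serialize

manager = Manager()
manager.register_pass(Serialize("serialized_model.xml", "serialized_model.bin"))
manager.run_passes(get_test_model())  # writes the files as a side effect
```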
@ -6,7 +6,7 @@ from openvino.runtime import Model, PartialShape, opset8
from openvino.runtime.utils import replace_node, replace_output_update_name


def get_test_function():
def get_test_model():
    # Parameter->Relu->Result
    param = opset8.parameter(PartialShape([1, 3, 22, 22]), name="parameter")
    relu = opset8.relu(param.output(0))
@ -6,7 +6,7 @@ from openvino.runtime import Model, PartialShape, opset8
from openvino.runtime.passes import ModelPass, Matcher, MatcherPass, WrapType


def get_test_function():
def get_test_model():
    # Parameter->Relu->Result
    param = opset8.parameter(PartialShape([1, 3, 22, 22]), name="parameter")
    relu = opset8.relu(param.output(0))
@ -10,23 +10,23 @@ from typing import Tuple, Union, List
import numpy as np


def get_test_function():
def get_test_model():
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = ops.relu(param)
    func = Model([relu], [param], "test")
    assert func is not None
    return func
    model = Model([relu], [param], "test")
    assert model is not None
    return model


def test_compare_functions():
def test_compare_models():
    try:
        from openvino.test_utils import compare_functions
        func = get_test_function()
        status, _ = compare_functions(func, func)
        from openvino.test_utils import compare_models
        model = get_test_model()
        status, _ = compare_models(model, model)
        assert status
    except RuntimeError:
        print("openvino.test_utils.compare_functions is not available")
        print("openvino.test_utils.compare_models is not available")


def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] = "float32") -> np.array:
@ -37,11 +37,11 @@ def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] =
def generate_relu_model(input_shape: List[int]) -> openvino.runtime.ie_api.CompiledModel:
    param = ops.parameter(input_shape, np.float32, name="parameter")
    relu = ops.relu(param, name="relu")
    func = Model([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"
    model = Model([relu], [param], "test")
    model.get_ordered_ops()[2].friendly_name = "friendly"

    core = Core()
    return core.compile_model(func, "CPU", {})
    return core.compile_model(model, "CPU", {})


def generate_add_model() -> openvino.pyopenvino.Model:
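These helpers compose: `generate_relu_model` returns a `CompiledModel`, and a `CompiledModel` is directly callable (which is what `test_gil_released_infer_new_request` above exploits). A short usage sketch:

```python
compiled_model = generate_relu_model([1, 3, 32, 32])
image = generate_image()            # defaults: shape (1, 3, 32, 32), float32
results = compiled_model([image])   # calling the model runs a fresh request
```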
@ -13,7 +13,7 @@ R_TOLERANCE = 1e-6  # global relative tolerance


@pytest.mark.parametrize(
    "ng_api_fn, numpy_fn, range_start, range_end",
    "graph_api_fn, numpy_fn, range_start, range_end",
    [
        (ng.absolute, np.abs, -1, 1),
        (ng.abs, np.abs, -1, 1),
@ -39,17 +39,17 @@ R_TOLERANCE = 1e-6  # global relative tolerance
        (ng.tanh, np.tanh, -100.0, 100.0),
    ],
)
def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
def test_unary_op_array(graph_api_fn, numpy_fn, range_start, range_end):
    np.random.seed(133391)
    input_data = (range_start + np.random.rand(2, 3, 4) * (range_end - range_start)).astype(np.float32)
    expected = numpy_fn(input_data)

    result = run_op_node([input_data], ng_api_fn)
    result = run_op_node([input_data], graph_api_fn)
    assert np.allclose(result, expected, rtol=0.001)


@pytest.mark.parametrize(
    "ng_api_fn, numpy_fn, input_data",
    "graph_api_fn, numpy_fn, input_data",
    [
        pytest.param(ng.absolute, np.abs, np.float32(-3)),
        pytest.param(ng.abs, np.abs, np.float32(-3)),
@ -72,10 +72,10 @@ def test_unary_op_array(ng_api_fn, numpy_fn, range_start, range_end):
        pytest.param(ng.tanh, np.tanh, np.float32(0.1234)),
    ],
)
def test_unary_op_scalar(ng_api_fn, numpy_fn, input_data):
def test_unary_op_scalar(graph_api_fn, numpy_fn, input_data):
    expected = numpy_fn(input_data)

    result = run_op_node([input_data], ng_api_fn)
    result = run_op_node([input_data], graph_api_fn)
    assert np.allclose(result, expected)
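A practical note on this last rename: pytest binds parametrize names to the test signature by string, so `ng_api_fn` had to change in the decorator and the function arguments together. In miniature, using plain builtins:

```python
import numpy as np
import pytest


@pytest.mark.parametrize("graph_api_fn, numpy_fn", [(abs, np.abs)])
def test_unary_op(graph_api_fn, numpy_fn):
    assert graph_api_fn(-3) == numpy_fn(-3)
```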