[PYTHON] Create graph and generate image in tests (#11569)
* Change read_image() into generate_image()
* Move test utils from testdata repo to local files
* Minor changes
* Remove unnecessary code
* Minor changes
* Fix compatibility tests
* Fix imports for Azure pipeline
* Move model generation into test_utils
* Minor changes
* Minor changes
* Update linux.yml CI
* Remove testdata repo from .ci/linux.yml
* Remove testdata repo from pipelines
* Fix Azure compatibility tests
* Reset linux.yml
* Remove testdata repo from linux CI
* Try eliminating one of configs
* Attempt at fixing Azure tests
* Add separate utils for compatibility
* xfail comp if op tests
* Minor changes
* Revert changes to .ci files
* minor changes
* Remove xfails
* Remove unnecessary import
* Skip if op tests

Co-authored-by: Michal Lukaszewski <michal.lukaszewski@intel.com>
This commit is contained in:
parent 7114863cbc
commit 09a0fb7890
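In short, the change swaps file-based test inputs for generated ones: instead of reading dog.bmp from the external testdata repository (which required the DATA_PATH/MODELS_PATH environment variables and OpenCV), the tests now build a deterministic random tensor and small test models locally. A minimal sketch of the pattern, matching the generate_image() helper this PR adds in tests/test_utils/test_utils.py (the expected class index 9 is the value the updated asserts use):

```python
import numpy as np


def generate_image(shape=(1, 3, 32, 32), dtype="float32"):
    # Seeded RNG makes the "image" reproducible, so tests can still assert
    # a fixed prediction (argmax == 9 for the bundled test model).
    np.random.seed(42)
    return np.random.rand(*shape).astype(dtype)


# Old style (removed): img = read_image()   # cv2.imread() of dog.bmp from DATA_PATH
# New style (added):
img = generate_image()
```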
@@ -4,7 +4,6 @@

import os
import pytest
import numpy as np

import tests

@@ -13,25 +12,6 @@ from sys import platform
from openvino.runtime import Core


def image_path():
    path_to_repo = os.environ["DATA_PATH"]
    path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp")
    return path_to_img


def read_image():
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(image_path())
    if image is None:
        raise FileNotFoundError("Input image not found")

    image = cv2.resize(image, (h, w)) / 255
    image = image.transpose((2, 0, 1)).astype(np.float32)
    image = image.reshape((n, c, h, w))
    return image


def get_model_with_template_extension():
    core = Core()
    ir = bytes(b"""<net name="Activation" version="10">
@@ -89,27 +69,27 @@ def get_model_with_template_extension():


def model_path(is_myriad=False):
    path_to_repo = os.environ["MODELS_PATH"]
    if not is_myriad:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
    base_path = os.path.dirname(__file__)
    if is_myriad:
        test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.xml")
        test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.bin")
    else:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
        test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.xml")
        test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.bin")
    return (test_xml, test_bin)


def model_onnx_path():
    path_to_repo = os.environ["MODELS_PATH"]
    test_onnx = os.path.join(path_to_repo, "models", "test_model", "test_model.onnx")
    base_path = os.path.dirname(__file__)
    test_onnx = os.path.join(base_path, "test_utils", "utils", "test_model.onnx")
    return test_onnx


def plugins_path():
    path_to_repo = os.environ["DATA_PATH"]
    plugins_xml = os.path.join(path_to_repo, "ie_class", "plugins.xml")
    plugins_win_xml = os.path.join(path_to_repo, "ie_class", "plugins_win.xml")
    plugins_osx_xml = os.path.join(path_to_repo, "ie_class", "plugins_apple.xml")
    base_path = os.path.dirname(__file__)
    plugins_xml = os.path.join(base_path, "test_utils", "utils", "plugins.xml")
    plugins_win_xml = os.path.join(base_path, "test_utils", "utils", "plugins_win.xml")
    plugins_osx_xml = os.path.join(base_path, "test_utils", "utils", "plugins_apple.xml")
    return (plugins_xml, plugins_win_xml, plugins_osx_xml)

@@ -6,7 +6,9 @@ import os
import pytest
import numpy as np

from ..conftest import model_path, read_image, get_model_with_template_extension
# TODO: refactor into absolute paths
from ..conftest import model_path, get_model_with_template_extension
from ..test_utils.test_utils import generate_image
from openvino.runtime import Model, ConstOutput, Shape

from openvino.runtime import Core, Tensor
@@ -51,10 +53,10 @@ def test_export_import():

    new_compiled = core.import_model(user_stream, "CPU")

    img = read_image()
    img = generate_image()
    res = new_compiled.infer_new_request({"data": img})

    assert np.argmax(res[new_compiled.outputs[0]]) == 2
    assert np.argmax(res[new_compiled.outputs[0]]) == 9


def test_export_import_advanced():
@@ -70,10 +72,10 @@ def test_export_import_advanced():

    new_compiled = core.import_model(user_stream, "CPU")

    img = read_image()
    img = generate_image()
    res = new_compiled.infer_new_request({"data": img})

    assert np.argmax(res[new_compiled.outputs[0]]) == 2
    assert np.argmax(res[new_compiled.outputs[0]]) == 9


def test_get_input_i(device):
@@ -246,41 +248,41 @@ def test_inputs_docs(device):
def test_infer_new_request_numpy(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = generate_image()
    exec_net = ie.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res[list(res)[0]]) == 2
    assert np.argmax(res[list(res)[0]]) == 9


def test_infer_new_request_tensor_numpy_copy(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = generate_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": tensor})
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 2
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 9
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(res_img[list(res_img)[0]])


def test_infer_tensor_numpy_shared_memory(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = generate_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
    res_tensor = exec_net.infer_new_request({"data": tensor})
    res_img = exec_net.infer_new_request({"data": tensor})
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 2
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == 9
    assert np.argmax(res_tensor[list(res_tensor)[0]]) == np.argmax(res_img[list(res_img)[0]])


def test_infer_new_request_wrong_port_name(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = generate_image()
    tensor = Tensor(img)
    exec_net = ie.compile_model(func, device)
    with pytest.raises(RuntimeError) as e:
@@ -291,7 +293,7 @@ def test_infer_new_request_wrong_port_name(device):
def test_infer_tensor_wrong_input_data(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = generate_image()
    img = np.ascontiguousarray(img)
    tensor = Tensor(img, shared_memory=True)
    exec_net = ie.compile_model(func, device)
@@ -307,10 +309,10 @@ def test_infer_numpy_model_from_buffer(device):
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    func = core.read_model(model=xml, weights=weights)
    img = read_image()
    img = generate_image()
    exec_net = core.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    assert np.argmax(res[list(res)[0]]) == 2
    assert np.argmax(res[list(res)[0]]) == 9


def test_infer_tensor_model_from_buffer(device):
@@ -320,11 +322,11 @@ def test_infer_tensor_model_from_buffer(device):
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    func = core.read_model(model=xml, weights=weights)
    img = read_image()
    img = generate_image()
    tensor = Tensor(img)
    exec_net = core.compile_model(func, device)
    res = exec_net.infer_new_request({"data": tensor})
    assert np.argmax(res[list(res)[0]]) == 2
    assert np.argmax(res[list(res)[0]]) == 9


def test_direct_infer(device):
@@ -334,11 +336,11 @@ def test_direct_infer(device):
    with open(test_net_xml, "rb") as f:
        xml = f.read()
    model = core.read_model(model=xml, weights=weights)
    img = read_image()
    img = generate_image()
    tensor = Tensor(img)
    comp_model = core.compile_model(model, device)
    res = comp_model({"data": tensor})
    assert np.argmax(res[comp_model.outputs[0]]) == 2
    assert np.argmax(res[comp_model.outputs[0]]) == 9
    ref = comp_model.infer_new_request({"data": tensor})
    assert np.array_equal(ref[comp_model.outputs[0]], res[comp_model.outputs[0]])

@@ -24,43 +24,41 @@ from ..conftest import (
    model_path,
    model_onnx_path,
    plugins_path,
    read_image,
    get_model_with_template_extension,
)

from ..test_utils.test_utils import (
    generate_image,
    generate_relu_model,
)


plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
test_net_xml, test_net_bin = model_path()
test_net_onnx = model_onnx_path()
plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()


def test_compact_api_xml():
    img = read_image()
    img = generate_image()

    model = compile_model(test_net_xml)
    assert isinstance(model, CompiledModel)
    results = model.infer_new_request({"data": img})
    assert np.argmax(results[list(results)[0]]) == 2
    assert np.argmax(results[list(results)[0]]) == 9


def test_compact_api_onnx():
    img = read_image()
    img = generate_image()

    model = compile_model(test_net_onnx)
    assert isinstance(model, CompiledModel)
    results = model.infer_new_request({"data": img})
    assert np.argmax(results[list(results)[0]]) == 2
    assert np.argmax(results[list(results)[0]]) == 9


def test_core_class():
    input_shape = [1, 3, 4, 4]
    param = ov.parameter(input_shape, np.float32, name="parameter")
    relu = ov.relu(param, name="relu")
    func = Model([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"

    core = Core()
    model = core.compile_model(func, "CPU", {})
    model = generate_relu_model(input_shape)

    request = model.create_infer_request()
    input_data = np.random.rand(*input_shape).astype(np.float32) - 0.5
@@ -390,7 +388,7 @@ def test_read_model_from_buffer_no_weights(device):
def test_infer_new_request_return_type(device):
    ie = Core()
    func = ie.read_model(model=test_net_xml, weights=test_net_bin)
    img = read_image()
    img = generate_image()
    exec_net = ie.compile_model(func, device)
    res = exec_net.infer_new_request({"data": img})
    arr = res[list(res)[0]][0]

@@ -20,12 +20,7 @@ from openvino.runtime import (
    get_batch,
)


def create_test_model():
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    return Model(add, [param1, param2], "TestFunction")
from ..test_utils.test_utils import generate_add_model  # TODO: reformat into an absolute path


def test_test_descriptor_tensor():
@@ -196,7 +191,7 @@ def test_add_outputs_incorrect_outputs_list():


def test_validate_nodes_and_infer_types():
    model = create_test_model()
    model = generate_add_model()
    invalid_shape = Shape([3, 7])
    param3 = ops.parameter(invalid_shape, dtype=np.float32, name="data3")
    model.replace_parameter(0, param3)
@@ -262,7 +257,7 @@ def test_replace_parameter():


def test_evaluate():
    model = create_test_model()
    model = generate_add_model()
    input1 = np.array([2, 1], dtype=np.float32).reshape(2, 1)
    input2 = np.array([3, 7], dtype=np.float32).reshape(2, 1)
    out_tensor = Tensor("float32", Shape([2, 1]))
@@ -272,7 +267,7 @@ def test_evaluate():


def test_evaluate_invalid_input_shape():
    model = create_test_model()
    model = generate_add_model()
    with pytest.raises(RuntimeError) as e:
        assert model.evaluate(
            [Tensor("float32", Shape([2, 1]))],
@@ -282,7 +277,7 @@ def test_evaluate_invalid_input_shape():


def test_get_batch():
    model = create_test_model()
    model = generate_add_model()
    param = model.get_parameters()[0]
    param.set_layout(Layout("NC"))
    assert get_batch(model) == 2
@@ -301,7 +296,7 @@ def test_get_batch_chwn():


def test_set_batch_dimension():
    model = create_test_model()
    model = generate_add_model()
    model_param1 = model.get_parameters()[0]
    model_param2 = model.get_parameters()[1]
    # check batch == 2
@@ -317,7 +312,7 @@ def test_set_batch_dimension():


def test_set_batch_int():
    model = create_test_model()
    model = generate_add_model()
    model_param1 = model.get_parameters()[0]
    model_param2 = model.get_parameters()[1]
    # check batch == 2
@@ -333,7 +328,7 @@ def test_set_batch_int():


def test_set_batch_default_batch_size():
    model = create_test_model()
    model = generate_add_model()
    model_param1 = model.get_parameters()[0]
    model_param1.set_layout(Layout("NC"))
    set_batch(model)
@@ -341,7 +336,7 @@ def test_set_batch_default_batch_size():


def test_reshape_with_ports():
    model = create_test_model()
    model = generate_add_model()
    new_shape = PartialShape([1, 4])
    for model_input in model.inputs:
        assert isinstance(model_input, Output)
@@ -350,7 +345,7 @@ def test_reshape_with_ports():


def test_reshape_with_indexes():
    model = create_test_model()
    model = generate_add_model()
    new_shape = PartialShape([1, 4])
    for index, model_input in enumerate(model.inputs):
        model.reshape({index: new_shape})
@@ -358,7 +353,7 @@ def test_reshape_with_indexes():


def test_reshape_with_names():
    model = create_test_model()
    model = generate_add_model()
    new_shape = PartialShape([1, 4])
    for model_input in model.inputs:
        model.reshape({model_input.any_name: new_shape})
@@ -378,7 +373,7 @@ def test_reshape(device):


def test_reshape_with_python_types(device):
    model = create_test_model()
    model = generate_add_model()

    def check_shape(new_shape):
        for model_input in model.inputs:
@@ -395,7 +390,7 @@ def test_reshape_with_python_types(device):
    check_shape(PartialShape(shape2))

    shape3 = [1, 8]
    new_shapes = {i: shape3 for i, input in enumerate(model.inputs)}
    new_shapes = {i: shape3 for i, _ in enumerate(model.inputs)}
    model.reshape(new_shapes)
    check_shape(PartialShape(shape3))

@@ -14,7 +14,9 @@ from openvino.runtime import Core, AsyncInferQueue, Tensor, ProfilingInfo, Model
from openvino.runtime import Type, PartialShape, Shape, Layout
from openvino.preprocess import PrePostProcessor

from ..conftest import model_path, read_image
# TODO: reformat into absolute paths
from ..conftest import model_path
from ..test_utils.test_utils import generate_image

is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
@@ -89,7 +91,7 @@ def test_get_profiling_info(device):
    model = core.read_model(test_net_xml, test_net_bin)
    core.set_property(device, {"PERF_COUNT": "YES"})
    compiled = core.compile_model(model, device)
    img = read_image()
    img = generate_image()
    request = compiled.create_infer_request()
    tensor_name = compiled.input("data").any_name
    request.infer({tensor_name: img})
@@ -110,7 +112,7 @@ def test_tensor_setter(device):
    compiled_2 = core.compile_model(model=model, device_name=device)
    compiled_3 = core.compile_model(model=model, device_name=device)

    img = read_image()
    img = generate_image()
    tensor = Tensor(img)

    request1 = compiled_1.create_infer_request()
@@ -152,7 +154,7 @@ def test_set_tensors(device):
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)

    data1 = read_image()
    data1 = generate_image()
    tensor1 = Tensor(data1)
    data2 = np.ones(shape=(1, 10), dtype=np.float32)
    tensor2 = Tensor(data2)
@@ -281,7 +283,7 @@ def test_cancel(device):
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    img = read_image()
    img = generate_image()
    request = compiled.create_infer_request()

    request.start_async({0: img})
@@ -301,7 +303,7 @@ def test_start_async(device):
    core = Core()
    model = core.read_model(test_net_xml, test_net_bin)
    compiled = core.compile_model(model, device)
    img = read_image()
    img = generate_image()
    jobs = 3
    requests = []
    for _ in range(jobs):
@@ -354,7 +356,7 @@ def test_infer_mixed_keys(device):
    core.set_property(device, {"PERF_COUNT": "YES"})
    model = core.compile_model(model, device)

    img = read_image()
    img = generate_image()
    tensor = Tensor(img)

    data2 = np.ones(shape=img.shape, dtype=np.float32)
@@ -362,7 +364,7 @@ def test_infer_mixed_keys(device):

    request = model.create_infer_request()
    res = request.infer({0: tensor2, "data": tensor})
    assert np.argmax(res[model.output()]) == 2
    assert np.argmax(res[model.output()]) == 9


@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [
@@ -471,7 +473,7 @@ def test_infer_queue(device):
        jobs_done[job_id]["finished"] = True
        jobs_done[job_id]["latency"] = request.latency

    img = read_image()
    img = generate_image()
    infer_queue.set_callback(callback)
    for i in range(jobs):
        infer_queue.start_async({"data": img}, i)
@@ -508,7 +510,7 @@ def test_infer_queue_fail_on_cpp_model(device):
    def callback(request, _):
        request.get_tensor("Unknown")

    img = read_image()
    img = generate_image()
    infer_queue.set_callback(callback)

    with pytest.raises(RuntimeError) as e:
@@ -530,7 +532,7 @@ def test_infer_queue_fail_on_py_model(device):
    def callback(request, _):
        request = request + 21

    img = read_image()
    img = generate_image()
    infer_queue.set_callback(callback)

    with pytest.raises(TypeError) as e:
@@ -641,7 +643,7 @@ def test_results_async_infer(device):
        jobs_done[job_id]["finished"] = True
        jobs_done[job_id]["latency"] = request.latency

    img = read_image()
    img = generate_image()
    infer_queue.set_callback(callback)
    for i in range(jobs):
        infer_queue.start_async({"data": img}, i)

@@ -15,17 +15,6 @@ is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)


def model_path(is_myriad=False):
    path_to_repo = os.environ["MODELS_PATH"]
    if not is_myriad:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
    else:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
    return (test_xml, test_bin)


def test_input_type(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)

@@ -22,17 +22,6 @@ is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)


def model_path(is_myriad=False):
    path_to_repo = os.environ["MODELS_PATH"]
    if not is_myriad:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
    else:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
    return (test_xml, test_bin)


def test_const_output_type(device):
    core = Core()
    func = core.read_model(model=test_net_xml, weights=test_net_bin)

@@ -12,17 +12,6 @@ is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)


def model_path(is_myriad=False):
    path_to_repo = os.environ["MODELS_PATH"]
    if not is_myriad:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
    else:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
    return (test_xml, test_bin)


def test_output_replace(device):
    param = ops.parameter([1, 64], Type.i64)
    param.output(0).get_tensor().set_names({"a", "b"})

@@ -14,7 +14,7 @@ from openvino.helpers import pack_data, unpack_data

import pytest

from ..conftest import read_image
from ..test_utils.test_utils import generate_image  # TODO: reformat into an absolute path


@pytest.mark.parametrize(("ov_type", "numpy_dtype"), [
@@ -97,7 +97,7 @@ def test_init_with_numpy_dtype(ov_type, numpy_dtype):
    (ov.Type.boolean, np.bool),
])
def test_init_with_numpy_shared_memory(ov_type, numpy_dtype):
    arr = read_image().astype(numpy_dtype)
    arr = generate_image().astype(numpy_dtype)
    shape = arr.shape
    arr = np.ascontiguousarray(arr)
    ov_tensor = Tensor(array=arr, shared_memory=True)
@@ -134,7 +134,7 @@ def test_init_with_numpy_shared_memory(ov_type, numpy_dtype):
    (ov.Type.boolean, np.bool),
])
def test_init_with_numpy_copy_memory(ov_type, numpy_dtype):
    arr = read_image().astype(numpy_dtype)
    arr = generate_image().astype(numpy_dtype)
    shape = arr.shape
    ov_tensor = Tensor(array=arr, shared_memory=False)
    assert tuple(ov_tensor.shape) == shape
@@ -149,7 +149,7 @@ def test_init_with_numpy_copy_memory(ov_type, numpy_dtype):


def test_init_with_numpy_fail():
    arr = np.asfortranarray(read_image())
    arr = np.asfortranarray(generate_image())
    with pytest.raises(RuntimeError) as e:
        _ = Tensor(array=arr, shared_memory=True)
    assert "Tensor with shared memory must be C contiguous" in str(e.value)

@@ -2,10 +2,12 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

from openvino.runtime import Model
from openvino.runtime import Shape, Type
import openvino
from openvino.runtime import Model, Core, Shape, Type
from openvino.runtime.op import Parameter
import openvino.runtime.opset8 as ops
from typing import Tuple, Union, List
import numpy as np


def get_test_function():
@@ -21,7 +23,29 @@ def test_compare_functions():
    try:
        from openvino.test_utils import compare_functions
        func = get_test_function()
        status, msg = compare_functions(func, func)
        status, _ = compare_functions(func, func)
        assert status
    except RuntimeError:
        print("openvino.test_utils.compare_functions is not available")


def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] = "float32") -> np.array:
    np.random.seed(42)
    return np.random.rand(*shape).astype(dtype)


def generate_relu_model(input_shape: List[int]) -> openvino.runtime.ie_api.CompiledModel:
    param = ops.parameter(input_shape, np.float32, name="parameter")
    relu = ops.relu(param, name="relu")
    func = Model([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"

    core = Core()
    return core.compile_model(func, "CPU", {})


def generate_add_model() -> openvino.pyopenvino.Model:
    param1 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data1")
    param2 = ops.parameter(Shape([2, 1]), dtype=np.float32, name="data2")
    add = ops.add(param1, param2)
    return Model(add, [param1, param2], "TestFunction")

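For orientation, a hedged sketch of how tests consume the helpers above (the relative imports and the `device` fixture follow the test modules changed in this diff; the assertion values mirror the updated tests):

```python
import numpy as np
from openvino.runtime import Core

from ..conftest import model_path
from ..test_utils.test_utils import generate_add_model, generate_image


def test_infer_with_generated_image(device):
    # Deterministic random input instead of dog.bmp from the testdata repo.
    xml_path, bin_path = model_path()
    core = Core()
    compiled = core.compile_model(core.read_model(xml_path, bin_path), device)
    res = compiled.infer_new_request({"data": generate_image()})
    # With the seeded image, the bundled test model predicts class 9.
    assert np.argmax(res[compiled.outputs[0]]) == 9


def test_graph_helpers():
    # Small two-parameter Add graph used by the Model API tests.
    model = generate_add_model()
    assert len(model.get_parameters()) == 2
```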
11 src/bindings/python/tests/test_utils/utils/plugins.xml Normal file
@@ -0,0 +1,11 @@
<!--
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: Apache-2.0
-->

<ie>
    <plugins>
        <plugin location="libopenvino_intel_cpu_plugin.so" name="CUSTOM">
        </plugin>
    </plugins>
</ie>

11 src/bindings/python/tests/test_utils/utils/plugins_apple.xml Normal file
@@ -0,0 +1,11 @@
<!--
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: Apache-2.0
-->

<ie>
    <plugins>
        <plugin location="libopenvino_intel_cpu_plugin.so" name="CUSTOM">
        </plugin>
    </plugins>
</ie>

11 src/bindings/python/tests/test_utils/utils/plugins_win.xml Normal file
@@ -0,0 +1,11 @@
<!--
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: Apache-2.0
-->

<ie>
    <plugins>
        <plugin location="openvino_intel_cpu_plugin.dll" name="CUSTOM">
        </plugin>
    </plugins>
</ie>

BIN src/bindings/python/tests/test_utils/utils/test_model.onnx Normal file
Binary file not shown.

BIN src/bindings/python/tests/test_utils/utils/test_model_fp16.bin Normal file
Binary file not shown.

467 src/bindings/python/tests/test_utils/utils/test_model_fp16.xml Normal file
@@ -0,0 +1,467 @@
<?xml version="1.0" ?>
|
||||
<net name="test_model" version="10">
|
||||
<layers>
|
||||
<layer id="0" name="data" type="Parameter" version="opset1">
|
||||
<data element_type="f16" shape="1,3,32,32"/>
|
||||
<output>
|
||||
<port id="0" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="20/mean/Fused_Mul_614616_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="0" shape="16,3,5,5" size="2400"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="19/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="3" name="data_add_575/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="2400" shape="1,16,1,1" size="32"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="4" name="19/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="5" name="21" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="6" name="22" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="7" name="onnx_initializer_node_8/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="2432" shape="32,16,5,5" size="25600"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="8" name="23/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="9" name="23/Dims351/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="28032" shape="1,32,1,1" size="64"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="10" name="23" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="11" name="25/mean/Fused_Mul_618620_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="28096" shape="64,32,3,3" size="36864"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="12" name="24/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="13" name="data_add_578583/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="64960" shape="1,64,1,1" size="128"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="14" name="24/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="15" name="26" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="16" name="27" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="17" name="28/Reshape/Cast_1955_const" type="Const" version="opset1">
|
||||
<data element_type="i64" offset="65088" shape="2" size="16"/>
|
||||
<output>
|
||||
<port id="1" precision="I64">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="18" name="28/Reshape" type="Reshape" version="opset1">
|
||||
<data special_zero="True"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="19" name="onnx_initializer_node_17/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="65104" shape="10,5184" size="103680"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="20" name="29/WithoutBiases" type="MatMul" version="opset1">
|
||||
<data transpose_a="0" transpose_b="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="21" name="onnx_initializer_node_18/Output_0/Data_/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="168784" shape="1,10" size="20"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="22" name="29" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="23" name="fc_out" type="SoftMax" version="opset1">
|
||||
<data axis="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="24" name="fc_out/sink_port_0" type="Result" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
|
||||
<edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
|
||||
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
|
||||
<edge from-layer="5" from-port="1" to-layer="6" to-port="0"/>
|
||||
<edge from-layer="6" from-port="1" to-layer="8" to-port="0"/>
|
||||
<edge from-layer="7" from-port="1" to-layer="8" to-port="1"/>
|
||||
<edge from-layer="8" from-port="2" to-layer="10" to-port="0"/>
|
||||
<edge from-layer="9" from-port="1" to-layer="10" to-port="1"/>
|
||||
<edge from-layer="10" from-port="2" to-layer="12" to-port="0"/>
|
||||
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
|
||||
<edge from-layer="12" from-port="2" to-layer="14" to-port="0"/>
|
||||
<edge from-layer="13" from-port="1" to-layer="14" to-port="1"/>
|
||||
<edge from-layer="14" from-port="2" to-layer="15" to-port="0"/>
|
||||
<edge from-layer="15" from-port="1" to-layer="16" to-port="0"/>
|
||||
<edge from-layer="16" from-port="1" to-layer="18" to-port="0"/>
|
||||
<edge from-layer="17" from-port="1" to-layer="18" to-port="1"/>
|
||||
<edge from-layer="18" from-port="2" to-layer="20" to-port="0"/>
|
||||
<edge from-layer="19" from-port="1" to-layer="20" to-port="1"/>
|
||||
<edge from-layer="20" from-port="2" to-layer="22" to-port="0"/>
|
||||
<edge from-layer="21" from-port="1" to-layer="22" to-port="1"/>
|
||||
<edge from-layer="22" from-port="2" to-layer="23" to-port="0"/>
|
||||
<edge from-layer="23" from-port="1" to-layer="24" to-port="0"/>
|
||||
</edges>
|
||||
<meta_data>
|
||||
<MO_version value="unknown version"/>
|
||||
<cli_parameters>
|
||||
<blobs_as_inputs value="True"/>
|
||||
<data_type value="FP16"/>
|
||||
<disable_resnet_optimization value="False"/>
|
||||
<disable_weights_compression value="False"/>
|
||||
<enable_concat_optimization value="False"/>
|
||||
<extensions value="DIR"/>
|
||||
<framework value="onnx"/>
|
||||
<freeze_placeholder_with_value value="{}"/>
|
||||
<generate_deprecated_IR_V2 value="False"/>
|
||||
<generate_deprecated_IR_V7 value="False"/>
|
||||
<generate_experimental_IR_V10 value="True"/>
|
||||
<input_model value="DIR/test_model.onnx"/>
|
||||
<keep_quantize_ops_in_IR value="True"/>
|
||||
<keep_shape_ops value="False"/>
|
||||
<log_level value="ERROR"/>
|
||||
<mean_scale_values value="{}"/>
|
||||
<mean_values value="()"/>
|
||||
<model_name value="test_model"/>
|
||||
<move_to_preprocess value="False"/>
|
||||
<output_dir value="DIR"/>
|
||||
<placeholder_data_types value="{}"/>
|
||||
<progress value="False"/>
|
||||
<reverse_input_channels value="False"/>
|
||||
<scale_values value="()"/>
|
||||
<silent value="False"/>
|
||||
<stream_output value="False"/>
|
||||
<unset unset_cli_parameters="batch, disable_fusing, disable_gfusing, finegrain_fusing, input, input_shape, output, placeholder_shapes, scale, transformations_config"/>
|
||||
</cli_parameters>
|
||||
</meta_data>
|
||||
</net>
|
BIN src/bindings/python/tests/test_utils/utils/test_model_fp32.bin Normal file
Binary file not shown.

467 src/bindings/python/tests/test_utils/utils/test_model_fp32.xml Normal file
@@ -0,0 +1,467 @@
<?xml version="1.0" ?>
|
||||
<net name="test_model" version="10">
|
||||
<layers>
|
||||
<layer id="0" name="data" type="Parameter" version="opset1">
|
||||
<data element_type="f32" shape="1,3,32,32"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="20/mean/Fused_Mul_614616_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="0" shape="16,3,5,5" size="4800"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="19/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="3" name="data_add_575/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="4800" shape="1,16,1,1" size="64"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="4" name="19/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="5" name="21" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="6" name="22" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="7" name="onnx_initializer_node_8/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="4864" shape="32,16,5,5" size="51200"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="8" name="23/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="9" name="23/Dims357/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="56064" shape="1,32,1,1" size="128"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="10" name="23" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="11" name="25/mean/Fused_Mul_618620_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="56192" shape="64,32,3,3" size="73728"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="12" name="24/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="13" name="data_add_578583/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="129920" shape="1,64,1,1" size="256"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="14" name="24/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="15" name="26" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="16" name="27" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="17" name="28/Reshape/Cast_1955_const" type="Const" version="opset1">
|
||||
<data element_type="i64" offset="130176" shape="2" size="16"/>
|
||||
<output>
|
||||
<port id="1" precision="I64">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="18" name="28/Reshape" type="Reshape" version="opset1">
|
||||
<data special_zero="True"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="19" name="onnx_initializer_node_17/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="130192" shape="10,5184" size="207360"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="20" name="29/WithoutBiases" type="MatMul" version="opset1">
|
||||
<data transpose_a="0" transpose_b="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="21" name="onnx_initializer_node_18/Output_0/Data_/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="337552" shape="1,10" size="40"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="22" name="29" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="23" name="fc_out" type="SoftMax" version="opset1">
|
||||
<data axis="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="24" name="fc_out/sink_port_0" type="Result" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
|
||||
<edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
|
||||
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
|
||||
<edge from-layer="5" from-port="1" to-layer="6" to-port="0"/>
|
||||
<edge from-layer="6" from-port="1" to-layer="8" to-port="0"/>
|
||||
<edge from-layer="7" from-port="1" to-layer="8" to-port="1"/>
|
||||
<edge from-layer="8" from-port="2" to-layer="10" to-port="0"/>
|
||||
<edge from-layer="9" from-port="1" to-layer="10" to-port="1"/>
|
||||
<edge from-layer="10" from-port="2" to-layer="12" to-port="0"/>
|
||||
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
|
||||
<edge from-layer="12" from-port="2" to-layer="14" to-port="0"/>
|
||||
<edge from-layer="13" from-port="1" to-layer="14" to-port="1"/>
|
||||
<edge from-layer="14" from-port="2" to-layer="15" to-port="0"/>
|
||||
<edge from-layer="15" from-port="1" to-layer="16" to-port="0"/>
|
||||
<edge from-layer="16" from-port="1" to-layer="18" to-port="0"/>
|
||||
<edge from-layer="17" from-port="1" to-layer="18" to-port="1"/>
|
||||
<edge from-layer="18" from-port="2" to-layer="20" to-port="0"/>
|
||||
<edge from-layer="19" from-port="1" to-layer="20" to-port="1"/>
|
||||
<edge from-layer="20" from-port="2" to-layer="22" to-port="0"/>
|
||||
<edge from-layer="21" from-port="1" to-layer="22" to-port="1"/>
|
||||
<edge from-layer="22" from-port="2" to-layer="23" to-port="0"/>
|
||||
<edge from-layer="23" from-port="1" to-layer="24" to-port="0"/>
|
||||
</edges>
|
||||
<meta_data>
|
||||
<MO_version value="unknown version"/>
|
||||
<cli_parameters>
|
||||
<blobs_as_inputs value="True"/>
|
||||
<data_type value="FP32"/>
|
||||
<disable_resnet_optimization value="False"/>
|
||||
<disable_weights_compression value="False"/>
|
||||
<enable_concat_optimization value="False"/>
|
||||
<extensions value="DIR"/>
|
||||
<framework value="onnx"/>
|
||||
<freeze_placeholder_with_value value="{}"/>
|
||||
<generate_deprecated_IR_V2 value="False"/>
|
||||
<generate_deprecated_IR_V7 value="False"/>
|
||||
<generate_experimental_IR_V10 value="True"/>
|
||||
<input_model value="DIR/test_model.onnx"/>
|
||||
<keep_quantize_ops_in_IR value="True"/>
|
||||
<keep_shape_ops value="False"/>
|
||||
<log_level value="ERROR"/>
|
||||
<mean_scale_values value="{}"/>
|
||||
<mean_values value="()"/>
|
||||
<model_name value="test_model"/>
|
||||
<move_to_preprocess value="False"/>
|
||||
<output_dir value="DIR"/>
|
||||
<placeholder_data_types value="{}"/>
|
||||
<progress value="False"/>
|
||||
<reverse_input_channels value="False"/>
|
||||
<scale_values value="()"/>
|
||||
<silent value="False"/>
|
||||
<stream_output value="False"/>
|
||||
<unset unset_cli_parameters="batch, disable_fusing, disable_gfusing, finegrain_fusing, input, input_shape, output, placeholder_shapes, scale, transformations_config"/>
|
||||
</cli_parameters>
|
||||
</meta_data>
|
||||
</net>
|
@@ -146,3 +146,4 @@ xfail_issue_81976 = xfail_test(reason="RuntimeError: z node not found in graph c
xfail_issue_82038 = xfail_test(reason="ScatterElements, ScatterND, AssertionError: Result mismatch")
xfail_issue_82039 = xfail_test(reason="Unsupported data type Optional, RuntimeError: [ NOT_IMPLEMENTED ] "
                                      "CPU plugin: Input image format UNSPECIFIED is not supported yet...")
skip_issue_86384 = pytest.mark.skip(reason="If op compatibility tests failing only when triggering whole test suite")

@ -11,34 +11,28 @@ import tests_compatibility
from pathlib import Path


def image_path():
    path_to_repo = os.environ["DATA_PATH"]
    path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp")
    return path_to_img


def model_path(is_myriad=False):
    path_to_repo = os.environ["MODELS_PATH"]
    if not is_myriad:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
    base_path = os.path.dirname(__file__)
    if is_myriad:
        test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.xml")
        test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp16.bin")
    else:
        test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
        test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
        test_xml = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.xml")
        test_bin = os.path.join(base_path, "test_utils", "utils", "test_model_fp32.bin")
    return (test_xml, test_bin)


def model_onnx_path():
    path_to_repo = os.environ["MODELS_PATH"]
    test_onnx = os.path.join(path_to_repo, "models", "test_model", "test_model.onnx")
    base_path = os.path.dirname(__file__)
    test_onnx = os.path.join(base_path, "test_utils", "utils", "test_model.onnx")
    return test_onnx


def plugins_path():
    path_to_repo = os.environ["DATA_PATH"]
    plugins_xml = os.path.join(path_to_repo, "ie_class", "plugins.xml")
    plugins_win_xml = os.path.join(path_to_repo, "ie_class", "plugins_win.xml")
    plugins_osx_xml = os.path.join(path_to_repo, "ie_class", "plugins_apple.xml")
    base_path = os.path.dirname(__file__)
    plugins_xml = os.path.join(base_path, "test_utils", "utils", "plugins.xml")
    plugins_win_xml = os.path.join(base_path, "test_utils", "utils", "plugins_win.xml")
    plugins_osx_xml = os.path.join(base_path, "test_utils", "utils", "plugins_apple.xml")
    return (plugins_xml, plugins_win_xml, plugins_osx_xml)
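With this hunk the compatibility fixtures stop reading the MODELS_PATH and DATA_PATH environment variables and resolve everything relative to the conftest module itself. A small sketch of the resulting lookup; resolve_test_file is a hypothetical helper, and the test_utils/utils location is taken from the lines above:

import os


def resolve_test_file(filename):
    # Hypothetical helper mirroring how model_path(), model_onnx_path() and
    # plugins_path() now build paths from the test sources instead of an env var.
    base_path = os.path.dirname(__file__)
    return os.path.join(base_path, "test_utils", "utils", filename)


test_xml = resolve_test_file("test_model_fp32.xml")
test_bin = resolve_test_file("test_model_fp32.bin")
print(test_xml, test_bin)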
@ -7,11 +7,8 @@ import numpy as np
import os

from openvino.inference_engine import TensorDesc, Blob, IECore
from tests_compatibility.conftest import image_path, create_encoder
import ngraph as ng


path_to_image = image_path()
from tests_compatibility.conftest import model_path
from ..test_utils.test_utils import generate_image  # TODO: reformat into an absolute path


def test_init_with_tensor_desc():
@ -89,15 +86,7 @@ def test_incompatible_array_and_td():


def test_incompatible_input_precision():
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(path_to_image)
    if image is None:
        raise FileNotFoundError("Input image not found")

    image = cv2.resize(image, (h, w)) / 255
    image = image.transpose((2, 0, 1))
    image = image.reshape((n, c, h, w))
    image = generate_image(dtype="float64")
    tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
    with pytest.raises(ValueError) as e:
        Blob(tensor_desc, image)
@ -109,9 +98,7 @@ def test_incompatible_input_precision():
@pytest.mark.skip(reason="Test will enable when CPU fix will be merge")
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test")
def test_buffer_values_after_add_outputs(device):
    path_to_repo = os.environ["MODELS_PATH"]
    test_net_xml_fp16 = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp16.xml')
    test_net_bin_fp16 = os.path.join(path_to_repo, "models", "test_model", 'test_model_fp16.bin')
    test_net_xml_fp16, test_net_bin_fp16 = model_path(is_myriad=True)
    ie_core = IECore()
    if device == "CPU":
        if ie_core.get_metric(device, "FULL_DEVICE_NAME") == "arm_compute::NEON":
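The rewritten test above relies on generate_image(dtype="float64") to produce data that deliberately disagrees with an FP32 TensorDesc. A minimal standalone sketch of that check; make_random_input stands in for the shared helper:

import numpy as np
import pytest
from openvino.inference_engine import TensorDesc, Blob


def make_random_input(dtype="float64", shape=(1, 3, 32, 32)):
    # Stand-in for generate_image(): a seeded random tensor instead of an image file.
    np.random.seed(42)
    return np.random.rand(*shape).astype(dtype)


image = make_random_input()                              # float64 on purpose
tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
with pytest.raises(ValueError):
    Blob(tensor_desc, image)                             # precision mismatch should raise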
@ -4,38 +4,24 @@
|
||||
import numpy as np
|
||||
import os
|
||||
import pytest
|
||||
import warnings
|
||||
import time
|
||||
|
||||
from openvino.inference_engine import ie_api as ie
|
||||
from tests_compatibility.conftest import model_path, image_path
|
||||
from tests_compatibility.conftest import model_path
|
||||
from ..test_utils.test_utils import generate_image # TODO: reformat into an absolute path
|
||||
|
||||
|
||||
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
|
||||
path_to_image = image_path()
|
||||
test_net_xml, test_net_bin = model_path(is_myriad)
|
||||
|
||||
|
||||
def read_image():
|
||||
import cv2
|
||||
n, c, h, w = (1, 3, 32, 32)
|
||||
image = cv2.imread(path_to_image)
|
||||
if image is None:
|
||||
raise FileNotFoundError("Input image not found")
|
||||
|
||||
image = cv2.resize(image, (h, w)) / 255
|
||||
image = image.transpose((2, 0, 1))
|
||||
image = image.reshape((n, c, h, w))
|
||||
return image
|
||||
|
||||
|
||||
def test_infer(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res = exec_net.infer({'data': img})
|
||||
assert np.argmax(res['fc_out'][0]) == 2
|
||||
assert np.argmax(res['fc_out'][0]) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
|
||||
@ -50,7 +36,7 @@ def test_infer_net_from_buffer(device):
|
||||
net2 = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
exec_net2 = ie_core.load_network(net2, device)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res = exec_net.infer({'data': img})
|
||||
res2 = exec_net2.infer({'data': img})
|
||||
del ie_core
|
||||
@ -63,7 +49,7 @@ def test_infer_wrong_input_name(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
with pytest.raises(AssertionError) as e:
|
||||
exec_net.infer({'_data_': img})
|
||||
assert "No input with name _data_ found in network" in str(e.value)
|
||||
@ -108,11 +94,11 @@ def test_async_infer_one_req(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request_handler = exec_net.start_async(request_id=0, inputs={'data': img})
|
||||
request_handler.wait()
|
||||
res = request_handler.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
|
||||
@ -121,12 +107,12 @@ def test_async_infer_many_req(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=5)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
for id in range(5):
|
||||
request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
|
||||
request_handler.wait()
|
||||
res = request_handler.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
|
||||
@ -136,7 +122,7 @@ def test_async_infer_many_req_get_idle(device):
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
num_requests = 5
|
||||
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
check_id = set()
|
||||
for id in range(2*num_requests):
|
||||
request_id = exec_net.get_idle_request_id()
|
||||
@ -151,7 +137,7 @@ def test_async_infer_many_req_get_idle(device):
|
||||
assert status == ie.StatusCode.OK
|
||||
for id in range(num_requests):
|
||||
if id in check_id:
|
||||
assert np.argmax(exec_net.requests[id].output_blobs['fc_out'].buffer) == 2
|
||||
assert np.argmax(exec_net.requests[id].output_blobs['fc_out'].buffer) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
|
||||
@ -161,7 +147,7 @@ def test_wait_before_start(device):
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
num_requests = 5
|
||||
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
requests = exec_net.requests
|
||||
for id in range(num_requests):
|
||||
status = requests[id].wait()
|
||||
@ -169,7 +155,7 @@ def test_wait_before_start(device):
|
||||
request_handler = exec_net.start_async(request_id=id, inputs={'data': img})
|
||||
status = requests[id].wait()
|
||||
assert status == ie.StatusCode.OK
|
||||
assert np.argmax(request_handler.output_blobs['fc_out'].buffer) == 2
|
||||
assert np.argmax(request_handler.output_blobs['fc_out'].buffer) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
|
||||
@ -185,7 +171,7 @@ def test_wait_for_callback(device):
|
||||
exec_net = ie_core.load_network(net, device, num_requests=num_requests)
|
||||
callbacks_info = {}
|
||||
callbacks_info['finished'] = 0
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
for request in exec_net.requests:
|
||||
request.set_completion_callback(callback, callbacks_info)
|
||||
request.async_infer({'data': img})
|
||||
@ -198,7 +184,7 @@ def test_wrong_request_id(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
with pytest.raises(ValueError) as e:
|
||||
exec_net.start_async(request_id=20, inputs={'data': img})
|
||||
assert "Incorrect request_id specified!" in str(e.value)
|
||||
@ -230,9 +216,9 @@ def test_plugin_accessible_after_deletion(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res = exec_net.infer({'data': img})
|
||||
assert np.argmax(res['fc_out'][0]) == 2
|
||||
assert np.argmax(res['fc_out'][0]) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
|
||||
@ -244,7 +230,7 @@ def test_exec_graph(device):
|
||||
pytest.skip("Can't run on ARM plugin due-to get_exec_graph_info method isn't implemented")
|
||||
net = ie_core.read_network(model=test_net_xml, weights=test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res = exec_net.infer({'data': img})
|
||||
exec_graph = exec_net.get_exec_graph_info()
|
||||
exec_graph_file = 'exec_graph.xml'
|
||||
@ -267,7 +253,7 @@ def test_export_import():
|
||||
assert os.path.exists(exported_net_file)
|
||||
exec_net = ie_core.import_network(exported_net_file, "MYRIAD")
|
||||
os.remove(exported_net_file)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res = exec_net.infer({'data': img})
|
||||
assert np.argmax(res['fc_out'][0]) == 3
|
||||
del exec_net
|
||||
|
@ -5,8 +5,8 @@ import os
import pytest

import ngraph as ng
from openvino.inference_engine import IECore, IENetwork, DataPtr, InputInfoPtr, PreProcessInfo
from tests_compatibility.conftest import model_path, create_relu
from openvino.inference_engine import IECore, DataPtr, InputInfoPtr, PreProcessInfo
from tests_compatibility.conftest import model_path


test_net_xml, test_net_bin = model_path()
@ -9,13 +9,13 @@ from datetime import datetime
import time

from openvino.inference_engine import ie_api as ie
from tests_compatibility.conftest import model_path, image_path, create_encoder
from tests_compatibility.conftest import model_path, create_encoder
from ..test_utils.test_utils import generate_image  # TODO: reformat into an absolute path
import ngraph as ng
from ngraph.impl import Function, Type

is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
path_to_img = image_path()


def create_function_with_memory(input_shape, data_type):
@ -29,19 +29,6 @@ def create_function_with_memory(input_shape, data_type):
    return caps


def read_image():
    import cv2
    n, c, h, w = (1, 3, 32, 32)
    image = cv2.imread(path_to_img)
    if image is None:
        raise FileNotFoundError("Input image not found")

    image = cv2.resize(image, (h, w)) / 255
    image = image.transpose((2, 0, 1)).astype(np.float32)
    image = image.reshape((n, c, h, w))
    return image


def load_sample_model(device, num_requests=1):
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
@ -117,7 +104,7 @@ def test_write_to_input_blobs_directly(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
executable_network = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = executable_network.requests[0]
|
||||
input_data = request.input_blobs["data"]
|
||||
input_data.buffer[:] = img
|
||||
@ -131,7 +118,7 @@ def test_write_to_input_blobs_copy(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
executable_network = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = executable_network.requests[0]
|
||||
request.input_blobs["data"].buffer[:] = img
|
||||
assert np.allclose(executable_network.requests[0].input_blobs["data"].buffer, img)
|
||||
@ -144,11 +131,11 @@ def test_infer(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.infer({'data': img})
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -158,12 +145,12 @@ def test_async_infer_default_timeout(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.async_infer({'data': img})
|
||||
request.wait()
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -173,12 +160,12 @@ def test_async_infer_wait_finish(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.async_infer({'data': img})
|
||||
request.wait(ie.WaitMode.RESULT_READY)
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -188,7 +175,7 @@ def test_async_infer_wait_time(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=2)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.async_infer({'data': img})
|
||||
start_time = datetime.utcnow()
|
||||
@ -207,7 +194,7 @@ def test_async_infer_wait_time(device):
|
||||
i += 1
|
||||
assert status == ie.StatusCode.OK
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -217,12 +204,12 @@ def test_async_infer_wait_status(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.async_infer({'data': img})
|
||||
request.wait(ie.WaitMode.RESULT_READY)
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
status = request.wait(ie.WaitMode.STATUS_ONLY)
|
||||
assert status == ie.StatusCode.OK
|
||||
del exec_net
|
||||
@ -234,14 +221,14 @@ def test_async_infer_fill_inputs(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.input_blobs['data'].buffer[:] = img
|
||||
request.async_infer()
|
||||
status_end = request.wait()
|
||||
assert status_end == ie.StatusCode.OK
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res[0]) == 2
|
||||
assert np.argmax(res[0]) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -251,18 +238,18 @@ def test_infer_modify_outputs(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
outputs0 = exec_net.infer({'data': img})
|
||||
status_end = request.wait()
|
||||
assert status_end == ie.StatusCode.OK
|
||||
assert np.argmax(outputs0['fc_out']) == 2
|
||||
assert np.argmax(outputs0['fc_out']) == 9
|
||||
outputs0['fc_out'][:] = np.zeros(shape=(1, 10), dtype=np.float32)
|
||||
outputs1 = request.output_blobs
|
||||
assert np.argmax(outputs1['fc_out'].buffer) == 2
|
||||
assert np.argmax(outputs1['fc_out'].buffer) == 9
|
||||
outputs1['fc_out'].buffer[:] = np.ones(shape=(1, 10), dtype=np.float32)
|
||||
outputs2 = request.output_blobs
|
||||
assert np.argmax(outputs2['fc_out'].buffer) == 2
|
||||
assert np.argmax(outputs2['fc_out'].buffer) == 9
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -284,14 +271,14 @@ def test_async_infer_callback(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.set_completion_callback(callback)
|
||||
request.async_infer({'data': img})
|
||||
status = request.wait()
|
||||
assert status == ie.StatusCode.OK
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
assert callback.callback_called == 1
|
||||
del exec_net
|
||||
del ie_core
|
||||
@ -312,7 +299,7 @@ def test_async_infer_callback_wait_before_start(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.set_completion_callback(callback)
|
||||
status = request.wait()
|
||||
@ -321,7 +308,7 @@ def test_async_infer_callback_wait_before_start(device):
|
||||
status = request.wait()
|
||||
assert status == ie.StatusCode.OK
|
||||
res = request.output_blobs['fc_out'].buffer
|
||||
assert np.argmax(res) == 2
|
||||
assert np.argmax(res) == 9
|
||||
assert callback.callback_called == 1
|
||||
del exec_net
|
||||
del ie_core
|
||||
@ -354,7 +341,7 @@ def test_async_infer_callback_wait_in_callback(device):
|
||||
ie_core = ie.IECore()
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(net, device, num_requests=1)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request_wrap = InferReqWrap(exec_net.requests[0])
|
||||
request_wrap.execute({'data': img})
|
||||
del exec_net
|
||||
@ -373,7 +360,7 @@ def test_async_infer_wait_while_callback_will_not_finish(device):
|
||||
callback_status['finished'] = False
|
||||
request = exec_net.requests[0]
|
||||
request.set_completion_callback(callback, py_data=callback_status)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request.async_infer({'data': img})
|
||||
request.wait()
|
||||
assert callback_status['finished'] == True
|
||||
@ -384,7 +371,7 @@ def test_get_perf_counts(device):
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
ie_core.set_config({"PERF_COUNT": "YES"}, device)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
request = exec_net.requests[0]
|
||||
request.infer({'data': img})
|
||||
pc = request.get_perf_counts()
|
||||
@ -406,11 +393,11 @@ def test_set_batch_size(device):
|
||||
net.batch_size = 10
|
||||
data = np.zeros(shape=net.input_info['data'].input_data.shape)
|
||||
exec_net = ie_core.load_network(net, device)
|
||||
data[0] = read_image()[0]
|
||||
data[0] = generate_image()[0]
|
||||
request = exec_net.requests[0]
|
||||
request.set_batch(1)
|
||||
request.infer({'data': data})
|
||||
assert np.allclose(int(round(request.output_blobs['fc_out'].buffer[0][2])), 1), "Incorrect data for 1st batch"
|
||||
assert np.allclose(int(round(request.output_blobs['fc_out'].buffer[0][2])), 0), "Incorrect data for 1st batch"
|
||||
del exec_net
|
||||
del ie_core
|
||||
del net
|
||||
@ -453,7 +440,7 @@ def test_blob_setter(device):
|
||||
net.input_info['data'].layout = "NHWC"
|
||||
exec_net_2 = ie_core.load_network(network=net, device_name=device, num_requests=1)
|
||||
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
|
||||
|
||||
img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
|
||||
@ -471,7 +458,7 @@ def test_blob_setter_with_preprocess(device):
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net = ie_core.load_network(network=net, device_name=device, num_requests=1)
|
||||
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
tensor_desc = ie.TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
|
||||
img_blob = ie.Blob(tensor_desc, img)
|
||||
preprocess_info = ie.PreProcessInfo()
|
||||
@ -498,25 +485,15 @@ def test_resize_algorithm_work(device):
|
||||
net = ie_core.read_network(test_net_xml, test_net_bin)
|
||||
exec_net_1 = ie_core.load_network(network=net, device_name=device, num_requests=1)
|
||||
|
||||
img = read_image()
|
||||
img = generate_image()
|
||||
res_1 = np.sort(exec_net_1.infer({"data": img})['fc_out'])
|
||||
|
||||
net.input_info['data'].preprocess_info.resize_algorithm = ie.ResizeAlgorithm.RESIZE_BILINEAR
|
||||
|
||||
exec_net_2 = ie_core.load_network(net, device)
|
||||
|
||||
import cv2
|
||||
|
||||
image = cv2.imread(path_to_img)
|
||||
if image is None:
|
||||
raise FileNotFoundError("Input image not found")
|
||||
|
||||
image = image / 255
|
||||
image = image.transpose((2, 0, 1)).astype(np.float32)
|
||||
image = np.expand_dims(image, 0)
|
||||
|
||||
tensor_desc = ie.TensorDesc("FP32", [1, 3, image.shape[2], image.shape[3]], "NCHW")
|
||||
img_blob = ie.Blob(tensor_desc, image)
|
||||
tensor_desc = ie.TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW")
|
||||
img_blob = ie.Blob(tensor_desc, img)
|
||||
request = exec_net_2.requests[0]
|
||||
assert request.preprocess_info["data"].resize_algorithm == ie.ResizeAlgorithm.RESIZE_BILINEAR
|
||||
request.set_blob('data', img_blob)
|
||||
|
@ -3,8 +3,7 @@

from openvino.inference_engine import IECore, IENetwork
import ngraph as ng
from ngraph.impl.op import Parameter
from ngraph.impl import Function, Shape, Type
from ngraph.impl import Function

from tests_compatibility.conftest import model_path, create_relu
@ -9,6 +9,7 @@ from ngraph.utils.tensor_iterator_types import (
    TensorIteratorInvariantInputDesc,
    TensorIteratorBodyOutputDesc,
)
from tests_compatibility import skip_issue_86384
from tests_compatibility.runtime import get_runtime


@ -150,6 +151,7 @@ def check_if(if_model, cond_val, exp_results):
    check_results(results, exp_results)


@skip_issue_86384
def test_if_with_two_outputs():
    check_if(create_simple_if_with_two_outputs, True,
             [np.array([10], dtype=np.float32), np.array([-20], dtype=np.float32)])
@ -157,6 +159,7 @@ def test_if_with_two_outputs():
             [np.array([17], dtype=np.float32), np.array([16], dtype=np.float32)])


@skip_issue_86384
def test_diff_if_with_two_outputs():
    check_if(create_diff_if_with_two_outputs, True,
             [np.array([10], dtype=np.float32), np.array([6, 4], dtype=np.float32)])
@ -164,11 +167,13 @@ def test_diff_if_with_two_outputs():
             [np.array([4], dtype=np.float32), np.array([12, 16], dtype=np.float32)])


@skip_issue_86384
def test_simple_if():
    check_if(simple_if, True, [np.array([6, 4], dtype=np.float32)])
    check_if(simple_if, False, [np.array([5, 5], dtype=np.float32)])


@skip_issue_86384
def test_simple_if_without_body_parameters():
    check_if(simple_if_without_parameters, True, [np.array([0.7], dtype=np.float32)])
    check_if(simple_if_without_parameters, False, [np.array([9.0], dtype=np.float32)])
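check_if() and check_results() are used above without being shown; a rough sketch of the comparison they are assumed to perform, namely evaluating the If model through get_runtime() and matching each output with np.allclose:

import numpy as np


def check_results_sketch(results, expected):
    # Assumed behaviour, for illustration only: one expected array per model output.
    assert len(results) == len(expected)
    for res, exp in zip(results, expected):
        assert np.allclose(res, exp), f"{res} does not match expected {exp}"


# A comparison in the same spirit as the If-op tests above:
check_results_sketch([np.array([6, 4], dtype=np.float32)],
                     [np.array([6, 4], dtype=np.float32)])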
@ -0,0 +1,43 @@
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import openvino
from openvino.runtime import Model, Core, Shape, Type
from openvino.runtime.op import Parameter
from typing import Tuple, Union, List
import openvino.runtime.opset8 as ops
import numpy as np


def get_test_function():
    element_type = Type.f32
    param = Parameter(element_type, Shape([1, 3, 22, 22]))
    relu = ops.relu(param)
    func = Model([relu], [param], "test")
    assert func is not None
    return func


def test_compare_functions():
    try:
        from openvino.test_utils import compare_functions
        func = get_test_function()
        status, _ = compare_functions(func, func)
        assert status
    except RuntimeError:
        print("openvino.test_utils.compare_functions is not available")


def generate_image(shape: Tuple = (1, 3, 32, 32), dtype: Union[str, np.dtype] = "float32") -> np.array:
    np.random.seed(42)
    return np.random.rand(*shape).astype(dtype)


def generate_model(input_shape: List[int]) -> openvino.runtime.ie_api.CompiledModel:
    param = ops.parameter(input_shape, np.float32, name="parameter")
    relu = ops.relu(param, name="relu")
    func = Model([relu], [param], "test")
    func.get_ordered_ops()[2].friendly_name = "friendly"

    core = Core()
    return core.compile_model(func, "CPU", {})
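The helpers above replace the image and model files previously pulled from the testdata repository. A short usage sketch; the import path, input keying and output access below are example choices based on the 2022.x Python API, not part of the diff:

import numpy as np

# The tests import these with a relative "..test_utils.test_utils" path; the absolute
# package name below is an assumption made so this snippet runs on its own.
from tests.test_utils.test_utils import generate_image, generate_model

img = generate_image()                     # seeded, so every call returns the same (1, 3, 32, 32) float32 tensor
assert img.shape == (1, 3, 32, 32) and img.dtype == np.float32

compiled = generate_model([1, 3, 32, 32])  # Parameter -> ReLU compiled for CPU
request = compiled.create_infer_request()
request.infer({0: img})                    # inputs keyed by index
result = request.get_output_tensor(0).data
assert result.shape == (1, 3, 32, 32)      # ReLU preserves the input shape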
@ -0,0 +1,11 @@
<!--
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: Apache-2.0
-->

<ie>
    <plugins>
        <plugin location="libopenvino_intel_cpu_plugin.so" name="CUSTOM">
        </plugin>
    </plugins>
</ie>
@ -0,0 +1,11 @@
<!--
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: Apache-2.0
-->

<ie>
    <plugins>
        <plugin location="libopenvino_intel_cpu_plugin.so" name="CUSTOM">
        </plugin>
    </plugins>
</ie>
@ -0,0 +1,11 @@
<!--
Copyright (C) 2020 Intel Corporation
SPDX-License-Identifier: Apache-2.0
-->

<ie>
    <plugins>
        <plugin location="openvino_intel_cpu_plugin.dll" name="CUSTOM">
        </plugin>
    </plugins>
</ie>
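These three near-identical files are the per-platform plugin-registration configs that plugins_path() now serves from the local test tree instead of DATA_PATH. A sketch of how such a config is typically consumed, assuming the test picks the file for the current platform and registers it through IECore.register_plugins:

from sys import platform

from openvino.inference_engine import IECore
from tests_compatibility.conftest import plugins_path  # assumes the test package is importable

plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
if platform == "win32":
    config = plugins_win_xml
elif platform == "darwin":
    config = plugins_osx_xml
else:
    config = plugins_xml

ie = IECore()
ie.register_plugins(config)           # makes the "CUSTOM" device name resolvable
print(ie.get_versions("CUSTOM"))      # example query; works once the plugin is registered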
Binary file not shown.
Binary file not shown.
@ -0,0 +1,467 @@
|
||||
<?xml version="1.0" ?>
|
||||
<net name="test_model" version="10">
|
||||
<layers>
|
||||
<layer id="0" name="data" type="Parameter" version="opset1">
|
||||
<data element_type="f16" shape="1,3,32,32"/>
|
||||
<output>
|
||||
<port id="0" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="20/mean/Fused_Mul_614616_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="0" shape="16,3,5,5" size="2400"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="19/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="3" name="data_add_575/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="2400" shape="1,16,1,1" size="32"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="4" name="19/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="5" name="21" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="6" name="22" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="7" name="onnx_initializer_node_8/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="2432" shape="32,16,5,5" size="25600"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="8" name="23/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="9" name="23/Dims351/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="28032" shape="1,32,1,1" size="64"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="10" name="23" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="11" name="25/mean/Fused_Mul_618620_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="28096" shape="64,32,3,3" size="36864"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="12" name="24/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="13" name="data_add_578583/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="64960" shape="1,64,1,1" size="128"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="14" name="24/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="15" name="26" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="16" name="27" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="17" name="28/Reshape/Cast_1955_const" type="Const" version="opset1">
|
||||
<data element_type="i64" offset="65088" shape="2" size="16"/>
|
||||
<output>
|
||||
<port id="1" precision="I64">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="18" name="28/Reshape" type="Reshape" version="opset1">
|
||||
<data special_zero="True"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="19" name="onnx_initializer_node_17/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="65104" shape="10,5184" size="103680"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="20" name="29/WithoutBiases" type="MatMul" version="opset1">
|
||||
<data transpose_a="0" transpose_b="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="21" name="onnx_initializer_node_18/Output_0/Data_/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f16" offset="168784" shape="1,10" size="20"/>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="22" name="29" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="23" name="fc_out" type="SoftMax" version="opset1">
|
||||
<data axis="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP16">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="24" name="fc_out/sink_port_0" type="Result" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
|
||||
<edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
|
||||
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
|
||||
<edge from-layer="5" from-port="1" to-layer="6" to-port="0"/>
|
||||
<edge from-layer="6" from-port="1" to-layer="8" to-port="0"/>
|
||||
<edge from-layer="7" from-port="1" to-layer="8" to-port="1"/>
|
||||
<edge from-layer="8" from-port="2" to-layer="10" to-port="0"/>
|
||||
<edge from-layer="9" from-port="1" to-layer="10" to-port="1"/>
|
||||
<edge from-layer="10" from-port="2" to-layer="12" to-port="0"/>
|
||||
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
|
||||
<edge from-layer="12" from-port="2" to-layer="14" to-port="0"/>
|
||||
<edge from-layer="13" from-port="1" to-layer="14" to-port="1"/>
|
||||
<edge from-layer="14" from-port="2" to-layer="15" to-port="0"/>
|
||||
<edge from-layer="15" from-port="1" to-layer="16" to-port="0"/>
|
||||
<edge from-layer="16" from-port="1" to-layer="18" to-port="0"/>
|
||||
<edge from-layer="17" from-port="1" to-layer="18" to-port="1"/>
|
||||
<edge from-layer="18" from-port="2" to-layer="20" to-port="0"/>
|
||||
<edge from-layer="19" from-port="1" to-layer="20" to-port="1"/>
|
||||
<edge from-layer="20" from-port="2" to-layer="22" to-port="0"/>
|
||||
<edge from-layer="21" from-port="1" to-layer="22" to-port="1"/>
|
||||
<edge from-layer="22" from-port="2" to-layer="23" to-port="0"/>
|
||||
<edge from-layer="23" from-port="1" to-layer="24" to-port="0"/>
|
||||
</edges>
|
||||
<meta_data>
|
||||
<MO_version value="unknown version"/>
|
||||
<cli_parameters>
|
||||
<blobs_as_inputs value="True"/>
|
||||
<data_type value="FP16"/>
|
||||
<disable_resnet_optimization value="False"/>
|
||||
<disable_weights_compression value="False"/>
|
||||
<enable_concat_optimization value="False"/>
|
||||
<extensions value="DIR"/>
|
||||
<framework value="onnx"/>
|
||||
<freeze_placeholder_with_value value="{}"/>
|
||||
<generate_deprecated_IR_V2 value="False"/>
|
||||
<generate_deprecated_IR_V7 value="False"/>
|
||||
<generate_experimental_IR_V10 value="True"/>
|
||||
<input_model value="DIR/test_model.onnx"/>
|
||||
<keep_quantize_ops_in_IR value="True"/>
|
||||
<keep_shape_ops value="False"/>
|
||||
<log_level value="ERROR"/>
|
||||
<mean_scale_values value="{}"/>
|
||||
<mean_values value="()"/>
|
||||
<model_name value="test_model"/>
|
||||
<move_to_preprocess value="False"/>
|
||||
<output_dir value="DIR"/>
|
||||
<placeholder_data_types value="{}"/>
|
||||
<progress value="False"/>
|
||||
<reverse_input_channels value="False"/>
|
||||
<scale_values value="()"/>
|
||||
<silent value="False"/>
|
||||
<stream_output value="False"/>
|
||||
<unset unset_cli_parameters="batch, disable_fusing, disable_gfusing, finegrain_fusing, input, input_shape, output, placeholder_shapes, scale, transformations_config"/>
|
||||
</cli_parameters>
|
||||
</meta_data>
|
||||
</net>
|
Binary file not shown.
@ -0,0 +1,467 @@
|
||||
<?xml version="1.0" ?>
|
||||
<net name="test_model" version="10">
|
||||
<layers>
|
||||
<layer id="0" name="data" type="Parameter" version="opset1">
|
||||
<data element_type="f32" shape="1,3,32,32"/>
|
||||
<output>
|
||||
<port id="0" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="1" name="20/mean/Fused_Mul_614616_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="0" shape="16,3,5,5" size="4800"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="2" name="19/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>3</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>16</dim>
|
||||
<dim>3</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="3" name="data_add_575/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="4800" shape="1,16,1,1" size="64"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="4" name="19/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="5" name="21" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="6" name="22" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>32</dim>
|
||||
<dim>32</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="7" name="onnx_initializer_node_8/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="4864" shape="32,16,5,5" size="51200"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="8" name="23/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>5</dim>
|
||||
<dim>5</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="9" name="23/Dims357/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="56064" shape="1,32,1,1" size="128"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="10" name="23" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="11" name="25/mean/Fused_Mul_618620_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="56192" shape="64,32,3,3" size="73728"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="12" name="24/WithoutBiases" type="Convolution" version="opset1">
|
||||
<data dilations="1,1" output_padding="0,0" pads_begin="2,2" pads_end="2,2" strides="1,1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>32</dim>
|
||||
<dim>16</dim>
|
||||
<dim>16</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>64</dim>
|
||||
<dim>32</dim>
|
||||
<dim>3</dim>
|
||||
<dim>3</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="13" name="data_add_578583/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="129920" shape="1,64,1,1" size="256"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="14" name="24/Fused_Add_" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>1</dim>
|
||||
<dim>1</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="15" name="26" type="ReLU" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="16" name="27" type="MaxPool" version="opset1">
|
||||
<data kernel="2,2" pads_begin="0,0" pads_end="0,0" rounding_type="floor" strides="2,2"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>18</dim>
|
||||
<dim>18</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="17" name="28/Reshape/Cast_1955_const" type="Const" version="opset1">
|
||||
<data element_type="i64" offset="130176" shape="2" size="16"/>
|
||||
<output>
|
||||
<port id="1" precision="I64">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="18" name="28/Reshape" type="Reshape" version="opset1">
|
||||
<data special_zero="True"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>64</dim>
|
||||
<dim>9</dim>
|
||||
<dim>9</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>2</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="19" name="onnx_initializer_node_17/Output_0/Data__const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="130192" shape="10,5184" size="207360"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="20" name="29/WithoutBiases" type="MatMul" version="opset1">
|
||||
<data transpose_a="0" transpose_b="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>10</dim>
|
||||
<dim>5184</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="21" name="onnx_initializer_node_18/Output_0/Data_/copy_const" type="Const" version="opset1">
|
||||
<data element_type="f32" offset="337552" shape="1,10" size="40"/>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="22" name="29" type="Add" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
<port id="1">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="2" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="23" name="fc_out" type="SoftMax" version="opset1">
|
||||
<data axis="1"/>
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
<output>
|
||||
<port id="1" precision="FP32">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</output>
|
||||
</layer>
|
||||
<layer id="24" name="fc_out/sink_port_0" type="Result" version="opset1">
|
||||
<input>
|
||||
<port id="0">
|
||||
<dim>1</dim>
|
||||
<dim>10</dim>
|
||||
</port>
|
||||
</input>
|
||||
</layer>
|
||||
</layers>
|
||||
<edges>
|
||||
<edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
|
||||
<edge from-layer="1" from-port="1" to-layer="2" to-port="1"/>
|
||||
<edge from-layer="2" from-port="2" to-layer="4" to-port="0"/>
|
||||
<edge from-layer="3" from-port="1" to-layer="4" to-port="1"/>
|
||||
<edge from-layer="4" from-port="2" to-layer="5" to-port="0"/>
|
||||
<edge from-layer="5" from-port="1" to-layer="6" to-port="0"/>
|
||||
<edge from-layer="6" from-port="1" to-layer="8" to-port="0"/>
|
||||
<edge from-layer="7" from-port="1" to-layer="8" to-port="1"/>
|
||||
<edge from-layer="8" from-port="2" to-layer="10" to-port="0"/>
|
||||
<edge from-layer="9" from-port="1" to-layer="10" to-port="1"/>
|
||||
<edge from-layer="10" from-port="2" to-layer="12" to-port="0"/>
|
||||
<edge from-layer="11" from-port="1" to-layer="12" to-port="1"/>
|
||||
<edge from-layer="12" from-port="2" to-layer="14" to-port="0"/>
|
||||
<edge from-layer="13" from-port="1" to-layer="14" to-port="1"/>
|
||||
<edge from-layer="14" from-port="2" to-layer="15" to-port="0"/>
|
||||
<edge from-layer="15" from-port="1" to-layer="16" to-port="0"/>
|
||||
<edge from-layer="16" from-port="1" to-layer="18" to-port="0"/>
|
||||
<edge from-layer="17" from-port="1" to-layer="18" to-port="1"/>
|
||||
<edge from-layer="18" from-port="2" to-layer="20" to-port="0"/>
|
||||
<edge from-layer="19" from-port="1" to-layer="20" to-port="1"/>
|
||||
<edge from-layer="20" from-port="2" to-layer="22" to-port="0"/>
|
||||
<edge from-layer="21" from-port="1" to-layer="22" to-port="1"/>
|
||||
<edge from-layer="22" from-port="2" to-layer="23" to-port="0"/>
|
||||
<edge from-layer="23" from-port="1" to-layer="24" to-port="0"/>
|
||||
</edges>
|
||||
<meta_data>
|
||||
<MO_version value="unknown version"/>
|
||||
<cli_parameters>
|
||||
<blobs_as_inputs value="True"/>
|
||||
<data_type value="FP32"/>
|
||||
<disable_resnet_optimization value="False"/>
|
||||
<disable_weights_compression value="False"/>
|
||||
<enable_concat_optimization value="False"/>
|
||||
<extensions value="DIR"/>
|
||||
<framework value="onnx"/>
|
||||
<freeze_placeholder_with_value value="{}"/>
|
||||
<generate_deprecated_IR_V2 value="False"/>
|
||||
<generate_deprecated_IR_V7 value="False"/>
|
||||
<generate_experimental_IR_V10 value="True"/>
|
||||
<input_model value="DIR/test_model.onnx"/>
|
||||
<keep_quantize_ops_in_IR value="True"/>
|
||||
<keep_shape_ops value="False"/>
|
||||
<log_level value="ERROR"/>
|
||||
<mean_scale_values value="{}"/>
|
||||
<mean_values value="()"/>
|
||||
<model_name value="test_model"/>
|
||||
<move_to_preprocess value="False"/>
|
||||
<output_dir value="DIR"/>
|
||||
<placeholder_data_types value="{}"/>
|
||||
<progress value="False"/>
|
||||
<reverse_input_channels value="False"/>
|
||||
<scale_values value="()"/>
|
||||
<silent value="False"/>
|
||||
<stream_output value="False"/>
|
||||
<unset unset_cli_parameters="batch, disable_fusing, disable_gfusing, finegrain_fusing, input, input_shape, output, placeholder_shapes, scale, transformations_config"/>
|
||||
</cli_parameters>
|
||||
</meta_data>
|
||||
</net>
|