[ONNX FE] Support value freezing without specifying type (#14481)

* [ONNX FE] Support value freezing without specifying type

Implement the get_element_type method for the ONNX InputModel.
Add tests that freeze values for integer and float model inputs.
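
As a quick illustration (a minimal sketch based on the new tests in this commit; the model file is the integer test model those tests generate), the frontend can now report the element type of a model input, which is what allows a value to be frozen without the user stating its type:

    from openvino.frontend import FrontEndManager
    from openvino.runtime import Type

    fe = FrontEndManager().load_by_framework("onnx")
    model = fe.load("input_model_int32.onnx")  # integer model created by the new tests

    # the ONNX frontend now answers get_element_type for model inputs,
    # so a value can be frozen for "x" without an explicit type
    x_input = model.get_place_by_tensor_name(tensor_name="x")
    assert model.get_element_type(x_input) == Type.i32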

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>

* Update src/frontends/onnx/frontend/src/input_model.cpp

Co-authored-by: Tomasz Dołbniak <tomasz.dolbniak@intel.com>

* Update src/frontends/onnx/frontend/src/input_model.cpp

Co-authored-by: Tomasz Dołbniak <tomasz.dolbniak@intel.com>

* Update src/frontends/onnx/frontend/src/input_model.cpp

Co-authored-by: Katarzyna Mitrus <katarzyna.mitrus@intel.com>

* Return the element type only for model inputs and add ONNX FE tests (see the sketch after this list)

* Rename variable in the test

* Add syntax mark
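
For the bullet on returning the element type only for model inputs, a small sketch of the intended behavior, again based on the new tests (the tensor name const_node comes from the integer test model):

    from openvino.frontend import FrontEndManager
    from openvino.runtime import Type

    fe = FrontEndManager().load_by_framework("onnx")
    model = fe.load("input_model_int32.onnx")

    # only model inputs have a concrete element type;
    # any other place is reported as Type.undefined
    const_node = model.get_place_by_tensor_name(tensor_name="const_node")
    assert model.get_element_type(const_node) == Type.undefined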

Signed-off-by: Kazantsev, Roman <roman.kazantsev@intel.com>
Co-authored-by: Tomasz Dołbniak <tomasz.dolbniak@intel.com>
Co-authored-by: Katarzyna Mitrus <katarzyna.mitrus@intel.com>
Roman Kazantsev 2022-12-11 14:58:55 +04:00 committed by GitHub
parent 408bfc50c7
commit e9e05e508a
4 changed files with 136 additions and 6 deletions


@@ -3,12 +3,14 @@
# SPDX-License-Identifier: Apache-2.0
import os
import numpy as np
import onnx
import pytest
from onnx.helper import make_graph, make_model, make_tensor_value_info
-import numpy as np
-from openvino.runtime import Dimension, PartialShape
from openvino.frontend import FrontEndManager, GeneralFailure
from openvino.runtime import Dimension, PartialShape, Type
# ------Test input model 1------
@@ -344,6 +346,24 @@ def create_test_onnx_models():
    models["test_place_names.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                 opset_imports=[onnx.helper.make_opsetid("", 13)])

    # Input model with integer types
    add = onnx.helper.make_node("Add", inputs=["x", "y"], outputs=["z"])
    const_tensor = onnx.helper.make_tensor("const_tensor",
                                           onnx.TensorProto.INT32,
                                           (2, 2),
                                           [5, 1, 4, 20])
    const_node = onnx.helper.make_node("Constant", [], outputs=["const_node"],
                                       value=const_tensor, name="const_node")
    mul = onnx.helper.make_node("Mul", inputs=["z", "const_node"], outputs=["out"])
    input_tensors = [
        make_tensor_value_info("x", onnx.TensorProto.INT32, (2, 2)),
        make_tensor_value_info("y", onnx.TensorProto.INT32, (2, 2)),
    ]
    output_tensors = [make_tensor_value_info("out", onnx.TensorProto.FLOAT, (2, 2))]
    graph = make_graph([add, const_node, mul], "graph", input_tensors, output_tensors)
    models["input_model_int32.onnx"] = make_model(graph, producer_name="ONNX Importer",
                                                  opset_imports=[onnx.helper.make_opsetid("", 13)])

    return models
@@ -1748,3 +1768,32 @@ def test_override_cut_outputs():
    with pytest.raises(GeneralFailure) as e:
        model.override_all_outputs(outputs=[place_to_cut])
    assert "The place OutputEdge{1, 0} is outdated" in str(e.value)


def test_get_element_type():
    skip_if_onnx_frontend_is_disabled()
    fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)
    model = fe.load("input_model_2.onnx")

    in1 = model.get_place_by_tensor_name(tensor_name="in1")
    assert model.get_element_type(in1) == Type.f32

    in1_output_edge = in1.get_consuming_ports()[0]
    assert model.get_element_type(in1_output_edge) == Type.f32


def test_get_element_type_int32():
    skip_if_onnx_frontend_is_disabled()
    fe = fem.load_by_framework(framework=ONNX_FRONTEND_NAME)
    model = fe.load("input_model_int32.onnx")

    x_input = model.get_place_by_tensor_name(tensor_name="x")
    assert model.get_element_type(x_input) == Type.i32

    x_output_edge = x_input.get_consuming_ports()[0]
    assert model.get_element_type(x_output_edge) == Type.i32

    # get_element_type can return the concrete element type only for model inputs
    # for other places, it returns undefined type
    const_node = model.get_place_by_tensor_name(tensor_name="const_node")
    assert model.get_element_type(const_node) == Type.undefined


@@ -193,6 +193,33 @@ void InputModel::set_element_type(const ov::frontend::Place::Ptr& place, const n
    m_editor->set_input_types(m);
}

ov::element::Type InputModel::get_element_type(const ov::frontend::Place::Ptr& place) const {
    OPENVINO_ASSERT(place, "Cannot return a type for nullptr Place.");
    std::string tensor_name;
    const auto input_edge = std::dynamic_pointer_cast<PlaceInputEdge>(place);
    const auto output_edge = std::dynamic_pointer_cast<PlaceOutputEdge>(place);
    if (input_edge) {
        const auto tensor_names = input_edge->get_source_tensor()->get_names();
        OPENVINO_ASSERT(!tensor_names.empty(),
                        "Cannot retrieve source tensor name for this InputEdge and thus its element type.");
        tensor_name = tensor_names[0];
    } else if (output_edge) {
        const auto tensor_names = output_edge->get_target_tensor()->get_names();
        OPENVINO_ASSERT(!tensor_names.empty(),
                        "Cannot retrieve target tensor name for this OutputEdge and thus its element type.");
        tensor_name = tensor_names[0];
    } else {
        OPENVINO_ASSERT(place->get_names().size() > 0, "Place must have its name.");
        tensor_name = place->get_names().at(0);
    }

    if (place->is_input()) {
        return m_editor->get_input_type(tensor_name);
    }
    // now we can return the concrete element type only for model inputs
    return element::undefined;
}

std::shared_ptr<Model> InputModel::decode() {
    return m_editor->decode();
}


@@ -52,6 +52,7 @@ public:
    void set_partial_shape(const ov::frontend::Place::Ptr& place, const ngraph::PartialShape& shape) override;
    ngraph::PartialShape get_partial_shape(const ov::frontend::Place::Ptr& place) const override;
    void set_element_type(const ov::frontend::Place::Ptr& place, const ngraph::element::Type& type) override;
    ov::element::Type get_element_type(const ov::frontend::Place::Ptr& place) const override;
    ov::frontend::Place::Ptr add_output(const ov::frontend::Place::Ptr& place) override;
    void remove_output(const ov::frontend::Place::Ptr& place) override;


@@ -17,7 +17,6 @@ from openvino.frontend import (
) # pylint: disable=no-name-in-module,import-error
from openvino.runtime import Core
from openvino.tools.mo.convert_impl import prepare_ir
-from openvino.tools.mo.utils.error import Error
def base_args_config(use_legacy_fe: bool = None, use_new_fe: bool = None):
@@ -105,6 +104,22 @@ class TestMoFreezePlaceholder(unittest.TestCase):
        )
        self.models["test_model_2.onnx"] = model_2

        input_tensors_3 = [
            make_tensor_value_info("in1", onnx.TensorProto.INT32, (2, 3)),
            make_tensor_value_info("in2", onnx.TensorProto.INT32, (3,)),
        ]
        output_tensors_3 = [
            make_tensor_value_info("mul_out", onnx.TensorProto.INT32, (2, 3)),
        ]
        mul = onnx.helper.make_node("Mul", inputs=["in1", "in2"], outputs=["mul_out"])
        graph_3 = make_graph([mul], "test_graph_3", input_tensors_3, output_tensors_3)
        model_3 = make_model(
            graph_3,
            producer_name="MO tests",
            opset_imports=[onnx.helper.make_opsetid("", 13)],
        )
        self.models["test_model_int.onnx"] = model_3

        for name, model in self.models.items():
            onnx.save(model, name)
@@ -248,6 +263,44 @@
            args = base_args_config(use_new_fe=use_new_fe)
            args.input_model = "test_model_2.onnx"
            args.input = input_freezing_value
-            self.assertRaisesRegex(Error, "Please specify type for value freezing in1 node explicitly "
-                                          "because the frontend does not support automatic type detection.",
-                                   prepare_ir, args)
            _, model = prepare_ir(args)

            ie = Core()
            exec_net = ie.compile_model(model, "CPU")
            req = exec_net.create_infer_request()
            results = req.infer(inputs)
            values = list(results.values())[0]
            if dtype is not None:
                assert values.dtype == dtype
            assert np.allclose(values, expected)

    @generate(
        *[
            (
                "in2->[3 2 5]",
                True,
                {"in1": np.array([[2, 1, 3], [1, 5, 6]], dtype=np.int32)},
                np.array([[6, 2, 15], [3, 10, 30]], dtype=np.int32),
                np.int32,
            ),
        ],
    )
    def test_value_without_type_int32(self, input_freezing_value, use_new_fe, inputs, expected,
                                      dtype=None):
        with patch("openvino.tools.mo.convert_impl.get_default_frontends") as default_fe:
            default_fe.return_value = get_test_default_frontends()
            args = base_args_config(use_new_fe=use_new_fe)
            args.input_model = "test_model_int.onnx"
            args.input = input_freezing_value

            _, model = prepare_ir(args)

            ie = Core()
            exec_net = ie.compile_model(model, "CPU")
            req = exec_net.create_infer_request()
            results = req.infer(inputs)
            values = list(results.values())[0]
            if dtype is not None:
                assert values.dtype == dtype
            assert np.allclose(values, expected)
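
For context, a condensed, standalone sketch of the flow the new test above exercises (it assumes the helpers and the test_model_int.onnx file defined earlier in this test module; the input data and expected result are taken from the @generate parameters). The value for in2 is frozen without an explicit type because the ONNX frontend now reports the input as int32:

    import numpy as np
    from openvino.runtime import Core
    from openvino.tools.mo.convert_impl import prepare_ir

    args = base_args_config(use_new_fe=True)  # helper defined earlier in this file
    args.input_model = "test_model_int.onnx"
    args.input = "in2->[3 2 5]"  # value freezing, no type given

    _, model = prepare_ir(args)
    compiled = Core().compile_model(model, "CPU")
    request = compiled.create_infer_request()
    results = request.infer({"in1": np.array([[2, 1, 3], [1, 5, 6]], dtype=np.int32)})
    print(list(results.values())[0])  # expected [[6 2 15] [3 10 30]], dtype int32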