[ONNX] ConvInteger (#8042)

This commit is contained in:
Dawid Kożykowski 2021-10-21 08:58:20 +02:00 committed by GitHub
parent e9777a6da0
commit 64340ec2fc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 523 additions and 264 deletions

View File

@ -13,11 +13,9 @@ file(GLOB_RECURSE LIBRARY_PUBLIC_HEADERS ${ONNX_FRONTEND_INCLUDE_DIR}/*.hpp)
# Remove disabled ops
list(REMOVE_ITEM LIBRARY_SRC
${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.cpp
)
list(REMOVE_ITEM LIBRARY_HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/src/op/conv_integer.hpp
${CMAKE_CURRENT_SOURCE_DIR}/src/op/quant_conv.hpp
)

View File

@ -14,6 +14,7 @@
#include "ngraph/op/group_conv.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "onnx_import/core/null_node.hpp"
#include "utils/conv_factory.hpp"
#include "utils/convpool.hpp"
#include "utils/reshape.hpp"
@ -22,34 +23,6 @@ namespace onnx_import {
namespace op {
namespace set_1 {
namespace detail {
std::shared_ptr<ngraph::op::Op> make_ng_convolution(const Output<ngraph::Node>& data,
const Output<ngraph::Node>& filters,
const ngraph::Strides& strides,
const ngraph::Strides& dilations,
const ngraph::CoordinateDiff& padding_below,
const ngraph::CoordinateDiff& padding_above,
int64_t groups,
const ngraph::op::PadType& auto_pad) {
if (groups > 1) {
const auto reshaped_filters = convpool::get_reshaped_filters(filters, groups);
return std::make_shared<default_opset::GroupConvolution>(data,
reshaped_filters,
strides,
padding_below,
padding_above,
dilations,
auto_pad);
} else {
return std::make_shared<default_opset::Convolution>(data,
filters,
strides,
padding_below,
padding_above,
dilations,
auto_pad);
}
}
std::shared_ptr<ngraph::Node> add_bias(const Output<ngraph::Node>& ng_conv, const Output<ngraph::Node>& bias) {
const auto conv_shape = std::make_shared<default_opset::ShapeOf>(ng_conv);
@ -76,8 +49,14 @@ OutputVector conv(const Node& node,
const auto& padding_below = paddings.first;
const auto& padding_above = paddings.second;
const auto conv_node =
make_ng_convolution(data, filters, strides, dilations, padding_below, padding_above, groups, auto_pad_type);
const auto conv_node = conv_factory::make_ng_convolution(data,
filters,
strides,
dilations,
padding_below,
padding_above,
groups,
auto_pad_type);
// no bias param
if (ngraph::op::is_null(bias)) {

View File

@ -2,111 +2,67 @@
// SPDX-License-Identifier: Apache-2.0
//
// Disabled in CMakeList
// Update to higher opset required
#include "op/conv_integer.hpp"
#include "exceptions.hpp"
#include "ngraph/builder/make_constant.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/opsets/opset0.hpp"
#include "default_opset.hpp"
#include "utils/conv_factory.hpp"
#include "utils/convpool.hpp"
#include "utils/reshape.hpp"
using namespace ngraph::builder;
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
OutputVector conv_integer(const Node& node)
{
const OutputVector& inputs = node.get_ng_inputs();
auto num_inputs = inputs.size();
auto input = inputs.at(0);
auto filters = inputs.at(1);
OutputVector conv_integer(const Node& node) {
const OutputVector& inputs = node.get_ng_inputs();
int64_t groups{node.get_attribute_value<int64_t>("group", 1)};
CHECK_VALID_NODE(
node,
groups == 1,
"Only value of 1 for 'group' supported for ConvInteger. Given: ",
groups);
const auto& input = inputs.at(0);
const auto& filter = inputs.at(1);
const auto& input_zero_point =
(inputs.size() > 2) ? inputs.at(2) : ngraph::op::Constant::create(ngraph::element::i32, {1}, {0});
const auto& filter_zero_point =
(inputs.size() > 3) ? inputs.at(3) : ngraph::op::Constant::create(ngraph::element::i32, {1}, {0});
auto window_movement_strides = convpool::get_strides(node);
auto window_dilation_strides = convpool::get_dilations(node);
auto paddings = convpool::get_pads(node);
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
auto& padding_below = paddings.first;
auto& padding_above = paddings.second;
convpool::calculate_auto_pads(input.get_shape(),
filters.get_shape(),
window_movement_strides,
window_dilation_strides,
auto_pad_type,
padding_below,
padding_above);
const auto& converted_input = std::make_shared<default_opset::Convert>(input, element::i32);
const auto& converted_filter = std::make_shared<default_opset::Convert>(filter, element::i32);
const Strides default_data_dilation_strides(input.get_shape().size() - 2, 1);
auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1);
auto input_zero_point = make_constant(input.get_element_type(), Shape{}, 0);
auto filters_zero_point = make_constant(filters.get_element_type(), Shape{}, 0);
auto output_zero_point = make_constant(ngraph::element::i32, Shape{}, 0);
const auto& converted_input_zero_point = std::make_shared<default_opset::Convert>(input_zero_point, element::i32);
const auto& converted_filter_zero_point = std::make_shared<default_opset::Convert>(filter_zero_point, element::i32);
if (num_inputs == 2)
{
return {std::make_shared<ngraph::opset0::QuantizedConvolution>(
input,
filters,
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
default_data_dilation_strides,
scale_one,
input_zero_point,
scale_one,
filters_zero_point,
scale_one,
output_zero_point,
ngraph::element::i32,
ngraph::AxisSet{},
ngraph::AxisSet{},
ngraph::AxisSet{})};
}
const auto& input_shape = std::make_shared<default_opset::ShapeOf>(input, element::i32);
const auto& input_rank = std::make_shared<default_opset::ShapeOf>(input_shape, element::i32);
const auto& input_rank_scalar = reshape::interpret_as_scalar(input_rank);
input_zero_point = inputs.at(2);
if (num_inputs == 4)
{
filters_zero_point = inputs.at(3);
}
const auto& one_node = ngraph::op::Constant::create(ngraph::element::i32, {}, {1});
const auto& missing_dimensions =
std::make_shared<default_opset::Range>(one_node, input_rank_scalar, one_node, element::i32);
const auto& resized_filter_zero_point =
std::make_shared<default_opset::Unsqueeze>(converted_filter_zero_point, missing_dimensions);
return {std::make_shared<ngraph::opset0::QuantizedConvolution>(
input,
filters,
window_movement_strides,
window_dilation_strides,
padding_below,
padding_above,
default_data_dilation_strides,
scale_one,
input_zero_point,
scale_one,
filters_zero_point,
scale_one,
output_zero_point,
ngraph::element::i32,
ngraph::AxisSet{},
ngraph::AxisSet{},
ngraph::AxisSet{})};
}
} // namespace set_1
const auto& shifted_input = std::make_shared<default_opset::Subtract>(converted_input, converted_input_zero_point);
const auto& shifted_filter = std::make_shared<default_opset::Subtract>(converted_filter, resized_filter_zero_point);
} // namespace op
const auto& groups = node.get_attribute_value<int64_t>("group", 1);
const auto& strides = convpool::get_strides(node);
const auto& dilations = convpool::get_dilations(node);
const auto& paddings = convpool::get_pads(node);
const ngraph::op::PadType& auto_pad_type = convpool::get_auto_pad(node);
const auto& padding_below = paddings.first;
const auto& padding_above = paddings.second;
} // namespace onnx_import
const auto conv_node = conv_factory::make_ng_convolution(shifted_input,
shifted_filter,
strides,
dilations,
padding_below,
padding_above,
groups,
auto_pad_type);
} // namespace ngraph
return {conv_node};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -2,34 +2,27 @@
// SPDX-License-Identifier: Apache-2.0
//
// Disabled in CMakeList
// Update to higher opset required
#pragma once
#include "ngraph/node.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// \brief Performs ONNX ConvInteger operation.
///
/// \param node The ONNX node object representing this operation.
///
/// \return The vector containing Ngraph nodes producing output of quantized ONNX
/// convolution operation.
OutputVector conv_integer(const Node& node);
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
/// \brief Performs ONNX ConvInteger operation.
///
/// \param node The ONNX node object representing this operation.
///
/// \return The vector containing Ngraph nodes producing output of quantized ONNX
/// convolution operation.
OutputVector conv_integer(const Node& node);
} // namespace set_1
} // namespace set_1
} // namespace op
} // namespace op
} // namespace onnx_import
} // namespace onnx_import
} // namespace ngraph
} // namespace ngraph

View File

@ -2,6 +2,8 @@
// SPDX-License-Identifier: Apache-2.0
//
#include "ops_bridge.hpp"
#include <functional>
#include <iterator>
#include <map>
@ -38,7 +40,7 @@
#include "op/constant_fill.hpp"
#include "op/constant_of_shape.hpp"
#include "op/conv.hpp"
// #include "op/conv_integer.hpp"
#include "op/conv_integer.hpp"
#include "op/conv_transpose.hpp"
#include "op/cos.hpp"
#include "op/cosh.hpp"
@ -156,7 +158,6 @@
#include "op/upsample.hpp"
#include "op/where.hpp"
#include "op/xor.hpp"
#include "ops_bridge.hpp"
namespace ngraph {
namespace onnx_import {
@ -304,7 +305,7 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR("Constant", 13, constant);
REGISTER_OPERATOR("ConstantOfShape", 1, constant_of_shape);
REGISTER_OPERATOR("Conv", 1, conv);
// REGISTER_OPERATOR("ConvInteger", 1, conv_integer);
REGISTER_OPERATOR("ConvInteger", 1, conv_integer);
REGISTER_OPERATOR("ConvTranspose", 1, conv_transpose);
REGISTER_OPERATOR("Compress", 1, compress);
REGISTER_OPERATOR("Cos", 1, cos);

View File

@ -0,0 +1,50 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "utils/conv_factory.hpp"
#include "default_opset.hpp"
#include "exceptions.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/group_conv.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "onnx_import/core/null_node.hpp"
#include "utils/conv_factory.hpp"
#include "utils/convpool.hpp"
#include "utils/reshape.hpp"
namespace ngraph {
namespace onnx_import {
namespace conv_factory {
// Builds the nGraph convolution node backing ONNX Conv-style operators.
// Dispatches on `groups`: a GroupConvolution for groups > 1 (with the
// filters reshaped into the grouped layout first), a plain Convolution
// otherwise. Padding is given as explicit below/above diffs plus an
// auto_pad policy, mirroring the ONNX attributes.
std::shared_ptr<ov::op::Op> make_ng_convolution(const Output<ngraph::Node>& data,
const Output<ngraph::Node>& filters,
const ngraph::Strides& strides,
const ngraph::Strides& dilations,
const ngraph::CoordinateDiff& padding_below,
const ngraph::CoordinateDiff& padding_above,
int64_t groups,
const ngraph::op::PadType& auto_pad) {
if (groups > 1) {
// convpool::get_reshaped_filters converts the ONNX filter tensor into
// the layout GroupConvolution expects (groups folded into a leading
// dimension) — see the helper for the exact transformation.
const auto reshaped_filters = convpool::get_reshaped_filters(filters, groups);
return std::make_shared<default_opset::GroupConvolution>(data,
reshaped_filters,
strides,
padding_below,
padding_above,
dilations,
auto_pad);
} else {
// groups == 1: a regular convolution with the filters used as-is.
return std::make_shared<default_opset::Convolution>(data,
filters,
strides,
padding_below,
padding_above,
dilations,
auto_pad);
}
}
} // namespace conv_factory
} // namespace onnx_import
} // namespace ngraph

View File

@ -0,0 +1,24 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph {
namespace onnx_import {
namespace conv_factory {
/// \brief Factory creating the nGraph convolution node for ONNX Conv-like ops.
///
/// \param data          Input (activation) tensor.
/// \param filters       Filter (weights) tensor, in ONNX layout.
/// \param strides       Window movement strides, one per spatial axis.
/// \param dilations     Filter dilation strides, one per spatial axis.
/// \param padding_below Explicit padding added before each spatial axis.
/// \param padding_above Explicit padding added after each spatial axis.
/// \param groups        Number of convolution groups; values > 1 produce a
///                      GroupConvolution node, 1 produces a Convolution node.
/// \param auto_pad      ONNX auto_pad policy applied to the padding values.
///
/// \return The constructed (Group)Convolution operation.
std::shared_ptr<ov::op::Op> make_ng_convolution(const Output<ngraph::Node>& data,
const Output<ngraph::Node>& filters,
const ngraph::Strides& strides,
const ngraph::Strides& dilations,
const ngraph::CoordinateDiff& padding_below,
const ngraph::CoordinateDiff& padding_above,
int64_t groups,
const ngraph::op::PadType& auto_pad);
} // namespace conv_factory
} // namespace onnx_import
} // namespace ngraph

View File

@ -1,27 +1,15 @@
ir_version: 5
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
input: "w"
input: "x_zero_point"
input: "w_zero_point"
output: "y"
name: "node1"
op_type: "ConvInteger"
attribute {
name: "group"
i: 1
type: INT
}
attribute {
name: "auto_pad"
s: "NOTSET"
type: STRING
}
doc_string: "ConvInteger"
domain: ""
}
name: "test"
name: "ConvInt"
input {
name: "x"
type {
@ -72,6 +60,22 @@ graph {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "w_zero_point"
type {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
}
}
}

View File

@ -0,0 +1,109 @@
ir_version: 7
producer_name: "backend-test"
graph {
node {
input: "x"
input: "w"
input: "x_zero_point"
input: "w_zero_point"
output: "y"
op_type: "ConvInteger"
}
name: "ConvInt"
input {
name: "x"
type {
tensor_type {
elem_type: 3
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 3
}
}
}
}
}
input {
name: "w"
type {
tensor_type {
elem_type: 3
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "x_zero_point"
type {
tensor_type {
elem_type: 3
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "w_zero_point"
type {
tensor_type {
elem_type: 3
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
domain: ""
version: 10
}

View File

@ -1,31 +1,18 @@
ir_version: 5
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
input: "w"
output: "y"
name: "node1"
op_type: "ConvInteger"
attribute {
name: "group"
i: 1
type: INT
}
attribute {
name: "auto_pad"
s: "NOTSET"
type: STRING
}
doc_string: "ConvInteger"
domain: ""
}
name: "test"
name: "ConvInt"
input {
name: "x"
type {
tensor_type {
elem_type: 2
elem_type: 3
shape {
dim {
dim_value: 1
@ -47,7 +34,7 @@ graph {
name: "w"
type {
tensor_type {
elem_type: 2
elem_type: 3
shape {
dim {
dim_value: 1

View File

@ -0,0 +1,109 @@
ir_version: 7
producer_name: "backend-test"
graph {
node {
input: "x"
input: "w"
input: "x_zero_point"
input: "w_zero_point"
output: "y"
op_type: "ConvInteger"
}
name: "ConvInt"
input {
name: "x"
type {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 3
}
dim {
dim_value: 3
}
}
}
}
}
input {
name: "w"
type {
tensor_type {
elem_type: 3
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "x_zero_point"
type {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "w_zero_point"
type {
tensor_type {
elem_type: 3
shape {
dim {
dim_value: 1
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
domain: ""
version: 10
}

View File

@ -1,35 +1,15 @@
ir_version: 5
ir_version: 7
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
input: "w"
input: "x_zero_point"
input: "w_zero_point"
output: "y"
name: "node1"
op_type: "ConvInteger"
attribute {
name: "group"
i: 1
type: INT
}
attribute {
name: "pads"
ints: 1
ints: 1
ints: 1
ints: 1
type: INTS
}
attribute {
name: "auto_pad"
s: "NOTSET"
type: STRING
}
doc_string: "ConvInteger"
domain: ""
}
name: "test"
name: "ConvInt"
input {
name: "x"
type {
@ -37,16 +17,16 @@ graph {
elem_type: 2
shape {
dim {
dim_value: 1
dim_value: 2
}
dim {
dim_value: 1
}
dim {
dim_value: 3
dim_value: 4
}
dim {
dim_value: 3
dim_value: 4
}
}
}
@ -59,16 +39,16 @@ graph {
elem_type: 2
shape {
dim {
dim_value: 1
dim_value: 2
}
dim {
dim_value: 1
}
dim {
dim_value: 2
dim_value: 3
}
dim {
dim_value: 2
dim_value: 3
}
}
}
@ -80,6 +60,22 @@ graph {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 1
}
}
}
}
}
input {
name: "w_zero_point"
type {
tensor_type {
elem_type: 2
shape {
dim {
dim_value: 2
}
}
}
}
@ -91,16 +87,16 @@ graph {
elem_type: 6
shape {
dim {
dim_value: 1
dim_value: 2
}
dim {
dim_value: 1
dim_value: 2
}
dim {
dim_value: 4
dim_value: 2
}
dim {
dim_value: 4
dim_value: 2
}
}
}

View File

@ -751,52 +751,131 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_qlinear_matmul_3d) {
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer) {
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_simple_zero_point) {
auto function = onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer.onnx"));
auto test_case = test::TestCase<TestEngine>(function);
test_case.add_input(std::vector<uint8_t>{2, 3, 4, 5, 6, 7, 8, 9, 10}); // x
test_case.add_input(std::vector<uint8_t>{1, 1, 1, 1}); // w
test_case.add_input(std::vector<uint8_t>{1}); // x_zero_point
// don't change style for better readability
// clang-format off
test_case.add_input(std::vector<uint8_t>{11, 22, 33,
44, 55, 66,
77, 88, 99}); // x
test_case.add_input(std::vector<uint8_t>{1, 2,
3, 4}); // w
test_case.add_input(std::vector<uint8_t>{111}); // x_zero_point
test_case.add_input(std::vector<uint8_t>{1}); // w_zero_point
test_case.add_expected_output({1, 1, 2, 2}, std::vector<uint8_t>{12, 16, 24, 28}); // y
test_case.add_expected_output({1, 1, 2, 2}, std::vector<int32_t>{-391, -325,
-193, -127}); // y
// clang-format on
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_zero_point_zero) {
auto function = onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer.onnx"));
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_int8) {
auto function = onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer_int8.onnx"));
auto test_case = test::TestCase<TestEngine>(function);
test_case.add_input(std::vector<uint8_t>{1, 2, 3, 4, 5, 6, 7, 8, 9}); // x
test_case.add_input(std::vector<uint8_t>{1, 1, 1, 1}); // w
test_case.add_input(std::vector<uint8_t>{0}); // x_zero_point
// don't change style for better readability
// clang-format off
test_case.add_input(std::vector<int8_t>{-11, 22, -33,
44, -55, 66,
-77, 88, -99}); // x
test_case.add_input(std::vector<int8_t>{ 1, -2,
-3, 4}); // w
test_case.add_input(std::vector<int8_t>{-5}); // x_zero_point
test_case.add_input(std::vector<int8_t>{-5}); // w_zero_point
test_case.add_expected_output({1, 1, 2, 2}, std::vector<uint8_t>{12, 16, 24, 28}); // y
test_case.add_expected_output({1, 1, 2, 2}, std::vector<int32_t>{-307, 617,
837, -747}); // y
// clang-format on
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_no_zero_point) {
auto function =
onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer_no_zero_point.onnx"));
auto test_case = test::TestCase<TestEngine>(function);
test_case.add_input(std::vector<uint8_t>{1, 2, 3, 4, 5, 6, 7, 8, 9}); // x
test_case.add_input(std::vector<uint8_t>{1, 1, 1, 1}); // w
// don't change style for better readability
// clang-format off
test_case.add_input(std::vector<int8_t>{-100, -89, -78,
-67, -56, -45,
-34, -23, -12}); // x
test_case.add_input(std::vector<int8_t>{0, 1,
2, 3}); // w
test_case.add_expected_output({1, 1, 2, 2}, std::vector<uint8_t>{12, 16, 24, 28}); // y
test_case.add_expected_output({1, 1, 2, 2}, std::vector<int32_t>{-391, -325,
-193, -127}); // y
// clang-format on
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_pads) {
auto function = onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer_pads.onnx"));
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_vector_w_zero_point) {
auto function = onnx_import::import_onnx_model(
file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer_vector_w_zero_point.onnx"));
auto test_case = test::TestCase<TestEngine>(function);
test_case.add_input(std::vector<uint8_t>{2, 3, 4, 5, 6, 7, 8, 9, 10}); // x
test_case.add_input(std::vector<uint8_t>{1, 1, 1, 1}); // w
test_case.add_input(std::vector<uint8_t>{1}); // x_zero_point
// don't change style for better readability
// clang-format off
test_case.add_input(std::vector<uint8_t>{11, 22, 33, 44,
55, 66, 77, 88,
99, 88, 77, 66,
55, 44, 33, 22,
test_case.add_expected_output({1, 1, 4, 4},
std::vector<uint8_t>{1, 3, 5, 3, 5, 12, 16, 9, 11, 24, 28, 15, 7, 15, 17, 9}); // y
1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16}); // x
test_case.add_input(std::vector<uint8_t>{2, 2, 3,
4, 5, 6,
7, 8, 9,
2, 2, 3,
4, 5, 6,
7, 8, 9}); // w
test_case.add_input(std::vector<uint8_t>{1}); // x_zero_point
test_case.add_input(std::vector<uint8_t>{1, 2}); // w_zero_point
test_case.add_expected_output({2, 2, 2, 2}, std::vector<int32_t>{2702, 2647,
2174, 1855,
2183, 2095,
1589, 1303,
258, 295,
406, 443,
213, 241,
325, 353}); // y
// clang-format on
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_conv_integer_overload) {
auto function =
onnx_import::import_onnx_model(file_util::path_join(SERIALIZED_ZOO, "onnx/conv_integer_overload.onnx"));
auto test_case = test::TestCase<TestEngine>(function);
// don't change style for better readability
// clang-format off
test_case.add_input(std::vector<uint8_t>{255, 255, 255,
0, 0, 0,
255, 255, 255}); // x
test_case.add_input(std::vector<int8_t>{127, -128,
-128, 127}); // w
test_case.add_input(std::vector<uint8_t>{255}); // x_zero_point
test_case.add_input(std::vector<int8_t>{-128}); // w_zero_point
test_case.add_expected_output({1, 1, 2, 2}, std::vector<int32_t>{-65025, -65025,
-65025, -65025}); // y
// clang-format on
test_case.run();
}

View File

@ -24,12 +24,6 @@ IE_CPU.onnx_model_dequantize_linear_1d_zero_scale_int8
# C++ exception with description "Input data precision not supported. Expected float.
IE_CPU.onnx_model_dequantize_linear_1d_zero_scale_int8_4d
# Not supported ONNX op: QuantizedConvolution
onnx_model_conv_integer
onnx_model_conv_integer_zero_point_zero
onnx_model_conv_integer_no_zero_point
onnx_model_conv_integer_pads
# No support yet for RandomUniform
onnx_model_random_uniform
onnx_model_random_uniform_like

View File

@ -68,10 +68,6 @@ INTERPRETER.onnx_model_dequantize_linear_1d_zero_scale_uint8
INTERPRETER.onnx_model_dequantize_linear_1d_zero_scale_int8
INTERPRETER.onnx_model_dequantize_linear_1d_zero_scale_int8_4d
INTERPRETER.onnx_model_dequantize_linear_1d_zero_scale_uint8_negative_axis
INTERPRETER.onnx_model_conv_integer
INTERPRETER.onnx_model_conv_integer_zero_point_zero
INTERPRETER.onnx_model_conv_integer_no_zero_point
INTERPRETER.onnx_model_conv_integer_pads
# Legacy tests with unsupported features from opset4 LSTM/GRU/RNN
# Peepholes input unsupported

View File

@ -74,8 +74,6 @@ xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node
"half_pixel")
xfail_issue_38725 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Loop): "
"value info has no element type specified")
xfail_issue_38732 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
"ConvInteger")
xfail_issue_38734 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "
"ai.onnx.preview.training.Adam")
xfail_issue_38735 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations: "

View File

@ -26,7 +26,6 @@ from tests import (
xfail_issue_38710,
xfail_issue_38713,
xfail_issue_38724,
xfail_issue_38732,
xfail_issue_38734,
xfail_issue_38735,
xfail_issue_39658,
@ -364,12 +363,6 @@ tests_expected_to_fail = [
"OnnxBackendNodeModelTest.test_det_2d_cpu",
"OnnxBackendNodeModelTest.test_det_nd_cpu",
),
(
xfail_issue_38732,
"OnnxBackendNodeModelTest.test_convinteger_without_padding_cpu",
"OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu",
"OnnxBackendNodeModelTest.test_basic_convinteger_cpu",
),
(
xfail_issue_38734,
"OnnxBackendNodeModelTest.test_adam_multiple_cpu",

View File

@ -25,7 +25,6 @@ from tests_compatibility import (
xfail_issue_38710,
xfail_issue_38713,
xfail_issue_38724,
xfail_issue_38732,
xfail_issue_38734,
xfail_issue_38735,
xfail_issue_39658,
@ -323,12 +322,6 @@ tests_expected_to_fail = [
"OnnxBackendNodeModelTest.test_det_2d_cpu",
"OnnxBackendNodeModelTest.test_det_nd_cpu",
),
(
xfail_issue_38732,
"OnnxBackendNodeModelTest.test_convinteger_without_padding_cpu",
"OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu",
"OnnxBackendNodeModelTest.test_basic_convinteger_cpu",
),
(
xfail_issue_38734,
"OnnxBackendNodeModelTest.test_adam_multiple_cpu",