[ONNX] Add ONNX Compress operator. (#5400)

* [ONNX] Add support for ONNX's Compress operator

* NonZero fixes

* Add unit tests as constant_network

* Style

* Move compress tests

* xfail python tests

* Fix func test

* Review Fix I
This commit is contained in:
Tomasz Socha 2021-04-29 06:49:05 +02:00 committed by GitHub
parent c97bb90a91
commit 64a032fa18
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 443 additions and 14 deletions

View File

@ -40,7 +40,7 @@ TEST(nop_elimination, eliminate_convert) {
TEST(nop_elimination, convert_type_agnostic) {
Shape shape{};
auto type = element::from<char>();
auto type = element::from<int8_t>();
auto A = make_shared<op::Parameter>(type, shape);
auto c1 = make_shared<op::v0::Convert>(A, element::from<uint8_t>());
auto c = make_shared<op::v0::Convert>(c1, element::f32);

View File

@ -51,7 +51,7 @@ void op::v3::NonZero::validate_and_infer_types()
const auto input_et = get_input_element_type(0);
NODE_VALIDATION_CHECK(this,
input_et.is_integral() || input_et.is_real(),
input_et.is_integral_number() || input_et.is_real(),
"NonZero input data type needs to be a numeric type. Got: ",
input_et);
NODE_VALIDATION_CHECK(this,
@ -154,13 +154,18 @@ namespace nonzero
switch (input->get_element_type())
{
NGRAPH_TYPE_CASE(evaluate_nonzero, i8, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, i16, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, i32, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, i64, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u8, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u16, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u32, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, u64, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, bf16, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, f16, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, f32, input, output);
NGRAPH_TYPE_CASE(evaluate_nonzero, f64, input, output);
default: rc = false; break;
}
return rc;

View File

@ -60,5 +60,5 @@ namespace ngraph
/// OutputEdge(5, "out1")
/// OutputEdge(5, "out2")
using OutputEdge = Edge<EdgeType::OUTPUT>;
}
}
} // namespace onnx_editor
} // namespace ngraph

View File

@ -0,0 +1,51 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <memory>
#include "default_opset.hpp"
#include "ngraph/builder/reshape.hpp"
#include "op/compress.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// \brief Converts the ONNX Compress operator to an nGraph subgraph.
///
/// Selects slices of the input tensor along the given axis for which the
/// boolean `condition` input is true. The mask is turned into indices with
/// NonZero and the selection is performed with Gather.
///
/// \param node  ONNX node wrapping the Compress operator.
/// \return      OutputVector with a single output holding the compressed tensor.
OutputVector compress(const Node& node)
{
    auto data = node.get_ng_inputs().at(0);
    // NonZero rejects boolean inputs - promote the condition mask to u8 first.
    auto condition = std::make_shared<default_opset::Convert>(
        node.get_ng_inputs().at(1), element::u8);

    int64_t axis = 0;
    if (node.has_attribute("axis"))
    {
        axis = node.get_attribute_value<int64_t>("axis");
    }
    else
    {
        // Per the ONNX spec, when no axis is given the input is flattened to 1D
        // before compression. flatten() yields a {1, N} tensor, so squeeze
        // explicitly on axis 0: squeezing all size-1 dims would produce a
        // scalar for N == 1 and break the Gather below.
        const auto flatten_axis =
            default_opset::Constant::create(element::i64, Shape{1}, {0});
        data = std::make_shared<default_opset::Squeeze>(
            ngraph::builder::opset1::flatten(data, axis), flatten_axis);
    }
    const auto axis_node = default_opset::Constant::create(element::i64, Shape{}, {axis});
    const auto zero_node = default_opset::Constant::create(element::i64, Shape{}, {0});
    // NonZero returns indices shaped {rank, num_selected}; the condition is 1D,
    // so squeeze out the leading dim to get a flat index vector for Gather.
    const auto result = std::make_shared<default_opset::Gather>(
        data,
        std::make_shared<default_opset::Squeeze>(
            std::make_shared<default_opset::NonZero>(condition), zero_node),
        axis_node);

    return {result};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once
#include "ngraph/output_vector.hpp"
#include "onnx_import/core/node.hpp"
namespace ngraph
{
namespace onnx_import
{
namespace op
{
namespace set_1
{
/// \brief Converts the ONNX Compress operator to an nGraph node vector.
///
/// \param node  ONNX node wrapping the Compress operator.
/// \return      OutputVector with a single compressed-tensor output.
OutputVector compress(const Node& node);
}
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -17,7 +17,11 @@ namespace ngraph
{
OutputVector non_zero(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
auto data = node.get_ng_inputs().at(0);
if (data.get_element_type() == element::boolean)
{
data = std::make_shared<default_opset::Convert>(data, element::u8);
}
return {std::make_shared<default_opset::NonZero>(data, element::i64)};
}

View File

@ -27,6 +27,7 @@
#include "op/cast.hpp"
#include "op/ceil.hpp"
#include "op/clip.hpp"
#include "op/compress.hpp"
#include "op/concat.hpp"
#include "op/constant.hpp"
#include "op/constant_fill.hpp"
@ -326,6 +327,7 @@ namespace ngraph
REGISTER_OPERATOR("Conv", 1, conv);
// REGISTER_OPERATOR("ConvInteger", 1, conv_integer);
REGISTER_OPERATOR("ConvTranspose", 1, conv_transpose);
REGISTER_OPERATOR("Compress", 1, compress);
REGISTER_OPERATOR("Cos", 1, cos);
REGISTER_OPERATOR("Cosh", 1, cosh);
REGISTER_OPERATOR("ConstantFill", 1, constant_fill);

View File

@ -42,8 +42,6 @@ xfail_issue_33596 = xfail_test(reason="RuntimeError: nGraph does not support dif
"SequenceEmpty, SequenceInsert, SequenceErase, SequenceLength ")
xfail_issue_33606 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"Det")
xfail_issue_33644 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"Compress")
xfail_issue_33651 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"
"TfIdfVectorizer")
xfail_issue_33581 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:"

View File

@ -19,7 +19,6 @@ from tests import (BACKEND_NAME,
xfail_issue_33596,
xfail_issue_33606,
xfail_issue_33633,
xfail_issue_33644,
xfail_issue_33651,
xfail_issue_38091,
xfail_issue_38699,
@ -312,11 +311,6 @@ tests_expected_to_fail = [
(xfail_issue_33606,
"OnnxBackendNodeModelTest.test_det_2d_cpu",
"OnnxBackendNodeModelTest.test_det_nd_cpu"),
(xfail_issue_33644,
"OnnxBackendNodeModelTest.test_compress_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_compress_default_axis_cpu",
"OnnxBackendNodeModelTest.test_compress_1_cpu",
"OnnxBackendNodeModelTest.test_compress_0_cpu"),
(xfail_issue_38732,
"OnnxBackendNodeModelTest.test_convinteger_with_padding_cpu",
"OnnxBackendNodeModelTest.test_basic_convinteger_cpu"),
@ -374,6 +368,10 @@ tests_expected_to_fail = [
(xfail_issue_44956,
"OnnxBackendNodeModelTest.test_loop11_cpu"),
(xfail_issue_44957,
"OnnxBackendNodeModelTest.test_compress_0_cpu",
"OnnxBackendNodeModelTest.test_compress_1_cpu",
"OnnxBackendNodeModelTest.test_compress_default_axis_cpu",
"OnnxBackendNodeModelTest.test_compress_negative_axis_cpu",
"OnnxBackendNodeModelTest.test_nonzero_example_cpu"),
(xfail_issue_44958,
"OnnxBackendNodeModelTest.test_upsample_nearest_cpu"),

View File

@ -439,7 +439,9 @@ endif()
if (NGRAPH_ONNX_EDITOR_ENABLE)
list(APPEND SRC onnx/onnx_editor.cpp)
list(APPEND MULTI_TEST_SRC onnx/onnx_test_utils.in.cpp)
list(APPEND MULTI_TEST_SRC
onnx/onnx_test_utils.in.cpp
onnx/onnx_import_with_editor.in.cpp)
endif()
add_clang_format_target(unit-test_clang FOR_SOURCES ${SRC} ${MULTI_TEST_SRC})

View File

@ -0,0 +1,64 @@
ir_version: 3
producer_name: "backend-test"
graph {
node {
input: "input"
input: "condition"
output: "output"
op_type: "Compress"
attribute {
name: "axis"
i: 0
type: INT
}
}
name: "test_compress_0"
input {
name: "input"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "condition"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 3
}
}
}
}
}
output {
name: "output"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 9
}

View File

@ -0,0 +1,64 @@
ir_version: 3
producer_name: "backend-test"
graph {
node {
input: "input"
input: "condition"
output: "output"
op_type: "Compress"
attribute {
name: "axis"
i: 1
type: INT
}
}
name: "test_compress_1"
input {
name: "input"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "condition"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 2
}
}
}
}
}
output {
name: "output"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 9
}

View File

@ -0,0 +1,56 @@
ir_version: 3
producer_name: "backend-test"
graph {
node {
input: "input"
input: "condition"
output: "output"
op_type: "Compress"
}
name: "test_compress_default_axis"
input {
name: "input"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "condition"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 5
}
}
}
}
}
output {
name: "output"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
version: 9
}

View File

@ -0,0 +1,64 @@
ir_version: 6
producer_name: "backend-test"
graph {
node {
input: "input"
input: "condition"
output: "output"
op_type: "Compress"
attribute {
name: "axis"
i: -1
type: INT
}
}
name: "test_compress_negative_axis"
input {
name: "input"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 2
}
}
}
}
}
input {
name: "condition"
type {
tensor_type {
elem_type: 9
shape {
dim {
dim_value: 2
}
}
}
}
}
output {
name: "output"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 3
}
dim {
dim_value: 1
}
}
}
}
}
}
opset_import {
version: 11
}

View File

@ -0,0 +1,98 @@
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "gtest/gtest.h"
#include "onnx_editor/editor.hpp"
#include "ngraph/ngraph.hpp"
#include "util/test_case.hpp"
#include "util/test_control.hpp"
#include "util/engine/test_engines.hpp"
using namespace ngraph;
static std::string s_manifest = "${MANIFEST}";
using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
// ############################################################################ CORE TESTS
// Compress with explicit axis=0: rows of a {3, 2} input are kept where the
// boolean condition is true.
NGRAPH_TEST(${BACKEND_NAME}, onnx_compress_axis_0)
{
    onnx_editor::ONNXModelEditor editor{
        file_util::path_join(SERIALIZED_ZOO, "onnx/compress_0.prototxt")};

    // Freeze both graph inputs to constants via the model editor.
    std::map<std::string, std::shared_ptr<ngraph::op::Constant>> input_values;
    input_values["input"] =
        op::Constant::create(element::f32, Shape{3, 2}, {1., 2., 3., 4., 5., 6.});
    input_values["condition"] =
        op::Constant::create(element::boolean, Shape{3}, {false, true, true});
    editor.set_input_values(input_values);

    auto test_case = test::TestCase<TestEngine>(editor.get_function());
    test_case.add_expected_output<float>(Shape{2, 2}, {3., 4., 5., 6.});
    test_case.run();
}
// Compress with explicit axis=1: columns of a {3, 2} input are kept where the
// boolean condition is true.
NGRAPH_TEST(${BACKEND_NAME}, onnx_compress_axis_1)
{
    onnx_editor::ONNXModelEditor editor{
        file_util::path_join(SERIALIZED_ZOO, "onnx/compress_1.prototxt")};

    // Freeze both graph inputs to constants via the model editor.
    std::map<std::string, std::shared_ptr<ngraph::op::Constant>> input_values;
    input_values["input"] =
        op::Constant::create(element::f32, Shape{3, 2}, {1., 2., 3., 4., 5., 6.});
    input_values["condition"] =
        op::Constant::create(element::boolean, Shape{2}, {false, true});
    editor.set_input_values(input_values);

    auto test_case = test::TestCase<TestEngine>(editor.get_function());
    test_case.add_expected_output<float>(Shape{3, 1}, {2., 4., 6.});
    test_case.run();
}
// Compress without an axis attribute: the {3, 2} input is flattened to 1D and
// elements are kept where the 5-element condition is true.
NGRAPH_TEST(${BACKEND_NAME}, onnx_compress_default_axis)
{
    onnx_editor::ONNXModelEditor editor{
        file_util::path_join(SERIALIZED_ZOO, "onnx/compress_default_axis.prototxt")};

    // Freeze both graph inputs to constants via the model editor.
    std::map<std::string, std::shared_ptr<ngraph::op::Constant>> input_values;
    input_values["input"] =
        op::Constant::create(element::f32, Shape{3, 2}, {1., 2., 3., 4., 5., 6.});
    input_values["condition"] =
        op::Constant::create(element::boolean, Shape{5}, {false, true, false, false, true});
    editor.set_input_values(input_values);

    auto test_case = test::TestCase<TestEngine>(editor.get_function());
    test_case.add_expected_output<float>(Shape{2}, {2., 5.});
    test_case.run();
}
// Compress with a negative axis (-1 on a rank-2 input == axis 1): columns are
// kept where the boolean condition is true.
NGRAPH_TEST(${BACKEND_NAME}, onnx_compress_negative_axis)
{
    onnx_editor::ONNXModelEditor editor{
        file_util::path_join(SERIALIZED_ZOO, "onnx/compress_negative_axis.prototxt")};

    // Freeze both graph inputs to constants via the model editor.
    std::map<std::string, std::shared_ptr<ngraph::op::Constant>> input_values;
    input_values["input"] =
        op::Constant::create(element::f32, Shape{3, 2}, {1., 2., 3., 4., 5., 6.});
    input_values["condition"] =
        op::Constant::create(element::boolean, Shape{2}, {false, true});
    editor.set_input_values(input_values);

    auto test_case = test::TestCase<TestEngine>(editor.get_function());
    test_case.add_expected_output<float>(Shape{3, 1}, {2., 4., 6.});
    test_case.run();
}