ONNX Adaptive average pooling implementation (#14682)

This commit is contained in:
Tomasz Dołbniak 2022-12-16 15:39:40 +01:00 committed by GitHub
parent e9812caff5
commit 56606ec46f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 242 additions and 0 deletions

View File

@ -0,0 +1,27 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op/adaptive_avg_pooling2d.hpp"
#include "default_opset.hpp"
#include "exceptions.hpp"
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
/// \brief Translates the org.pytorch.aten adaptive_avg_pool2d node into an
///        nGraph AdaptiveAvgPool operation.
///
/// \param node  ONNX node providing exactly two inputs: the data tensor and
///              the requested output spatial shape.
/// \return      A single-element OutputVector holding the AdaptiveAvgPool output.
OutputVector adaptive_avg_pooling2d(const Node& node) {
    const auto ng_inputs = node.get_ng_inputs();
    // The op is only well-defined with both the data and the target-shape inputs.
    CHECK_VALID_NODE(node,
                     ng_inputs.size() == 2,
                     "adaptive_avg_pooling2d expects 2 input tensors. Got: ",
                     ng_inputs.size());
    const auto& data = ng_inputs.at(0);
    const auto& out_shape = ng_inputs.at(1);
    return {std::make_shared<default_opset::AdaptiveAvgPool>(data, out_shape)};
}
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -0,0 +1,15 @@
// Copyright (C) 2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "onnx_import/core/node.hpp"
namespace ngraph {
namespace onnx_import {
namespace op {
namespace set_1 {
OutputVector adaptive_avg_pooling2d(const Node& node);
} // namespace set_1
} // namespace op
} // namespace onnx_import
} // namespace ngraph

View File

@ -15,6 +15,7 @@
#include "op/abs.hpp"
#include "op/acos.hpp"
#include "op/acosh.hpp"
#include "op/adaptive_avg_pooling2d.hpp"
#include "op/add.hpp"
#include "op/affine.hpp"
#include "op/and.hpp"
@ -281,6 +282,7 @@ void OperatorsBridge::overwrite_operator(const std::string& name, const std::str
}
static const char* const MICROSOFT_DOMAIN = "com.microsoft";
static const char* const PYTORCH_ATEN_DOMAIN = "org.pytorch.aten";
// Registers a translator for an operator in the default ("") ONNX domain:
// maps (op name, since-version) to the conversion function taken from the
// matching op::set_<ver_> namespace.
#define REGISTER_OPERATOR(name_, ver_, fn_) \
m_map[""][name_].emplace(ver_, std::bind(op::set_##ver_::fn_, std::placeholders::_1));
@ -518,6 +520,8 @@ OperatorsBridge::OperatorsBridge() {
REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "EmbedLayerNormalization", 1, embed_layer_normalization);
REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "SkipLayerNormalization", 1, skip_layer_normalization);
REGISTER_OPERATOR_WITH_DOMAIN(MICROSOFT_DOMAIN, "Trilu", 1, trilu);
REGISTER_OPERATOR_WITH_DOMAIN(PYTORCH_ATEN_DOMAIN, "adaptive_avg_pool2d", 1, adaptive_avg_pooling2d);
}
#undef REGISTER_OPERATOR

View File

@ -74,6 +74,7 @@ set(MULTI_TEST_SRC
onnx_import_dyn_shapes.in.cpp
onnx_import_external_data.in.cpp
onnx_import_org_openvino.in.cpp
onnx_import_org_pytorch.in.cpp
onnx_import_reshape.in.cpp
onnx_import_rnn.in.cpp
onnx_import_quant.in.cpp

View File

@ -0,0 +1,60 @@
ir_version: 7
producer_name: "backend-test"
graph {
node {
input: "x"
input: "out_shape"
output: "y"
op_type: "adaptive_avg_pool2d"
domain: "org.pytorch.aten"
}
name: "test_adaptive_avg_pool2d"
initializer {
dims: 1
data_type: 7
int64_data: 2
name: "out_shape"
}
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
domain: "org.pytorch.aten"
version: 1
}

View File

@ -0,0 +1,67 @@
ir_version: 7
producer_name: "backend-test"
graph {
node {
input: "x"
input: "out_shape"
output: "y"
op_type: "adaptive_avg_pool2d"
domain: "org.pytorch.aten"
}
name: "test_adaptive_avg_pool2d"
initializer {
dims: 2
data_type: 6
int32_data: 2
int32_data: 2
name: "out_shape"
}
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 4
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_value: 1
}
dim {
dim_value: 2
}
dim {
dim_value: 2
}
}
}
}
}
}
opset_import {
domain: "org.pytorch.aten"
version: 1
}

View File

@ -0,0 +1,68 @@
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <vector>
// clang-format off
#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
#endif
#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
#endif
// clang-format on
#include "common_test_utils/file_utils.hpp"
#include "default_opset.hpp"
#include "engines_util/test_case.hpp"
#include "engines_util/test_engines.hpp"
#include "onnx_import/onnx.hpp"
#include "util/test_control.hpp"
NGRAPH_SUPPRESS_DEPRECATED_START
using namespace ngraph;
static std::string s_manifest = "${MANIFEST}";
static std::string s_device = test::backend_name_to_device("${BACKEND_NAME}");
// Imports the aten adaptive_avg_pool2d model with a 4D (NCHW) input and checks
// that a 1x1x4x4 tensor is pooled down to the expected 1x1x2x2 averages.
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_adaptive_avg_pooling2d_nchw) {
    const auto model_path = file_util::path_join(CommonTestUtils::getExecutableDirectory(),
                                                 SERIALIZED_ZOO,
                                                 "onnx/org.pytorch/adaptive_avg_pooling2d_nchw.onnx");
    const auto function = onnx_import::import_onnx_model(model_path);

    auto test_case = test::TestCase(function, s_device);
    // 4x4 input laid out row-major; each expected value is the mean of one 2x2 tile.
    test_case.add_input<float>({0.9945, 0.3466, 0.2894, 0.9318,
                                0.0115, 0.4867, 0.7608, 0.1550,
                                0.8485, 0.4971, 0.8833, 0.4579,
                                0.3673, 0.5410, 0.2004, 0.1519});
    test_case.add_expected_output<float>(Shape{1, 1, 2, 2}, {0.4598249, 0.5342500, 0.5634750, 0.4233750});
    test_case.run();
}
// Imports the aten adaptive_avg_pool2d model with a 3D (CHW) input and checks
// that each pair of neighbouring values along the last axis is averaged.
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_adaptive_avg_pooling2d_chw) {
    const auto model_path = file_util::path_join(CommonTestUtils::getExecutableDirectory(),
                                                 SERIALIZED_ZOO,
                                                 "onnx/org.pytorch/adaptive_avg_pooling2d_chw.onnx");
    const auto function = onnx_import::import_onnx_model(model_path);

    auto test_case = test::TestCase(function, s_device);
    test_case.add_input<float>({12.0, -1.0, -56.0, 20.0,
                                1.0, -8.0, 7.0, 9.0});
    test_case.add_expected_output<float>(Shape{1, 2, 2}, {5.5, -18.0, -3.5, 8.0});
    test_case.run();
}