Fix Max and Average Pooling op behavior with ceil rounding (#5204)

* Fix Max and Avg Pooling behavior with ceil rounding

* Remove redundant code

* Add backend test to check the problematic case
This commit is contained in:
Bartosz Lesniewski 2021-04-29 05:58:03 +02:00 committed by GitHub
parent 31b161097d
commit 39b4b65ffd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 117 additions and 6 deletions

View File

@ -26,8 +26,10 @@ const std::vector<std::vector<size_t >> kernels = {{3, 3},
{3, 5}};
const std::vector<std::vector<size_t >> kernel3D = {{2, 2, 2}};
const std::vector<std::vector<size_t >> strides = {{1, 1},
{1, 2}};
const std::vector<std::vector<size_t>> strides = {{1, 1},
{1, 2},
{2, 1},
{2, 2}};
const std::vector<std::vector<size_t >> strides3D = {{1, 1, 1},
{2, 2, 2}};
const std::vector<std::vector<size_t >> stridess3D = {{2, 2, 2}};

View File

@ -10,8 +10,6 @@
std::vector<std::string> disabledTestPatterns() {
std::vector<std::string> retVector{
// TODO: Issue 26264
R"(.*(MaxPool|AvgPool).*S\(1\.2\).*Rounding=ceil.*)",
// TODO: Issue 31841
R"(.*(QuantGroupConvBackpropData3D).*)",
// TODO: Issue 31843

View File

@ -177,6 +177,13 @@ namespace ngraph
input_batch_transform_start[i] + window_shape_this_dim;
input_batch_transform_padding_below[i] = padding_below[i - 2];
input_batch_transform_padding_above[i] = padding_above[i - 2];
// If a window (kernel) is out of arg shape bounds, trim it to fit
auto padded_upper_bound =
arg_shape[i] + padding_below[i - 2] + padding_above[i - 2];
if (input_batch_transform_end[i] > padded_upper_bound)
{
input_batch_transform_end[i] = padded_upper_bound;
}
}
for (size_t i = 0; i < arg_shape.size(); i++)
@ -204,6 +211,20 @@ namespace ngraph
T result = 0;
size_t n_elements = 0;
// The below conditions are to provide conformance between the ref and plugins:
// If exclude_padding is disabled (include_padding... enabled), then:
// The size of window doesn't change even if the window was clipped to fit the
// input, number of elements will be equal to window_size.width *
// window_size.height. The exception from this rule is if padding is not
// present, then window size is calculated each time.
auto padding_present = padding_below[0] != 0 || padding_below[1] != 0 ||
padding_above[0] != 0 || padding_above[1] != 0;
if (include_padding_in_avg_computation && padding_present)
{
n_elements = shape_size(window_shape);
}
for (const Coordinate& input_batch_coord : input_batch_transform)
{
bool in_bounds =
@ -214,7 +235,11 @@ namespace ngraph
T v = in_bounds ? arg[input_batch_transform.index(input_batch_coord)]
: static_cast<T>(0);
result += v;
n_elements++;
if (!padding_present ||
(in_bounds && !include_padding_in_avg_computation))
{
n_elements++;
}
}
}

View File

@ -6,7 +6,6 @@
#include <cmath>
#include <numeric>
#include "ngraph/coordinate_transform.hpp"
namespace ngraph
@ -78,6 +77,13 @@ namespace ngraph
input_batch_transform_start[i] = movement_stride * out_coord[i];
input_batch_transform_end[i] =
input_batch_transform_start[i] + window_shape_this_dim;
// If a window (kernel) is out of arg shape bounds, trim it to fit
auto padded_upper_bound =
arg_shape[i] + padding_below[i - 2] + padding_above[i - 2];
if (input_batch_transform_end[i] > padded_upper_bound)
{
input_batch_transform_end[i] = padded_upper_bound;
}
input_batch_transform_padding_below[i] = padding_below[i - 2];
input_batch_transform_padding_above[i] = padding_above[i - 2];
}

View File

@ -102,6 +102,61 @@ NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_pad)
test_case.run();
}
// AvgPool with CEIL rounding, width stride 2 and symmetric 1-pixel padding.
// exclude_pad=true, so padded positions are left out of the averaging
// divisor: windows [pad,1,2], [2,3,4], [4,5,pad] average to 1.5, 3, 4.5.
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_ceil_stride_pad)
{
    const Shape input_shape{1, 1, 1, 5};
    const Shape output_shape{1, 1, 1, 3};
    const Strides window_strides{1, 2};
    const Shape padding_begin{1, 1};
    const Shape padding_end{1, 1};
    const Shape window_shape{3, 3};
    const bool exclude_padding = true;

    auto param = make_shared<op::Parameter>(element::f32, input_shape);
    auto pool = make_shared<op::v1::AvgPool>(param,
                                             window_strides,
                                             padding_begin,
                                             padding_end,
                                             window_shape,
                                             exclude_padding,
                                             op::RoundingType::CEIL,
                                             op::PadType::EXPLICIT);
    auto function = make_shared<Function>(pool, ParameterVector{param});

    const std::vector<float> input_values{1, 2, 3, 4, 5};
    const std::vector<float> expected_values{1.5, 3, 4.5};

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<float>({input_values});
    test_case.add_expected_output<float>(output_shape, expected_values);
    test_case.run();
}
// AvgPool with CEIL rounding, width stride 2 and symmetric 1-pixel padding.
// exclude_pad=false, so every window divides by the full kernel size (9)
// even where it overlaps padding: sums 4.5, 18, 9 average to 0.5, 2, 1.
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_ceil_stride_pad_include_padding)
{
    const Shape input_shape{1, 1, 1, 5};
    const Shape output_shape{1, 1, 1, 3};
    const Strides window_strides{1, 2};
    const Shape padding_begin{1, 1};
    const Shape padding_end{1, 1};
    const Shape window_shape{3, 3};
    const bool exclude_padding = false;

    auto param = make_shared<op::Parameter>(element::f32, input_shape);
    auto pool = make_shared<op::v1::AvgPool>(param,
                                             window_strides,
                                             padding_begin,
                                             padding_end,
                                             window_shape,
                                             exclude_padding,
                                             op::RoundingType::CEIL,
                                             op::PadType::EXPLICIT);
    auto function = make_shared<Function>(pool, ParameterVector{param});

    const std::vector<float> input_values{2.5, 2, 12, 4, 5};
    const std::vector<float> expected_values{0.5, 2, 1};

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<float>({input_values});
    test_case.add_expected_output<float>(output_shape, expected_values);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, avg_pool_2d_same_upper)
{
Shape in_shape{1, 1, 3, 3};

View File

@ -99,6 +99,31 @@ NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_pad)
test_case.run();
}
// MaxPool with CEIL rounding, width stride 2 and symmetric 1-pixel padding.
// Windows [pad,1,2], [2,3,4], [4,5,pad] yield maxima 2, 4, 5; the last
// window exists only because of ceil rounding and overlapping padding.
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_ceil_stride_pad)
{
    const Shape input_shape{1, 1, 1, 5};
    const Shape output_shape{1, 1, 1, 3};
    const Strides window_strides{1, 2};
    const Shape padding_begin{1, 1};
    const Shape padding_end{1, 1};
    const Shape window_shape{3, 3};

    auto param = make_shared<op::Parameter>(element::f32, input_shape);
    auto pool = make_shared<op::v1::MaxPool>(param,
                                             window_strides,
                                             padding_begin,
                                             padding_end,
                                             window_shape,
                                             op::RoundingType::CEIL,
                                             op::PadType::EXPLICIT);
    auto function = make_shared<Function>(pool, ParameterVector{param});

    const std::vector<float> input_values{1, 2, 3, 4, 5};
    const std::vector<float> expected_values{2, 4, 5};

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<float>({input_values});
    test_case.add_expected_output<float>(output_shape, expected_values);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, max_pool_2d_same_upper)
{
Shape in_shape{1, 1, 3, 3};