Maxpool fix bug (#3718)

* style-apply

* Update spec

* Remove maxpool back_prop method

* style-apply
Piotr Szmelczynski, 2021-01-22 12:19:34 +01:00, committed by GitHub
parent 1044ed4352
commit e346bdde14
4 changed files with 101 additions and 103 deletions


@@ -67,7 +67,7 @@
**Inputs**:
* **1**: 4D or 5D input tensor. Required.
* **1**: 3D, 4D or 5D input tensor. Required.
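Since the spec now also admits a 3D (N, C, W) input, a 1D kernel is enough to instantiate the op. A minimal sketch, using the same op::v1::MaxPool constructor exercised by the type_prop tests further down (the helper name and the concrete shapes are illustrative, not part of this change):

#include <memory>

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Illustrative only: 1D max pooling over an (N, C, W) = (1, 3, 32) input.
std::shared_ptr<op::v1::MaxPool> make_1d_max_pool()
{
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{1, 3, 32});
    return std::make_shared<op::v1::MaxPool>(arg,
                                             Strides{1}, // window movement stride
                                             Shape{0},   // pads_begin
                                             Shape{0},   // pads_end
                                             Shape{2},   // kernel
                                             op::RoundingType::FLOOR,
                                             op::PadType::EXPLICIT);
}

With a kernel of 2 and stride 1 over a width of 32 and no padding, the inferred output shape would be {1, 3, 31}.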
**Mathematical Formulation**


@@ -27,100 +27,6 @@ namespace ngraph
{
namespace reference
{
template <typename T>
void max_pool_backprop(const T* arg_forward,
const T* delta,
T* out,
const Shape& delta_shape,
const Shape& out_shape, // same as arg_forward_shape
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above)
{
CoordinateTransform out_transform(out_shape);
for (const Coordinate& out_coord : out_transform)
{
out[out_transform.index(out_coord)] = 0;
}
CoordinateTransform delta_transform(delta_shape);
for (const Coordinate& delta_coord : delta_transform)
{
size_t img_index = delta_coord[0];
size_t channel = delta_coord[1];
size_t n_image_dimensions = out_shape.size() - 2;
Coordinate source_window_transform_start(2 + n_image_dimensions);
Coordinate source_window_transform_end(2 + n_image_dimensions);
Strides source_window_transform_source_strides(2 + n_image_dimensions, 1);
AxisVector source_window_transform_source_axis_order(2 + n_image_dimensions);
CoordinateDiff source_window_transform_padding_below(2 + n_image_dimensions);
CoordinateDiff source_window_transform_padding_above(2 + n_image_dimensions);
source_window_transform_start[0] = img_index;
source_window_transform_end[0] = img_index + 1;
source_window_transform_start[1] = channel;
source_window_transform_end[1] = channel + 1;
source_window_transform_padding_below[0] = 0;
source_window_transform_padding_below[1] = 0;
source_window_transform_padding_above[0] = 0;
source_window_transform_padding_above[1] = 0;
for (size_t i = 2; i < n_image_dimensions + 2; i++)
{
size_t window_shape_this_dim = window_shape[i - 2];
size_t movement_stride = window_movement_strides[i - 2];
source_window_transform_start[i] = movement_stride * delta_coord[i];
source_window_transform_end[i] =
source_window_transform_start[i] + window_shape_this_dim;
source_window_transform_padding_below[i] = padding_below[i - 2];
source_window_transform_padding_above[i] = padding_above[i - 2];
}
std::iota(begin(source_window_transform_source_axis_order),
end(source_window_transform_source_axis_order),
0);
CoordinateTransform source_window_transform(
out_shape,
source_window_transform_start,
source_window_transform_end,
source_window_transform_source_strides,
source_window_transform_source_axis_order,
source_window_transform_padding_below,
source_window_transform_padding_above);
Coordinate argmax_coord;
bool argmax_coord_valid = false;
T max_val = 0; // just initializing to keep compiler happy, this 0 is ignored
for (const Coordinate& source_window_coord : source_window_transform)
{
if (source_window_transform.has_source_coordinate(source_window_coord))
{
T candidate =
arg_forward[source_window_transform.index(source_window_coord)];
if (!argmax_coord_valid || candidate > max_val)
{
max_val = candidate;
argmax_coord = source_window_coord;
argmax_coord_valid = true;
}
}
}
if (argmax_coord_valid)
{
out[source_window_transform.index(argmax_coord)] +=
delta[delta_transform.index(delta_coord)];
}
}
}
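For context, the routine removed above scattered each incoming delta onto the position that produced the maximum of its pooling window in the forward pass. A much-condensed, standalone 1D sketch of that scatter-to-argmax idea (toy code, not the library implementation):

#include <cstddef>
#include <vector>

// Toy 1D backprop: for every pooled output position, locate the argmax of the
// forward input inside its window and accumulate the incoming delta there.
template <typename T>
void max_pool_backprop_1d(const std::vector<T>& arg_forward,
                          const std::vector<T>& delta,
                          std::vector<T>& out, // same size as arg_forward
                          std::size_t window,
                          std::size_t stride)
{
    out.assign(arg_forward.size(), T{0});
    for (std::size_t i = 0; i < delta.size(); ++i)
    {
        const std::size_t start = i * stride;
        std::size_t argmax = start;
        for (std::size_t j = start + 1; j < start + window && j < arg_forward.size(); ++j)
        {
            if (arg_forward[j] > arg_forward[argmax])
            {
                argmax = j;
            }
        }
        out[argmax] += delta[i];
    }
}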
template <typename T>
void max_pool(const T* arg,
T* out,


@@ -99,23 +99,59 @@ void op::v1::MaxPool::validate_and_infer_types()
}
const PartialShape& arg_shape = get_input_partial_shape(0);
NODE_VALIDATION_CHECK(this,
arg_shape.rank().compatible(3) || arg_shape.rank().compatible(4) ||
arg_shape.rank().compatible(5),
"Expected a 3D, 4D or 5D tensor for the input. Got: ",
arg_shape);
if (arg_shape.rank().is_static())
{
NODE_VALIDATION_CHECK(this,
m_pads_end.size() == arg_shape.rank().get_max_length() - 2,
"Expected pads_end size to be equal to input size - 2. Got: ",
m_pads_end.size());
NODE_VALIDATION_CHECK(this,
m_pads_begin.size() == arg_shape.rank().get_max_length() - 2,
"Expected pads_begin size to be equal to input size - 2. Got: ",
m_pads_begin.size());
NODE_VALIDATION_CHECK(this,
m_kernel.size() == arg_shape.rank().get_max_length() - 2,
"Expected kernel size to be equal to input size - 2. Got: ",
m_kernel.size());
NODE_VALIDATION_CHECK(this,
m_strides.size() == arg_shape.rank().get_max_length() - 2,
"Expected strides size to be equal to input size - 2. Got: ",
m_strides.size());
}
auto output_shape = PartialShape::dynamic();
if (arg_shape.rank().is_static())
{
output_shape = std::vector<Dimension>(arg_shape.rank().get_length(), Dimension::dynamic());
if (arg_shape.rank().get_length() > 1)
output_shape =
std::vector<Dimension>(arg_shape.rank().get_max_length(), Dimension::dynamic());
if (arg_shape[0].is_static())
{
output_shape[0] = arg_shape[0]; // batch size
}
if (arg_shape.rank().get_length() > 2)
if (arg_shape[1].is_static())
{
output_shape[1] = arg_shape[1]; // channel size
}
}
const bool update_auto_padding_succeed =
update_auto_padding(arg_shape, m_pads_end, m_pads_begin);
bool update_auto_padding_succeed = true;
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
update_auto_padding_succeed = update_auto_padding(arg_shape, m_pads_end, m_pads_begin);
}
if (m_auto_pad == PadType::VALID)
{
m_pads_end = Shape(m_pads_end.size(), 0);
m_pads_begin = Shape(m_pads_begin.size(), 0);
}
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
CoordinateDiff pads_begin(m_pads_begin.begin(), m_pads_begin.end());
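The auto-padding handling above boils down to two closed-form rules per spatial dimension: VALID clears both pad vectors, while SAME_UPPER / SAME_LOWER pick pads so the output keeps ceil(in / stride) elements, with any odd leftover element going to pads_end or pads_begin respectively. A hypothetical per-dimension helper capturing that arithmetic (a sketch, not the actual update_auto_padding implementation):

#include <cstddef>

struct SamePads
{
    std::size_t begin;
    std::size_t end;
};

// Sketch of SAME_UPPER / SAME_LOWER padding for one spatial dimension.
inline SamePads same_pads(std::size_t in, std::size_t kernel, std::size_t stride, bool same_lower)
{
    const std::size_t out = (in + stride - 1) / stride;      // ceil(in / stride)
    const std::size_t needed = (out - 1) * stride + kernel;  // span covered by the windows
    const std::size_t total = needed > in ? needed - in : 0; // padding to add in this dimension
    const std::size_t small = total / 2;
    const std::size_t big = total - small;
    // SAME_LOWER puts the odd extra element at the beginning, SAME_UPPER at the end.
    return same_lower ? SamePads{big, small} : SamePads{small, big};
}

For example, same_pads(32, 2, 1, /*same_lower=*/true) yields {1, 0}, which is exactly what the SAME_LOWER tests below assert.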


@@ -21,7 +21,44 @@
using namespace std;
using namespace ngraph;
TEST(type_prop, max_pool_auto_padding)
TEST(type_prop, max_pool_valid_auto_padding)
{
const PartialShape arg_shape{1, 3, 32};
const Strides strides{1};
const Shape pads_begin{2};
const Shape pads_end{2};
const Shape kernel_shape{2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::VALID;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 31}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{0}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
}
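Sanity check on the asserted shape: with VALID padding both pad vectors collapse to zero, so the pooled width is floor((32 - 2) / 1) + 1 = 31, giving an output of {1, 3, 31} with pads_begin and pads_end both {0}, as the test expects.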
TEST(type_prop, max_pool_1D_auto_padding)
{
const PartialShape arg_shape{1, 3, 32};
const Strides strides{1};
const Shape pads_begin{0};
const Shape pads_end{0};
const Shape kernel_shape{2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
}
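Here SAME_LOWER preserves the width: out = ceil(32 / 1) = 32, so the required total padding is (32 - 1) * 1 + 2 - 32 = 1, and SAME_LOWER assigns that single extra element to pads_begin, hence the asserted {1, 3, 32} with pads_begin {1} and pads_end {0}.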
TEST(type_prop, max_pool_2D_auto_padding)
{
const PartialShape arg_shape{1, 3, 32, 32};
const Strides strides{1, 1};
@@ -40,7 +77,26 @@ TEST(type_prop, max_pool_auto_padding)
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_lower)
TEST(type_prop, max_pool_auto_padding_1D_nc_dims_dynamic_same_lower)
{
const PartialShape arg_shape{Dimension::dynamic(), 32, 32};
const Strides strides{1};
const Shape pads_begin{0};
const Shape pads_end{0};
const Shape kernel_shape{2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0}));
}
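Padding is still resolved in this case because it depends only on the spatial dimension, which is static (32); the dynamic batch dimension simply propagates to the output unchanged.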
TEST(type_prop, max_pool_auto_padding_2D_nc_dims_dynamic_same_lower)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};