Pad reference implementation which uses only CoordinateTransformBasic (#4845)

Co-authored-by: Patryk Elszkowski <patryk.elszkowki@intel.com>
This commit is contained in:
Patryk Elszkowski 2021-03-26 09:46:29 +01:00 committed by GitHub
parent 0cfd007206
commit 961437895b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 340 additions and 187 deletions

View File

@ -4,12 +4,9 @@
#pragma once
#include <cmath>
#include "ngraph/axis_vector.hpp"
#include "ngraph/check.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/op/pad.hpp" // for op::PadMode
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/op/util/attr_types.hpp" // for op::PadMode
#include "ngraph/shape.hpp"
namespace ngraph
{
@ -27,5 +24,5 @@ namespace ngraph
const CoordinateDiff& padding_above,
const op::PadMode pad_mode);
}
}
}
} // namespace runtime
} // namespace ngraph

View File

@ -4,10 +4,271 @@
#include "ngraph/runtime/reference/pad.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/check.hpp"
#include "ngraph/coordinate_index.hpp"
#include "ngraph/coordinate_transform.hpp"
#include <cassert>
namespace ngraph
{
namespace runtime
{
namespace impl
{
namespace
{
// Saturate `v` to the closed interval [lo, hi].
// Returns `lo` when `v` falls below the range, `hi` when it exceeds it,
// and `v` itself otherwise. Requires `lo <= hi`.
template <typename T>
T clamp(T v, T lo, T hi)
{
    return v < lo ? lo : (hi < v ? hi : v);
}
struct PadBase
{
PadBase(const char* const data,
const char* const pad_value,
char* const out,
const size_t elem_size,
const Shape& data_shape,
const Shape& out_shape,
const CoordinateDiff& padding_begin,
const CoordinateDiff& padding_end,
const op::PadMode pad_mode)
: data(data)
, pad_value(pad_value)
, out(out)
, elem_size(elem_size)
, data_shape(data_shape)
, out_shape(out_shape)
, padding_begin(padding_begin)
, padding_end(padding_end)
, pad_mode(pad_mode)
, coord(data_shape)
{
}
virtual ~PadBase() = default;
void run() const
{
check_inputs();
CoordinateTransformBasic out_coordinate(out_shape);
char* out_data = out;
for (const auto& out_coord : out_coordinate)
{
const auto in_coord = transform_to_input_data_coord(out_coord);
if (in_coord)
{
const auto in_index = coordinate_index(*in_coord, data_shape);
const auto in_data = data + in_index * elem_size;
std::copy(in_data, in_data + elem_size, out_data);
}
else
{
std::copy(pad_value, pad_value + elem_size, out_data);
}
out_data += elem_size;
}
}
virtual const Coordinate*
transform_to_input_data_coord(const Coordinate& out_coord) const = 0;
virtual void check_inputs() const {}
///
/// DATA
///
const char* const data;
const char* const pad_value;
char* const out;
const size_t elem_size;
const Shape& data_shape;
const Shape& out_shape;
const CoordinateDiff& padding_begin;
const CoordinateDiff& padding_end;
const op::PadMode pad_mode;
mutable Coordinate coord;
};
/// CONSTANT mode: output positions outside the input take the pad value.
struct ConstPad : PadBase
{
    using PadBase::PadBase;

    /// Shift the output coordinate back by padding_begin; positions that
    /// fall outside the input shape map to nullptr (use the pad value).
    const Coordinate*
        transform_to_input_data_coord(const Coordinate& out_coord) const override
    {
        assert(out_coord.size() == coord.size());

        for (size_t i = 0; i != coord.size(); ++i)
        {
            const auto sc = static_cast<std::ptrdiff_t>(out_coord[i]);
            // May be negative while still inside the leading padding.
            const auto cc = sc - padding_begin[i];
            // Compare entirely in the signed domain: data_shape[i] is
            // unsigned, and a mixed signed/unsigned comparison is a classic
            // -Wsign-compare hazard.
            if (0 <= cc && cc < static_cast<std::ptrdiff_t>(data_shape[i]))
            {
                coord[i] = cc;
            }
            else
            {
                return nullptr;
            }
        }
        return std::addressof(coord);
    }
};
/// EDGE mode: output positions outside the input replicate the nearest
/// border element of the input.
struct EdgePad : PadBase
{
    using PadBase::PadBase;

    /// Shift the output coordinate back by padding_begin and clamp each
    /// axis into [0, data_shape[i] - 1], replicating the edge element.
    const Coordinate*
        transform_to_input_data_coord(const Coordinate& out_coord) const override
    {
        assert(out_coord.size() == coord.size());

        for (size_t i = 0; i != coord.size(); ++i)
        {
            const auto sc = static_cast<std::ptrdiff_t>(out_coord[i]);
            const auto cc = sc - padding_begin[i];
            // Convert the dimension to the signed domain *before* subtracting
            // one: `data_shape[i] - 1` evaluated in size_t wraps to a huge
            // value when the dimension is 0.
            const auto last = static_cast<std::ptrdiff_t>(data_shape[i]) - 1;
            coord[i] = clamp<std::ptrdiff_t>(cc, 0, last);
        }
        return std::addressof(coord);
    }
};
// Handles both REFLECT and SYMMETRIC modes. The two differ only in whether
// the border element itself is repeated: SYMMETRIC mirrors about the tensor
// edge (border element repeated), REFLECT mirrors about the border element
// (not repeated). `axis_correction` (1 for SYMMETRIC, 0 for REFLECT) encodes
// that one-element offset in the index arithmetic below.
struct SymmetricAndReflectPad : PadBase
{
SymmetricAndReflectPad(const char* const data,
const char* const pad_value,
char* const out,
const size_t elem_size,
const Shape& data_shape,
const Shape& out_shape,
const CoordinateDiff& padding_begin,
const CoordinateDiff& padding_end,
const op::PadMode pad_mode)
: PadBase(data,
pad_value,
out,
elem_size,
data_shape,
out_shape,
padding_begin,
padding_end,
pad_mode)
, axis_correction(pad_mode == op::PadMode::SYMMETRIC ? 1 : 0)
{
}
// Map an output coordinate to its mirrored input coordinate. Only a single
// reflection per axis is supported; check_inputs() rejects paddings that
// would need more (so the multi-reflect case is intentionally unhandled).
const Coordinate*
transform_to_input_data_coord(const Coordinate& out_coord) const override
{
assert(out_coord.size() == coord.size());
for (size_t i = 0; i != coord.size(); ++i)
{
const auto shape_dim = static_cast<std::ptrdiff_t>(data_shape[i]);
const auto sc = static_cast<std::ptrdiff_t>(out_coord[i]);
// Position relative to the start of the input data; negative while the
// output position is still inside the leading padding.
const auto cc = sc - padding_begin[i];
// Mirror leading-padding positions back into the data:
// REFLECT maps cc -> -cc, SYMMETRIC maps cc -> -cc - 1.
const auto rollfront_cc = cc >= 0 ? cc : -cc - axis_correction;
// Mirror trailing-padding positions back into the data:
// REFLECT maps x -> 2*(shape_dim - 1) - x, SYMMETRIC one element more.
const auto rollback_cc =
shape_dim - (rollfront_cc + 2 - shape_dim) + axis_correction;
coord[i] = rollfront_cc < shape_dim ? rollfront_cc : rollback_cc;
// NOTE(review): coord[i] is unsigned, so `0 <= coord[i]` is always true;
// only the upper-bound half of this assert is an effective check.
assert(0 <= coord[i] && coord[i] < data_shape[i]);
}
return std::addressof(coord);
}
// Reject paddings that would require more than one reflection per axis:
// REFLECT needs padding < dim, SYMMETRIC allows padding <= dim.
void check_inputs() const override
{
for (size_t i = 0; i != padding_begin.size(); ++i)
{
const auto axis_size = static_cast<std::ptrdiff_t>(data_shape[i]);
NGRAPH_CHECK(padding_begin[i] - axis_correction < axis_size,
"padding below should be less than data shape");
NGRAPH_CHECK(padding_end[i] - axis_correction < axis_size,
"padding should be less than data shape");
}
}
// 1 for SYMMETRIC, 0 for REFLECT (set in the constructor).
int axis_correction{};
};
}
/// Byte-wise pad reference implementation; dispatches on `pad_mode`.
///
/// \param data           raw bytes of the input tensor
/// \param pad_value      raw bytes of the single fill element (used by
///                       CONSTANT mode only)
/// \param out            destination buffer holding `out_shape` elements
/// \param elem_size      size of one tensor element in bytes
/// \param data_shape     shape of the input tensor
/// \param out_shape      shape of the padded output tensor
/// \param padding_below  per-axis padding before the data
/// \param padding_above  per-axis padding after the data
/// \param pad_mode       CONSTANT, EDGE, REFLECT or SYMMETRIC
void pad(const char* data,
         const char* pad_value,
         char* out,
         const size_t elem_size,
         const Shape& data_shape,
         const Shape& out_shape,
         const CoordinateDiff& padding_below,
         const CoordinateDiff& padding_above,
         const op::PadMode pad_mode)
{
    switch (pad_mode)
    {
    case op::PadMode::CONSTANT:
    {
        impl::ConstPad pad{data,
                           pad_value,
                           out,
                           elem_size,
                           data_shape,
                           out_shape,
                           padding_below,
                           padding_above,
                           pad_mode};
        pad.run();
        break;
    }
    case op::PadMode::EDGE:
    {
        impl::EdgePad pad{data,
                          pad_value,
                          out,
                          elem_size,
                          data_shape,
                          out_shape,
                          padding_below,
                          padding_above,
                          pad_mode};
        pad.run();
        break;
    }
    case op::PadMode::REFLECT:
    case op::PadMode::SYMMETRIC:
    {
        impl::SymmetricAndReflectPad pad{data,
                                         pad_value,
                                         out,
                                         elem_size,
                                         data_shape,
                                         out_shape,
                                         padding_below,
                                         padding_above,
                                         pad_mode};
        pad.run();
        break;
    }
    default:
        // Fail loudly on an unhandled mode instead of silently leaving
        // `out` untouched (the previous `default: break;` masked bugs).
        NGRAPH_CHECK(false, "Unsupported pad mode");
        break;
    }
}
} // namespace impl
namespace reference
{
void pad(const char* data,
@ -20,182 +281,16 @@ namespace ngraph
const CoordinateDiff& padding_above,
const op::PadMode pad_mode)
{
Coordinate input_start(data_shape.size(), 0); // start at (0,0,...,0)
Coordinate input_end = out_shape; // end at (d'0,d'1,...,d'n), the outer corner of
// the post-padding shape
Strides input_strides(data_shape.size(), 1);
AxisVector input_axis_order(data_shape.size());
for (size_t i = 0; i < data_shape.size(); i++)
{
input_axis_order[i] = i;
}
CoordinateTransform input_transform(data_shape,
input_start,
input_end,
input_strides,
input_axis_order,
padding_below,
padding_above);
CoordinateTransform output_transform(out_shape);
CoordinateTransform::Iterator output_it = output_transform.begin();
NGRAPH_CHECK(shape_size(input_transform.get_target_shape()) ==
shape_size(output_transform.get_target_shape()));
// depending on the data tensor element type, allocate enough bytes to fit a
// single value of this type
std::vector<char> v(elem_size, 0);
for (const Coordinate& in_coord : input_transform)
{
if (output_it == output_transform.end())
break;
const Coordinate& out_coord = *output_it;
std::fill(v.begin(), v.end(), 0);
switch (pad_mode)
{
case op::PadMode::CONSTANT:
// If the coordinate is out of bounds, substitute *pad_value.
if (input_transform.has_source_coordinate(in_coord))
{
const auto* offset = data + input_transform.index(in_coord) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
}
else
{
std::copy(pad_value, pad_value + elem_size, v.begin());
}
break;
case op::PadMode::EDGE:
{
Coordinate c = in_coord; // have to copy because in_coord is const
// Truncate each out-of-bound dimension.
for (size_t i = 0; i < c.size(); i++)
{
if (static_cast<ptrdiff_t>(c[i]) < padding_below[i])
{
c[i] = padding_below[i];
}
if (static_cast<ptrdiff_t>(c[i]) >=
(padding_below[i] + static_cast<ptrdiff_t>(data_shape[i])))
{
c[i] = static_cast<size_t>(
padding_below[i] + static_cast<ptrdiff_t>(data_shape[i]) - 1);
}
}
const auto* offset = data + input_transform.index(c) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
break;
}
case op::PadMode::REFLECT:
{
// clang-format off
// The algorithm here is a bit complicated because if the padding is
// bigger than the tensor, we may reflect multiple times.
//
// Example:
//
// Input shape: [2]
// Padding: 6 below, 6 above
// Output shape: [14]
//
// Input: a b
// Expected output: a b a b a b a b a b a b a b
//
// Computation for coordinate 13 of output:
//
// . . . . . . a b . . . . .[.] -> (oob above by 6 spaces, so reflection is at top-6)
// .[.]. . . . a b . . . . . . -> (oob below by 5 spaces, so reflection is at bottom+5)
// . . . . . . a b . . .[.]. . -> (oob above by 4 spaces, so reflection is at top-4)
// . . .[.]. . a b . . . . . . -> (oob below by 3 spaces, so reflection is at bottom+3)
// . . . . . . a b .[.]. . . . -> (oob above by 2 spaces, so reflection is at top-2)
// . . . . .[.]a b . . . . . . -> (oob below by 1 space, so reflection is at bottom+1)
// . . . . . . a[b]. . . . . . -> (no longer oob, so copy from here)
//
// Note that this algorithm works because REFLECT padding only makes sense
// if each dim is >= 2.
// clang-format on
Coordinate c = in_coord; // have to copy because in_coord is const
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t new_dim = c[i];
bool done_reflecting = false;
while (!done_reflecting)
{
if (new_dim < padding_below[i])
{
ptrdiff_t distance_oob = padding_below[i] - new_dim;
new_dim = padding_below[i] + distance_oob;
}
else if (new_dim >=
padding_below[i] + static_cast<ptrdiff_t>(data_shape[i]))
{
ptrdiff_t distance_oob =
new_dim - padding_below[i] -
(static_cast<ptrdiff_t>(data_shape[i]) - 1);
new_dim = padding_below[i] +
static_cast<ptrdiff_t>(data_shape[i]) - distance_oob -
1;
}
else
{
done_reflecting = true;
}
}
c[i] = static_cast<size_t>(new_dim);
}
const auto* offset = data + input_transform.index(c) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
break;
}
case op::PadMode::SYMMETRIC:
{
Coordinate c = in_coord; // have to copy because in_coord is const
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t pos = padding_below[i] - (c[i] + 1);
if (pos >= 0)
{
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
pos = -(pos + 1);
ptrdiff_t src_dim = static_cast<ptrdiff_t>(data_shape[i]);
if (pos < src_dim)
{
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
c[i] = static_cast<size_t>(2 * (padding_below[i] + src_dim) -
c[i] - 1);
}
}
}
const auto* offset = data + input_transform.index(c) * elem_size;
std::copy(offset, offset + elem_size, v.begin());
break;
}
}
std::copy(
v.begin(), v.end(), out + output_transform.index(out_coord) * elem_size);
++output_it;
}
impl::pad(data,
pad_value,
out,
elem_size,
data_shape,
out_shape,
padding_below,
padding_above,
pad_mode);
}
}
}
}
} // namespace reference
} // namespace runtime
} // namespace ngraph

View File

@ -455,6 +455,67 @@ NGRAPH_INSTANTIATE_TEST_CASE_P(${BACKEND_NAME},
}),
op::PadMode::SYMMETRIC}));
// SYMMETRIC padding may mirror at most one full axis length: padding_below
// of 3 on an axis of size 2 would need a second reflection, which the
// reference implementation rejects (expect a throw); padding of 2 (equal to
// the axis size) is the largest legal value and must succeed.
NGRAPH_TEST(${BACKEND_NAME}, pad_to_large_symmetric_padding)
{
const auto params_to_large = Params<float>{test::NDArray<float, 2>({
{1, 2},
{4, 5},
}),
test::NDArray<int64_t, 1>({0, 3}),
test::NDArray<int64_t, 1>({0, 0}),
test::NDArray<float, 2>({
{0, 0, 0, 0, 0},
{0, 0, 0, 0, 0},
}),
op::PadMode::SYMMETRIC};
EXPECT_ANY_THROW(PadBackendTest::execute_test(params_to_large));
// Padding == axis size: each row {1, 2} mirrors to {2, 1, 1, 2}.
const auto params_ok = Params<float>{test::NDArray<float, 2>({
{1, 2},
{4, 5},
}),
test::NDArray<int64_t, 1>({0, 2}),
test::NDArray<int64_t, 1>({0, 0}),
test::NDArray<float, 2>({
{2, 1, 1, 2},
{5, 4, 4, 5},
}),
op::PadMode::SYMMETRIC};
EXPECT_NO_THROW(PadBackendTest::execute_test(params_ok));
}
// REFLECT padding must be strictly smaller than the axis size: padding of 2
// on an axis of size 2 would need a second reflection, which the reference
// implementation rejects (expect a throw); padding of 1 is the largest legal
// value and must succeed.
NGRAPH_TEST(${BACKEND_NAME}, pad_to_large_reflect_padding)
{
const auto params_to_large = Params<float>{test::NDArray<float, 2>({
{1, 2},
{4, 5},
}),
test::NDArray<int64_t, 1>({0, 2}),
test::NDArray<int64_t, 1>({0, 0}),
test::NDArray<float, 2>({
{0, 0, 0, 0},
{0, 0, 0, 0},
}),
op::PadMode::REFLECT};
EXPECT_ANY_THROW(PadBackendTest::execute_test(params_to_large));
// Padding < axis size: each row {1, 2} reflects to {2, 1, 2}.
const auto params_ok = Params<float>{test::NDArray<float, 2>({
{1, 2},
{4, 5},
}),
test::NDArray<int64_t, 1>({0, 1}),
test::NDArray<int64_t, 1>({0, 0}),
test::NDArray<float, 2>({
{2, 1, 2},
{5, 4, 5},
}),
op::PadMode::REFLECT};
EXPECT_NO_THROW(PadBackendTest::execute_test(params_ok));
}
NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d)
{
const Shape data_shape{6};
@ -862,7 +923,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor)
std::vector<float>({4, 3}), read_vector<float>(result), MIN_FLOAT_TOLERANCE_BITS));
}
NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect)
NGRAPH_TEST(${BACKEND_NAME}, DISABLED_pad_reflect_1d_multi_reflect)
{
const Shape data_shape{3};
const auto data = make_shared<op::Parameter>(element::f32, data_shape);