Enable importing TinyYOLOv2 (#1121)

* Fixed bugs in TinyYOLOv2 ops

* Added tests

* styles applied

* styles applied

* code review remarks introduced

* code review remarks (unit tests added)
This commit is contained in:
Mateusz Bencer 2020-07-09 11:26:00 +02:00 committed by GitHub
parent 95677afe29
commit f4b76d4e5e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 303 additions and 45 deletions

View File

@ -62,6 +62,32 @@ void op::v1::Convolution::validate_and_infer_types()
const PartialShape& filters_shape = get_input_partial_shape(1);
element::Type filters_et = get_input_element_type(1);
PartialShape result_shape = PartialShape::dynamic();
if (data_batch_shape.rank().is_static())
{
result_shape =
std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
if (data_batch_shape.rank().get_length() > 1)
{
result_shape[0] = data_batch_shape[0]; // batch size
}
}
if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 1)
{
result_shape[1] = filters_shape[0]; // filter channel size
}
element::Type result_et;
NODE_VALIDATION_CHECK(
this,
element::Type::merge(result_et, data_batch_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
data_batch_et,
", filters element type: ",
filters_et,
").");
if (m_strides.size() == 0)
{
m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
@ -84,34 +110,28 @@ void op::v1::Convolution::validate_and_infer_types()
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
if (data_batch_shape.is_static() && filters_shape.is_static())
bool auto_padding_applied = false;
if (filters_shape.is_static())
{
m_pads_begin.clear();
m_pads_end.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
infer_auto_padding(data_batch_shape.to_shape(),
filter_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
auto_padding_applied = try_apply_auto_padding(data_batch_shape,
filter_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
}
if (!auto_padding_applied)
{
set_output_type(0, result_et, result_shape);
return;
}
}
element::Type result_et;
PartialShape result_shape;
NODE_VALIDATION_CHECK(
this,
element::Type::merge(result_et, data_batch_et, filters_et),
"Element types for data batch and filters do not match (data batch element type: ",
data_batch_et,
", filters element type: ",
filters_et,
").");
result_shape = infer_convolution_forward(this,
data_batch_shape,
Strides(m_strides.size(), 1), // dummy data dilations

View File

@ -130,26 +130,26 @@ void op::v0::MaxPool::update_auto_padding(const PartialShape& in_shape,
}
}
void op::v1::MaxPool::update_auto_padding(const PartialShape& in_shape,
bool op::v1::MaxPool::update_auto_padding(const PartialShape& in_shape,
Shape& new_pads_end,
Shape& new_pads_begin)
{
bool update_auto_padding_succeed = true;
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
if (in_shape.is_static())
{
CoordinateDiff pads_end, pads_begin;
infer_auto_padding(in_shape.to_shape(),
m_kernel,
m_strides,
Strides(m_kernel.size(), 1), // No dilation
m_auto_pad,
pads_end,
pads_begin);
new_pads_end = Shape(pads_end.begin(), pads_end.end());
new_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
}
CoordinateDiff pads_end, pads_begin;
update_auto_padding_succeed =
try_apply_auto_padding(in_shape,
m_kernel,
m_strides,
Strides(m_kernel.size(), 1), // No dilation
m_auto_pad,
pads_end,
pads_begin);
new_pads_end = Shape(pads_end.begin(), pads_end.end());
new_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
}
return update_auto_padding_succeed;
}
op::v0::MaxPool::MaxPool(const Output<Node>& arg,
@ -240,8 +240,22 @@ void op::v1::MaxPool::validate_and_infer_types()
}
const PartialShape& arg_shape = get_input_partial_shape(0);
auto output_shape = PartialShape::dynamic();
if (arg_shape.rank().is_static())
{
output_shape = std::vector<Dimension>(arg_shape.rank().get_length(), Dimension::dynamic());
if (arg_shape.rank().get_length() > 1)
{
output_shape[0] = arg_shape[0]; // batch size
}
if (arg_shape.rank().get_length() > 2)
{
output_shape[1] = arg_shape[1]; // channel size
}
}
update_auto_padding(arg_shape, m_pads_end, m_pads_begin);
const bool update_auto_padding_succeed =
update_auto_padding(arg_shape, m_pads_end, m_pads_begin);
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
// now still take Shape (no negative padding).
@ -250,14 +264,16 @@ void op::v1::MaxPool::validate_and_infer_types()
set_output_type(0,
get_input_element_type(0),
infer_batched_pooling_forward(this,
arg_shape,
pads_begin,
pads_end,
m_kernel,
m_strides,
true,
m_rounding_type == op::RoundingType::CEIL));
update_auto_padding_succeed
? infer_batched_pooling_forward(this,
arg_shape,
pads_begin,
pads_end,
m_kernel,
m_strides,
true,
m_rounding_type == op::RoundingType::CEIL)
: output_shape);
}
shared_ptr<Node> op::v1::MaxPool::clone_with_new_inputs(const OutputVector& new_args) const

View File

@ -240,7 +240,7 @@ namespace ngraph
op::RoundingType m_rounding_type{op::RoundingType::FLOOR};
private:
void update_auto_padding(const PartialShape& in_shape,
bool update_auto_padding(const PartialShape& in_shape,
Shape& new_pads_end,
Shape& new_pads_begin);
};

View File

@ -608,11 +608,45 @@ void ngraph::infer_auto_padding(const Shape& image_shape,
const op::PadType pad_type,
CoordinateDiff& padding_above,
CoordinateDiff& padding_below)
{
const auto image_dims = std::vector<Dimension>(std::begin(image_shape), std::end(image_shape));
// because image_shape is fully known, the result of try_apply_auto_padding is ignored
try_apply_auto_padding(image_dims,
filter_shape,
filter_strides,
filter_dilations,
pad_type,
padding_above,
padding_below);
}
bool ngraph::try_apply_auto_padding(const PartialShape& image_shape,
const Shape& filter_shape,
const Strides& filter_strides,
const Strides& filter_dilations,
const op::PadType pad_type,
CoordinateDiff& padding_above,
CoordinateDiff& padding_below)
{
NGRAPH_CHECK(pad_type == op::PadType::SAME_UPPER || pad_type == op::PadType::SAME_LOWER);
if (image_shape.rank().is_dynamic())
{
return false;
}
const auto image_dims = static_cast<std::vector<Dimension>>(image_shape);
const bool are_spatial_dims_static =
std::all_of(std::begin(image_dims) + 2, std::end(image_dims), [](const Dimension& dim) {
return dim.is_static();
});
if (!are_spatial_dims_static)
{
return false;
}
for (size_t i = 0; i < static_cast<size_t>(filter_shape.size()); i++)
{
int64_t image_size = static_cast<int64_t>(image_shape[i + 2]);
int64_t image_size = static_cast<int64_t>(image_dims[i + 2]);
int64_t filter_size = (static_cast<int64_t>(filter_shape[i]) - 1) * filter_dilations[i] + 1;
int64_t filter_stride = static_cast<int64_t>(filter_strides[i]);
auto output_size = (image_size + filter_stride - 1) / filter_stride;
@ -624,6 +658,7 @@ void ngraph::infer_auto_padding(const Shape& image_shape,
padding_below.push_back(pad_type == op::PadType::SAME_UPPER ? padding_lhs : padding_rhs);
padding_above.push_back(pad_type == op::PadType::SAME_UPPER ? padding_rhs : padding_lhs);
}
return true;
}
PartialShape ngraph::infer_slice_shape(const Node* node,

View File

@ -89,6 +89,29 @@ namespace ngraph
const PartialShape& gamma_shape,
const PartialShape& beta_shape);
/// \brief Apply auto padding to padding_above and padding_below inputs
/// if all needed information is known.
///
/// \param image_shape The shape of input image.
/// \param filter_shape The shape of filter input.
/// \param filter_strides The strides of the filter.
/// \param filter_dilations The dilations of the filter.
/// \param pad_type The type of padding. Auto padding is applied only
/// for SAME_UPPER and SAME_LOWER mode.
/// \param padding_above The output placeholder filled with the padding above each axis.
/// \param padding_below The output placeholder filled with the padding below each axis.
///
/// \return true if auto padding was applied successfully (all needed information, such as
/// spatial dims, was known), false otherwise.
NGRAPH_API
bool try_apply_auto_padding(const PartialShape& image_shape,
const Shape& filter_shape,
const Strides& filter_strides,
const Strides& filter_dilations,
const op::PadType pad_type,
CoordinateDiff& padding_above,
CoordinateDiff& padding_below);
NGRAPH_API
void infer_auto_padding(const Shape& image_shape,
const Shape& filter_shape,

View File

@ -2557,6 +2557,91 @@ TEST(type_prop, conv_v1_partial_rank)
ASSERT_TRUE(conv->get_output_partial_shape(0).is_dynamic());
}
// Fully static data and filter shapes with SAME_LOWER auto padding: a 3x3 filter
// with stride 1 preserves the 5x5 spatial dims, and symmetric pads {1, 1} are
// computed for both begin and end.
TEST(type_prop, conv_v1_partial_auto_padding_same)
{
const PartialShape data_batch_shape{1, 1, 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
// pads are given as zero; auto_pad is expected to overwrite them
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::Convolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// Dynamic batch and channel dims but static spatial dims: SAME_LOWER auto padding
// can still be computed ({1, 1} pads); the output keeps the dynamic batch dim and
// takes the channel dim (1) from the filters.
TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower)
{
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
// pads are given as zero; auto_pad is expected to overwrite them
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::Convolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// Same as the SAME_LOWER case above but with SAME_UPPER and an even (2x2) filter:
// the odd total padding goes to the end, so pads_begin stays {0, 0} and pads_end
// becomes {1, 1}.
TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper)
{
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{1, 1, 2, 2};
Strides strides{1, 1};
// pads are given as zero; auto_pad is expected to overwrite them
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_UPPER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::Convolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// A dynamic spatial dim means auto padding cannot be applied: the pads stay empty
// and the output spatial dims are left dynamic (only batch and channel are inferred).
TEST(type_prop, conv_v1_partial_auto_padding_same_spatial_dims_dynamic)
{
const PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::Convolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
{1, 1, Dimension::dynamic(), Dimension::dynamic()}));
// auto padding not applied -> pads remain cleared
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{}));
}
TEST(type_prop, deformable_conv_incorrect_group)
{
const PartialShape data_batch_shape{1, 3, 96, 96};

View File

@ -588,3 +588,82 @@ TEST(type_prop, max_pool_partial_rank_static_dynamic_padded_window_not_too_big)
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
PartialShape{5, Dimension::dynamic(), 1, Dimension::dynamic(), 1, 3}));
}
// Fully static input with SAME_LOWER auto padding: a 2x2 kernel with stride 1
// preserves the 32x32 spatial dims; the odd total padding goes to the begin side
// ({1, 1}) while pads_end stays {0, 0}.
TEST(type_prop, max_pool_auto_padding)
{
const PartialShape arg_shape{1, 3, 32, 32};
const Strides strides{1, 1};
// pads are given as zero; auto_pad is expected to overwrite them
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
// Dynamic batch and channel dims but static spatial dims: SAME_LOWER padding is
// still computed ({1, 1} begin, {0, 0} end) and the dynamic N/C dims are passed
// through to the output.
TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_lower)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};
// pads are given as zero; auto_pad is expected to overwrite them
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
// Same as the SAME_LOWER case above but with SAME_UPPER: the odd total padding
// goes to the end side, so pads_begin stays {0, 0} and pads_end becomes {1, 1}.
TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_upper)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};
// pads are given as zero; auto_pad is expected to overwrite them
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_UPPER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
ASSERT_EQ(mp->get_pads_end(), (Shape{1, 1}));
}
// A dynamic spatial dim means auto padding cannot be applied: the pads stay empty
// and the output spatial dims are left dynamic (only N and C are propagated).
TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic)
{
const PartialShape arg_shape{1, 3, 32, Dimension::dynamic()};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::MaxPool>(
arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{1, 3, Dimension::dynamic(), Dimension::dynamic()}));
// auto padding not applied -> pads remain empty
ASSERT_EQ(mp->get_pads_begin(), (Shape{}));
ASSERT_EQ(mp->get_pads_end(), (Shape{}));
}