Extend dynamic shape support for ops which use auto padding mode (#1432)

Mateusz Bencer 2020-08-10 13:48:18 +02:00 committed by GitHub
parent e88c7b5ed7
commit f5884231d3
7 changed files with 515 additions and 77 deletions
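
The practical effect of the change is easiest to see from the public API: with auto_pad set to SAME_UPPER or SAME_LOWER, shape inference now produces a (partially) dynamic output shape instead of requiring fully static inputs. Below is a minimal usage sketch, not part of the commit, mirroring the new type_prop tests further down; it assumes an nGraph build that contains this change and relies on nGraph's stream operators for PartialShape and Shape for printing.

// Minimal usage sketch: AvgPool shape inference with dynamic batch/channel dims
// and SAME_LOWER auto padding, mirroring the new type_prop tests in this commit.
#include <iostream>
#include <memory>
#include "ngraph/ngraph.hpp"

using namespace ngraph;

int main()
{
    // N and C are dynamic, spatial dims are static, so padding can still be inferred.
    const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
    auto arg = std::make_shared<op::Parameter>(element::f32, arg_shape);

    auto pool = std::make_shared<op::v1::AvgPool>(arg,
                                                  Strides{1, 1}, // strides
                                                  Shape{0, 0},   // pads_begin (recomputed by auto pad)
                                                  Shape{0, 0},   // pads_end (recomputed by auto pad)
                                                  Shape{2, 2},   // kernel
                                                  false,         // exclude_pad
                                                  op::RoundingType::FLOOR,
                                                  op::PadType::SAME_LOWER);

    // Expected (see the avg_pool type_prop tests): {?,?,32,32}, pads_begin {1,1}, pads_end {0,0}.
    std::cout << pool->get_output_partial_shape(0) << std::endl;
    std::cout << pool->get_pads_begin() << " / " << pool->get_pads_end() << std::endl;
}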


@@ -93,22 +93,34 @@ void op::v1::AvgPool::validate_and_infer_types()
}
const PartialShape& arg_shape = get_input_partial_shape(0);
auto output_shape = PartialShape::dynamic();
if (arg_shape.rank().is_static())
{
output_shape = std::vector<Dimension>(arg_shape.rank().get_length(), Dimension::dynamic());
if (arg_shape.rank().get_length() > 1)
{
output_shape[0] = arg_shape[0]; // batch size
}
if (arg_shape.rank().get_length() > 2)
{
output_shape[1] = arg_shape[1]; // channel size
}
}
bool update_auto_padding_succeed = true;
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
if (arg_shape.is_static())
{
CoordinateDiff pads_end, pads_begin;
infer_auto_padding(arg_shape.to_shape(),
m_kernel,
m_strides,
Strides(m_kernel.size(), 1), // No dilation
m_auto_pad,
pads_end,
pads_begin);
m_pads_end = Shape(pads_end.begin(), pads_end.end());
m_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
}
CoordinateDiff pads_end, pads_begin;
update_auto_padding_succeed =
try_apply_auto_padding(arg_shape,
m_kernel,
m_strides,
Strides(m_kernel.size(), 1), // No dilation
m_auto_pad,
pads_end,
pads_begin);
m_pads_end = Shape(pads_end.begin(), pads_end.end());
m_pads_begin = Shape(pads_begin.begin(), pads_begin.end());
}
// infer_batched_forward_pooling wants CoordinateDiffs for these, while the pooling ops for
@@ -118,14 +130,16 @@ void op::v1::AvgPool::validate_and_infer_types()
set_output_type(0,
get_input_element_type(0),
infer_batched_pooling_forward(this,
arg_shape,
pads_begin,
pads_end,
m_kernel,
m_strides,
!m_exclude_pad,
m_rounding_type == op::RoundingType::CEIL));
update_auto_padding_succeed
? infer_batched_pooling_forward(this,
arg_shape,
pads_begin,
pads_end,
m_kernel,
m_strides,
!m_exclude_pad,
m_rounding_type == op::RoundingType::CEIL)
: output_shape);
}
const Shape& op::v1::AvgPool::get_kernel() const


@@ -77,6 +77,23 @@ void op::v1::BinaryConvolution::validate_and_infer_types()
const PartialShape& filters_shape = get_input_partial_shape(1);
element::Type filters_et = get_input_element_type(1);
PartialShape result_shape = PartialShape::dynamic();
if (data_batch_shape.rank().is_static())
{
result_shape =
std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
if (data_batch_shape.rank().get_length() > 1)
{
result_shape[0] = data_batch_shape[0]; // batch size
}
if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 1)
{
result_shape[1] = filters_shape[0]; // filter channel size
}
}
if (m_strides.size() == 0)
{
m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
@@ -99,23 +116,28 @@ void op::v1::BinaryConvolution::validate_and_infer_types()
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
if (data_batch_shape.is_static() && filters_shape.is_static())
bool auto_padding_applied = false;
if (filters_shape.is_static())
{
m_pads_begin.clear();
m_pads_end.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
infer_auto_padding(data_batch_shape.to_shape(),
filter_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
auto_padding_applied = try_apply_auto_padding(data_batch_shape,
filter_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
}
if (!auto_padding_applied)
{
set_output_type(0, data_batch_et, result_shape);
return;
}
}
PartialShape result_shape;
result_shape = infer_convolution_forward(this,
data_batch_shape,
Strides(data_batch_shape.rank().get_length() - 2, 1),


@@ -71,44 +71,6 @@ void op::v1::DeformableConvolution::validate_and_infer_types()
element::Type deformable_values_et = get_input_element_type(1);
element::Type filters_et = get_input_element_type(2);
if (m_strides.size() == 0)
{
m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_dilations.size() == 0)
{
m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_pads_begin.size() == 0)
{
m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_pads_end.size() == 0)
{
m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
if (data_batch_shape.is_static() && filters_shape.is_static())
{
m_pads_begin.clear();
m_pads_end.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
infer_auto_padding(data_batch_shape.to_shape(),
filter_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
}
}
if (deformable_values_shape.rank().is_static())
{
NODE_VALIDATION_CHECK(
@@ -160,15 +122,75 @@ void op::v1::DeformableConvolution::validate_and_infer_types()
filters_et,
").");
const PartialShape result_shape =
infer_convolution_forward(this,
data_batch_shape,
Strides(m_strides.size(), 1), // dummy data dilations
m_pads_begin,
m_pads_end,
filters_shape,
m_strides,
m_dilations);
PartialShape result_shape = PartialShape::dynamic();
if (data_batch_shape.rank().is_static())
{
result_shape =
std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
if (data_batch_shape.rank().get_length() > 1)
{
result_shape[0] = data_batch_shape[0]; // batch size
}
if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 1)
{
result_shape[1] = filters_shape[0]; // filter channel size
}
}
if (m_strides.size() == 0)
{
m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_dilations.size() == 0)
{
m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_pads_begin.size() == 0)
{
m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_pads_end.size() == 0)
{
m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
bool auto_padding_applied = false;
if (filters_shape.is_static())
{
m_pads_begin.clear();
m_pads_end.clear();
auto filter_shape = filters_shape.to_shape();
filter_shape.erase(filter_shape.begin(), filter_shape.begin() + 2); // Remove {O,I}
auto_padding_applied = try_apply_auto_padding(data_batch_shape,
filter_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
}
if (!auto_padding_applied)
{
set_output_type(0, data_batch_et, result_shape);
return;
}
}
result_shape = infer_convolution_forward(this,
data_batch_shape,
Strides(m_strides.size(), 1), // dummy data dilations
m_pads_begin,
m_pads_end,
filters_shape,
m_strides,
m_dilations);
set_output_type(0, result_et, result_shape);
}
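
All three ops now share the same guard-and-fallback structure: pre-compute a conservative output shape from the input ranks (batch dim from the data and, for the convolutions, the channel dim from the filters; AvgPool takes both from its input), attempt auto padding only when the filter shape is static via try_apply_auto_padding, and fall back to the conservative shape when padding cannot be resolved. The following standalone sketch of that control flow is illustrative only: it uses plain integers with -1 meaning "dynamic" in place of nGraph's Dimension/PartialShape, and a hypothetical helper standing in for try_apply_auto_padding.

// Simplified stand-in for the guard-and-fallback pattern used by AvgPool,
// BinaryConvolution and DeformableConvolution in this commit.
#include <cstdint>
#include <iostream>
#include <vector>

// -1 models a dynamic dimension (the real code uses ngraph::Dimension::dynamic()).
using Dims = std::vector<int64_t>;

// Hypothetical stand-in for ngraph::try_apply_auto_padding: padding for SAME_*
// modes can only be resolved when every spatial filter dimension is static.
static bool try_auto_padding_sketch(const Dims& filter_spatial, Dims& pads_begin, Dims& pads_end)
{
    for (int64_t f : filter_spatial)
    {
        if (f < 0)
        {
            return false; // dynamic filter dim -> padding unknown
        }
        pads_begin.push_back((f - 1) / 2); // illustrative SAME split only
        pads_end.push_back(f / 2);
    }
    return true;
}

static Dims infer_output_shape_sketch(const Dims& data, const Dims& filters)
{
    // 1. Conservative result: everything dynamic, then copy what the ranks allow.
    Dims result(data.size(), -1);
    if (data.size() > 1)
    {
        result[0] = data[0]; // batch size
    }
    if (data.size() > 1 && filters.size() > 1)
    {
        result[1] = filters[0]; // output channel count
    }

    // 2. Guard: only attempt auto padding when the filter spatial dims are static.
    Dims pads_begin, pads_end;
    const Dims filter_spatial(filters.begin() + 2, filters.end());
    if (!try_auto_padding_sketch(filter_spatial, pads_begin, pads_end))
    {
        return result; // 3. Fallback: keep the partially dynamic shape.
    }

    // With SAME padding and stride 1, known spatial dims carry over unchanged.
    for (size_t i = 2; i < data.size(); ++i)
    {
        result[i] = data[i];
    }
    return result;
}

int main()
{
    // Data {N=?, C=?, 5, 5}, filters {4, 4, 3, 3} -> output {?, 4, 5, 5}.
    const Dims out = infer_output_shape_sketch({-1, -1, 5, 5}, {4, 4, 3, 3});
    for (int64_t d : out)
    {
        std::cout << d << ' ';
    }
    std::cout << std::endl;
}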


@@ -91,8 +91,10 @@ set(SRC
tensor.cpp
type_prop/any.cpp
type_prop/assign.cpp
type_prop/avg_pool.cpp
type_prop/batch_norm.cpp
type_prop/batch_to_space.cpp
type_prop/binary_convolution.cpp
type_prop/binary_elementwise.cpp
type_prop/broadcast.cpp
type_prop/bucketize.cpp
@@ -102,6 +104,7 @@ set(SRC
type_prop/convert.cpp
type_prop/convolution.cpp
type_prop/ctc_loss.cpp
type_prop/deformable_convolution.cpp
type_prop/deformable_psroi_pooling.cpp
type_prop/depth_to_space.cpp
type_prop/dequantize.cpp


@@ -0,0 +1,105 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, avg_pool_auto_padding)
{
const PartialShape arg_shape{1, 3, 32, 32};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = false;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme({1, 3, 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_lower)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = true;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{1, 1}));
ASSERT_EQ(mp->get_pads_end(), (Shape{0, 0}));
}
TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper)
{
const PartialShape arg_shape{Dimension::dynamic(), Dimension::dynamic(), 32, 32};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = false;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_UPPER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{Dimension::dynamic(), Dimension::dynamic(), 32, 32}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{0, 0}));
ASSERT_EQ(mp->get_pads_end(), (Shape{1, 1}));
}
TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic)
{
const PartialShape arg_shape{1, 3, 32, Dimension::dynamic()};
const Strides strides{1, 1};
const Shape pads_begin{0, 0};
const Shape pads_end{0, 0};
const Shape kernel_shape{2, 2};
const bool exclude_pad = true;
const auto rounding_mode = op::RoundingType::FLOOR;
const auto auto_pad = op::PadType::SAME_LOWER;
auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
auto mp = make_shared<op::v1::AvgPool>(
arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad);
ASSERT_TRUE(mp->get_output_partial_shape(0).same_scheme(
{1, 3, Dimension::dynamic(), Dimension::dynamic()}));
ASSERT_EQ(mp->get_pads_begin(), (Shape{}));
ASSERT_EQ(mp->get_pads_end(), (Shape{}));
}


@@ -0,0 +1,115 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, binary_conv_v1_partial_auto_padding_same)
{
const PartialShape data_batch_shape{1, 1, 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, binary_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower)
{
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, binary_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper)
{
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape filters_shape{1, 1, 2, 2};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_UPPER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 1, 5, 5}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, binary_conv_v1_partial_auto_padding_same_spatial_dims_dynamic)
{
const PartialShape data_batch_shape{1, 1, Dimension::dynamic(), 5};
const PartialShape filters_shape{1, 1, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto mode = op::v1::BinaryConvolution::BinaryConvolutionMode::XNOR_POPCOUNT;
const float pad_value = 1.0f;
const auto auto_pad = op::PadType::SAME_LOWER;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto conv = make_shared<op::v1::BinaryConvolution>(
data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad);
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
{1, 1, Dimension::dynamic(), Dimension::dynamic()}));
ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{}));
ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{}));
}


@@ -0,0 +1,157 @@
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/type_prop.hpp"
using namespace std;
using namespace ngraph;
TEST(type_prop, deformable_conv_v1_partial_auto_padding_same)
{
const PartialShape data_batch_shape{1, 4, 5, 5};
const PartialShape deformable_shape{1, 4, 3, 3};
const PartialShape filters_shape{4, 4, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_LOWER;
const int64_t group = 4;
const int64_t deformable_group = 2;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto deformable_values = make_shared<op::Parameter>(element::f32, deformable_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto deformable_conv = make_shared<op::v1::DeformableConvolution>(data_batch,
deformable_values,
filters,
strides,
pads_begin,
pads_end,
dilations,
auto_pad,
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower)
{
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape deformable_shape{1, 4, 3, 3};
const PartialShape filters_shape{4, 4, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_LOWER;
const int64_t group = 4;
const int64_t deformable_group = 2;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto deformable_values = make_shared<op::Parameter>(element::f32, deformable_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto deformable_conv = make_shared<op::v1::DeformableConvolution>(data_batch,
deformable_values,
filters,
strides,
pads_begin,
pads_end,
dilations,
auto_pad,
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{Dimension::dynamic(), 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{1, 1}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper)
{
const PartialShape data_batch_shape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
const PartialShape deformable_shape{1, 4, 2, 2};
const PartialShape filters_shape{4, 4, 2, 2};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_UPPER;
const int64_t group = 4;
const int64_t deformable_group = 2;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto deformable_values = make_shared<op::Parameter>(element::f32, deformable_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto deformable_conv = make_shared<op::v1::DeformableConvolution>(data_batch,
deformable_values,
filters,
strides,
pads_begin,
pads_end,
dilations,
auto_pad,
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
PartialShape{Dimension::dynamic(), 4, 5, 5}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{0, 0}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_spatial_dims_dynamic)
{
const PartialShape data_batch_shape{1, 4, Dimension::dynamic(), 5};
const PartialShape deformable_shape{1, 4, 3, 3};
const PartialShape filters_shape{4, 4, 3, 3};
Strides strides{1, 1};
CoordinateDiff pads_begin{0, 0};
CoordinateDiff pads_end{0, 0};
Strides dilations{1, 1};
const auto auto_pad = op::PadType::SAME_LOWER;
const int64_t group = 4;
const int64_t deformable_group = 2;
auto data_batch = make_shared<op::Parameter>(element::f32, data_batch_shape);
auto deformable_values = make_shared<op::Parameter>(element::f32, deformable_shape);
auto filters = make_shared<op::Parameter>(element::f32, filters_shape);
auto deformable_conv = make_shared<op::v1::DeformableConvolution>(data_batch,
deformable_values,
filters,
strides,
pads_begin,
pads_end,
dilations,
auto_pad,
group,
deformable_group);
ASSERT_TRUE(deformable_conv->get_output_partial_shape(0).same_scheme(
{1, 4, Dimension::dynamic(), Dimension::dynamic()}));
ASSERT_EQ(deformable_conv->get_pads_begin(), (CoordinateDiff{}));
ASSERT_EQ(deformable_conv->get_pads_end(), (CoordinateDiff{}));
}