Enable importing of TF_NASNet_Mobile (#1252)

This commit is contained in:
Adam Osewski
2020-07-14 10:54:39 +02:00
committed by GitHub
parent e8ce8523ed
commit b16c8faceb
9 changed files with 585 additions and 222 deletions

View File

@@ -19,6 +19,7 @@
#include <vector>
#include "default_opset.hpp"
#include "exceptions.hpp"
#include "gather.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
@@ -38,121 +39,143 @@ namespace ngraph
{
namespace op
{
namespace set_10
namespace
{
namespace
std::vector<uint64_t>
get_normalized_axes_vector(const Node& onnx_node,
const Rank& data_rank,
const std::vector<int64_t> axes_attr)
{
/// \brief Transform Slice axes input to mask which is attribute of
/// StridedSlice:v1 interface.
///
/// \note Mask attributes of StridedSlice:10 operator indicates
/// if corresponding begin/end/strides input indices should be applied (0
/// value) or ignored (1 value)
///
/// \param[in] axes Axes input of ONNX Slice operator
/// \param[in] slice_indices_length Length of Slice indices
/// (starts, ends, steps)
///
/// \return Mask attribute in format required by StridedSlice:v1
std::vector<int64_t> axes_to_mask(const std::vector<uint64_t>& axes,
uint64_t slice_indices_length)
if (data_rank.is_static())
{
std::vector<int64_t> mask(slice_indices_length, 1);
for (auto axis : axes)
{
mask[axis] = 0;
}
return mask;
const auto normalized_axes_vec =
normalize_axes(onnx_node.get_description(), axes_attr, data_rank);
return std::vector<uint64_t>(std::begin(normalized_axes_vec),
std::end(normalized_axes_vec));
}
/// \brief Adjust ONNX Slice indices: starts, ends, steps to StridedSlice:v1
/// interface.
///
/// \note StridedSlice:v1 doesn't support an axes parameter.
/// The axes parameter determines to which dimension of the input data the
/// slice operation should be applied.
/// The returned sub-graph provides the proper adjustment of Slice indices if
/// it is needed.
///
/// \param[in] indices Parameters of Slice operator: starts, ends,
/// steps.
/// \param[in] axes Determines dimensions on which slice
/// operation should be applied.
/// \param[in] slice_indices_length Indices length after adjustment
/// \param[in] fill_in_value Neutral value (`0` for starts and ends,
/// `1` for steps) which is set to indices
/// in order to provide adjustment.
///
/// \return Sub-graph representing the adjusted indices, or the input indices
/// unchanged if no adjustment was needed.
std::shared_ptr<ngraph::Node>
adjust_indices_if_needed(const std::shared_ptr<ngraph::Node>& indices,
const std::vector<uint64_t>& axes,
uint64_t slice_indices_length,
int64_t fill_in_value)
else
{
const bool are_axes_sorted = std::is_sorted(axes.begin(), axes.end());
const auto indices_shape = indices->get_output_partial_shape(0);
// if length of slice indices vector is known
if (indices_shape.rank().is_static() &&
indices_shape.rank().get_length() == 1 && indices_shape[0].is_static())
{
if (indices_shape[0].get_length() >= slice_indices_length &&
are_axes_sorted)
{
// adjusting indices is not needed
return indices;
}
}
// Handle a case when starts/ends/steps lengths are less than provided axes
// in order to ensure compatibility with `StridedSlice:v1` interface
// Example:
// data_shape: {3, 3, 3, 3}
// starts: [1, 1] - after extending --> [0, 0, 1, 1]
// ends: [2, 2] - after extending --> [0, 0, 2, 2]
// steps : [0, 1] - after extending --> [1, 1, 0, 1] (`1` is neutral as a
// strides value)
// axes: [2, 3] - apply slice values to 2 and 3 dimension of input data
// expected_output_shape: {3, 3, 1, 1}
OutputVector adjusted_indices(slice_indices_length);
std::vector<uint64_t> target_axes(axes);
const auto gather_axis =
default_opset::Constant::create(element::i64, {}, {0});
int added_indices_number = 0;
for (int i = 0; i < slice_indices_length; ++i)
{
if (std::find(std::begin(axes), std::end(axes), i) == axes.end())
{
adjusted_indices[i] = default_opset::Constant::create(
element::i64, {1}, {fill_in_value});
target_axes.insert(std::next(target_axes.begin(), i), i);
++added_indices_number;
}
else
{
adjusted_indices[i] = std::make_shared<default_opset::Gather>(
indices,
default_opset::Constant::create(
element::i64, {1}, {i - added_indices_number}),
gather_axis);
}
}
if (!are_axes_sorted)
{
OutputVector indices_tmp(adjusted_indices);
for (int i = 0; i < target_axes.size(); ++i)
{
adjusted_indices[target_axes[i]] = indices_tmp[i];
}
}
return std::make_shared<default_opset::Concat>(adjusted_indices, 0);
CHECK_VALID_NODE(onnx_node,
std::all_of(std::begin(axes_attr),
std::end(axes_attr),
[](int64_t axis) { return axis >= 0; }),
"All axes must be positive when data rank is unknown");
return std::vector<uint64_t>(std::begin(axes_attr), std::end(axes_attr));
}
}
/// \brief Convert the ONNX Slice axes into the begin/end mask attribute
/// expected by the StridedSlice:v1 operator.
///
/// \note In a StridedSlice:v1 mask a value of 0 means the corresponding
/// begin/end/strides index is applied, while a value of 1 means it is
/// ignored.
///
/// \param[in] axes Axes input of ONNX Slice operator
/// \param[in] slice_indices_length Length of Slice indices
/// (starts, ends, steps)
///
/// \return Mask attribute in format required by StridedSlice:v1
std::vector<int64_t> axes_to_mask(const std::vector<uint64_t>& axes,
uint64_t slice_indices_length)
{
// Start with every position ignored (1) ...
std::vector<int64_t> mask(slice_indices_length, 1);
// ... then mark each sliced dimension as applied (0).
for (const auto sliced_axis : axes)
{
mask[sliced_axis] = 0;
}
return mask;
}
/// \brief Adjust ONNX Slice indices (starts, ends, steps) to the
/// StridedSlice:v1 interface.
///
/// \note StridedSlice:v1 doesn't support an axes parameter.
/// The axes parameter determines to which dimension of the input data the
/// slice operation should be applied.
/// The returned sub-graph provides the proper adjustment of Slice indices
/// if it is needed.
///
/// \param[in] indices Parameters of Slice operator: starts, ends,
/// steps.
/// \param[in] axes Determines dimensions on which slice
/// operation should be applied.
/// \param[in] slice_indices_length Indices length after adjustment
/// \param[in] fill_in_value Neutral value (`0` for starts and ends,
/// `1` for steps) which is set at positions
/// of dimensions that are not sliced.
///
/// \return Sub-graph representing the adjusted indices, or the input
/// indices unchanged if no adjustment was needed.
std::shared_ptr<ngraph::Node>
adjust_indices_if_needed(const std::shared_ptr<ngraph::Node>& indices,
const std::vector<uint64_t>& axes,
uint64_t slice_indices_length,
int64_t fill_in_value)
{
const bool are_axes_sorted = std::is_sorted(axes.begin(), axes.end());
const auto indices_shape = indices->get_output_partial_shape(0);
// if length of slice indices vector is known
if (indices_shape.rank().is_static() &&
indices_shape.rank().get_length() == 1 && indices_shape[0].is_static())
{
if (indices_shape[0].get_length() >= slice_indices_length &&
are_axes_sorted)
{
// adjusting indices is not needed
return indices;
}
}
// Handle a case when starts/ends/steps lengths are less than provided axes
// in order to ensure compatibility with `StridedSlice:v1` interface
// Example:
// data_shape: {3, 3, 3, 3}
// starts: [1, 1] - after extending --> [0, 0, 1, 1]
// ends: [2, 2] - after extending --> [0, 0, 2, 2]
// steps : [0, 1] - after extending --> [1, 1, 0, 1] (`1` is neutral as a
// strides value)
// axes: [2, 3] - apply slice values to 2 and 3 dimension of input data
// expected_output_shape: {3, 3, 1, 1}
OutputVector adjusted_indices(slice_indices_length);
std::vector<uint64_t> target_axes(axes);
const auto gather_axis = default_opset::Constant::create(element::i64, {}, {0});
// Counts positions filled with the neutral value so far; the i-th sliced
// axis must gather element (i - added_indices_number) from `indices`.
int added_indices_number = 0;
for (int i = 0; i < slice_indices_length; ++i)
{
if (std::find(std::begin(axes), std::end(axes), i) == axes.end())
{
// Dimension i is not sliced: insert the neutral fill-in value and
// record i in target_axes so later reordering stays consistent.
adjusted_indices[i] =
default_opset::Constant::create(element::i64, {1}, {fill_in_value});
target_axes.insert(std::next(target_axes.begin(), i), i);
++added_indices_number;
}
else
{
// Dimension i is sliced: pick the matching element out of the
// original (possibly shorter) indices input.
adjusted_indices[i] = std::make_shared<default_opset::Gather>(
indices,
default_opset::Constant::create(
element::i64, {1}, {i - added_indices_number}),
gather_axis);
}
}
if (!are_axes_sorted)
{
// Axes were provided out of order: permute the gathered values so each
// one lands at the dimension it targets.
OutputVector indices_tmp(adjusted_indices);
for (int i = 0; i < target_axes.size(); ++i)
{
adjusted_indices[target_axes[i]] = indices_tmp[i];
}
}
return std::make_shared<default_opset::Concat>(adjusted_indices, 0);
}
}
namespace set_10
{
NodeVector slice(const Node& node)
{
NodeVector inputs{node.get_ng_inputs()};
@@ -167,12 +190,14 @@ namespace ngraph
if (inputs.size() >= 4) // axes input provided
{
axes = inputs.at(3);
NGRAPH_CHECK(axes->is_constant(), "Axes input must be constant");
CHECK_VALID_NODE(node, axes->is_constant(), "Axes input must be constant");
}
else
{
NGRAPH_CHECK(data_rank.is_static(),
"Data rank must be static when axes input is not provided");
CHECK_VALID_NODE(
node,
data_rank.is_static(),
"Data rank must be static when axes input is not provided");
const size_t data_rank_value = data_rank.get_length();
axes = default_opset::Constant::create(
element::i64,
@@ -182,23 +207,8 @@ namespace ngraph
const auto axes_const = as_type_ptr<default_opset::Constant>(axes);
auto raw_axes_vec = axes_const->cast_vector<int64_t>();
std::vector<uint64_t> axes_vec;
if (data_rank.is_static())
{
const auto normalized_axes_vec =
normalize_axes(node.get_description(), raw_axes_vec, data_rank);
axes_vec = std::vector<uint64_t>(std::begin(normalized_axes_vec),
std::end(normalized_axes_vec));
}
else
{
NGRAPH_CHECK(std::all_of(std::begin(raw_axes_vec),
std::end(raw_axes_vec),
[](int64_t axis) { return axis >= 0; }),
"All axes must be positive when data rank is unknown");
axes_vec =
std::vector<uint64_t>(std::begin(raw_axes_vec), std::end(raw_axes_vec));
}
std::vector<uint64_t> axes_vec =
get_normalized_axes_vector(node, data_rank, raw_axes_vec);
const uint64_t slice_indices_length =
*std::max_element(std::begin(axes_vec), std::end(axes_vec)) + 1;
@@ -231,51 +241,51 @@ namespace ngraph
NodeVector slice(const Node& node)
{
std::shared_ptr<ngraph::Node> data = node.get_ng_inputs().at(0);
Shape data_shape = data->get_shape();
const auto data_rank = data_shape.size();
const auto data_rank = data->get_output_partial_shape(0).rank();
auto starts = node.get_attribute_value<std::vector<int64_t>>("starts");
auto ends = node.get_attribute_value<std::vector<int64_t>>("ends");
const auto starts_atr =
node.get_attribute_value<std::vector<int64_t>>("starts");
const auto ends_atr = node.get_attribute_value<std::vector<int64_t>>("ends");
std::shared_ptr<ngraph::Node> starts =
std::make_shared<default_opset::Constant>(
element::i64, Shape{starts_atr.size()}, starts_atr);
std::shared_ptr<ngraph::Node> ends = std::make_shared<default_opset::Constant>(
element::i64, Shape{ends_atr.size()}, ends_atr);
auto axes = node.get_attribute_value<std::vector<int64_t>>(
"axes", common::get_monotonic_range<int64_t>(data_shape.size()));
"axes", std::vector<int64_t>());
Shape lower_bounds(data_rank);
Shape upper_bounds = data_shape;
for (size_t idx = 0; idx < axes.size(); ++idx)
if (axes.empty())
{
size_t axis = axes.at(idx);
lower_bounds.at(axis) =
get_valid_array_idx(starts.at(idx), data_shape.at(axis));
upper_bounds.at(axis) =
get_valid_array_idx(ends.at(idx), data_shape.at(axis));
CHECK_VALID_NODE(
node,
data_rank.is_static(),
"Data rank must be static when axes input is not provided");
axes = common::get_monotonic_range<int64_t>(data_rank.get_length());
}
// Check for cases when start is greater than end and change them to "empty"
// slice.
for (size_t idx = 0; idx < lower_bounds.size(); ++idx)
{
if (lower_bounds.at(idx) > upper_bounds.at(idx))
{
upper_bounds.at(idx) = lower_bounds.at(idx);
}
}
std::vector<uint64_t> normalized_axes =
get_normalized_axes_vector(node, data_rank, axes);
const auto begin = default_opset::Constant::create(
element::i64, Shape{lower_bounds.size()}, lower_bounds);
const auto end = default_opset::Constant::create(
element::i64, Shape{upper_bounds.size()}, upper_bounds);
const auto strides = default_opset::Constant::create(
element::i64, Shape{data_rank}, std::vector<int64_t>(data_rank, 1));
const uint64_t slice_indices_length =
*std::max_element(std::begin(normalized_axes), std::end(normalized_axes)) +
1;
const auto begin_end_mask = axes_to_mask(normalized_axes, slice_indices_length);
std::shared_ptr<ngraph::Node> strides = default_opset::Constant::create(
element::i64,
Shape{slice_indices_length},
std::vector<int64_t>(slice_indices_length, 1));
starts =
adjust_indices_if_needed(starts, normalized_axes, slice_indices_length, 0);
ends = adjust_indices_if_needed(ends, normalized_axes, slice_indices_length, 0);
strides =
adjust_indices_if_needed(strides, normalized_axes, slice_indices_length, 1);
return {std::make_shared<default_opset::StridedSlice>(
data,
begin,
end,
strides,
std::vector<int64_t>(data_rank, 0),
std::vector<int64_t>(data_rank, 0))};
data, starts, ends, strides, begin_end_mask, begin_end_mask)};
}
} // namespace set_1

View File

@@ -69,67 +69,11 @@ bool ngraph::op::v1::GroupConvolution::visit_attributes(AttributeVisitor& visito
void op::v1::GroupConvolution::validate_and_infer_types()
{
const PartialShape& data_batch_pshape = get_input_partial_shape(0);
PartialShape data_batch_shape = get_input_partial_shape(0);
PartialShape filters_shape = get_input_partial_shape(1);
element::Type data_batch_et = get_input_element_type(0);
const PartialShape& filters_pshape = get_input_partial_shape(1);
element::Type filters_et = get_input_element_type(1);
PartialShape result_shape{PartialShape::dynamic()};
// we need to adjust filters_shape to reuse helpers for normal convolution
if (filters_pshape.is_static() && data_batch_pshape.is_static())
{
auto filters_shape = filters_pshape.to_shape();
auto groups = filters_shape[0];
filters_shape[1] *= groups;
filters_shape.erase(filters_shape.begin());
auto data_batch_shape = data_batch_pshape.to_shape();
data_batch_shape[1] /= groups;
if (m_strides.size() == 0)
{
m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_dilations.size() == 0)
{
m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_pads_begin.size() == 0 || m_auto_pad == PadType::VALID)
{
m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_pads_end.size() == 0 || m_auto_pad == PadType::VALID)
{
m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
m_pads_begin.clear();
m_pads_end.clear();
infer_auto_padding(
data_batch_shape,
Shape(filters_shape.begin() + 2, filters_shape.end()), // Remove {O,I}
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
}
result_shape =
infer_convolution_forward(this,
data_batch_shape,
Strides(m_strides.size(), 1), // dummy data dilations
m_pads_begin,
m_pads_end,
filters_shape,
m_strides,
m_dilations);
}
element::Type result_et;
NODE_VALIDATION_CHECK(
@@ -141,6 +85,90 @@ void op::v1::GroupConvolution::validate_and_infer_types()
filters_et,
").");
PartialShape result_shape{PartialShape::dynamic()};
if (data_batch_shape.rank().is_static())
{
result_shape =
std::vector<Dimension>(data_batch_shape.rank().get_length(), Dimension::dynamic());
result_shape[0] = data_batch_shape[0];
}
Dimension groups(1);
// we need to adjust filters_shape to reuse helpers for normal convolution
if (filters_shape.rank().is_static() && filters_shape.rank().get_length() > 2)
{
groups = filters_shape[0];
filters_shape[1] *= groups;
auto dim_vec = static_cast<std::vector<Dimension>>(filters_shape);
dim_vec.erase(dim_vec.begin());
filters_shape = PartialShape(dim_vec);
if (data_batch_shape.rank().is_static())
{
result_shape[1] = filters_shape[0];
}
}
if (data_batch_shape.rank().is_static() && data_batch_shape.rank().get_length() > 2 &&
data_batch_shape[1].is_static())
{
data_batch_shape[1] = Dimension(data_batch_shape[1].get_length() / groups.get_length());
}
if (m_strides.size() == 0)
{
m_strides = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_dilations.size() == 0)
{
m_dilations = conv_default_strides(this, data_batch_shape, filters_shape);
}
if (m_pads_begin.size() == 0 || m_auto_pad == PadType::VALID)
{
m_pads_begin = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_pads_end.size() == 0 || m_auto_pad == PadType::VALID)
{
m_pads_end = conv_default_padding(this, data_batch_shape, filters_shape);
}
if (m_auto_pad == PadType::SAME_UPPER || m_auto_pad == PadType::SAME_LOWER)
{
bool auto_padding_applied = false;
if (filters_shape.is_static())
{
m_pads_begin.clear();
m_pads_end.clear();
auto filters_static_shape = filters_shape.to_shape();
filters_static_shape.erase(filters_static_shape.begin(),
filters_static_shape.begin() + 2); // Remove {O,I}
auto_padding_applied = try_apply_auto_padding(data_batch_shape,
filters_static_shape,
m_strides,
m_dilations,
m_auto_pad,
m_pads_end,
m_pads_begin);
}
if (!auto_padding_applied)
{
set_output_type(0, result_et, result_shape);
return;
}
}
result_shape = infer_convolution_forward(this,
data_batch_shape,
Strides(m_strides.size(), 1), // dummy data dilations
m_pads_begin,
m_pads_end,
filters_shape,
m_strides,
m_dilations);
set_output_type(0, result_et, result_shape);
}

View File

@@ -40,7 +40,7 @@ namespace ngraph
/// \param data_batch The node producing the input data batch tensor.<br>
/// `[N, C_IN, D1, ... Df]`
/// \param filters The node producing the filters tensor.<br>
/// `[C_OUT, C_IN, F1, ... Ff]`
/// `[GROUPS, FC_OUT, FC_IN, F1, ... Ff]`
/// \param strides The strides.<br>
/// `[f]`
/// \param dilations The dilations.<br>
@@ -52,7 +52,7 @@ namespace ngraph
/// \param auto_pad The pad type for automatically computing padding sizes.<br>
/// `[f]`
///
/// Output `[N, C_OUT, R1, ... Rf]`
/// Output `[N, FC_OUT * GROUPS, R1, ... Rf]`
///
GroupConvolution(const Output<Node>& data_batch,
const Output<Node>& filters,

View File

@@ -0,0 +1,58 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "test_slice"
node {
input: "data"
output: "sliced"
name: "Slice"
op_type: "Slice"
attribute {
name: "axes"
ints: 0
ints: 1
type: INTS
}
attribute {
name: "starts"
ints: 0
ints: 1
type: INTS
}
attribute {
name: "ends"
ints: -1
ints: 1000
type: INTS
}
}
input {
name: "data"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "sliced"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 1
}

View File

@@ -0,0 +1,58 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "test_slice"
node {
input: "data"
output: "sliced"
name: "Slice"
op_type: "Slice"
attribute {
name: "axes"
ints: 0
ints: 1
type: INTS
}
attribute {
name: "starts"
ints: 1
ints: 0
type: INTS
}
attribute {
name: "ends"
ints: 2
ints: 4
type: INTS
}
}
input {
name: "data"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 2
}
dim {
dim_value: 4
}
}
}
}
}
output {
name: "sliced"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 1
}

View File

@@ -0,0 +1,64 @@
ir_version: 6
producer_name: "nGraph ONNX Importer"
graph {
name: "test_slice"
node {
input: "data"
output: "sliced"
name: "Slice"
op_type: "Slice"
attribute {
name: "axes"
ints: 3
ints: 1
type: INTS
}
attribute {
name: "starts"
ints: -3
ints: 1
type: INTS
}
attribute {
name: "ends"
ints: 2147483647
ints: 2147483647
type: INTS
}
}
input {
name: "data"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 1
}
dim {
dim_param: "C"
}
dim {
dim_param: "H"
}
dim {
dim_param: "W"
}
}
}
}
}
output {
name: "sliced"
type {
tensor_type {
elem_type: 1
shape {
}
}
}
}
}
opset_import {
version: 1
}

View File

@@ -1223,3 +1223,40 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_upsample9_scales_input_nearest_infer)
3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0});
test_case.run();
}
// Opset-1 Slice on a 2D input: the flat input {1..8} is sliced down to
// {5, 6, 7, 8} with output shape {1, 4}.
NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_1_2d_input)
{
    const auto model = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/dynamic_shapes/slice_2d_input_opset1.prototxt"));

    auto tc = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(model);
    tc.add_input<float>(std::vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    tc.add_expected_output<float>(Shape{1, 4}, {5, 6, 7, 8});
    tc.run();
}
// Opset-1 Slice whose model (per its name) uses negative `ends` that must be
// clamped; the input {1..8} yields {2, 3, 4} with output shape {1, 3}.
NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_1_clamp_neg_ends)
{
    const auto model = onnx_import::import_onnx_model(file_util::path_join(
        SERIALIZED_ZOO, "onnx/dynamic_shapes/slice_2d_clamp_neg_ends_opset1.prototxt"));

    auto tc = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(model);
    tc.add_input<float>(std::vector<float>{1, 2, 3, 4, 5, 6, 7, 8});
    tc.add_expected_output<float>(Shape{1, 3}, {2, 3, 4});
    tc.run();
}
// Opset-1 Slice over a {1, 2, 3, 4} iota input; the model (per its name)
// slices axes {2, 1} with INT32_MAX ends, expecting a {1, 1, 3, 3} output.
NGRAPH_TEST(${BACKEND_NAME}, onnx_dyn_shapes_slice_1_3d_input_21_axes_ends_max)
{
    const auto model = onnx_import::import_onnx_model(file_util::path_join(
        SERIALIZED_ZOO, "onnx/dynamic_shapes/slice_3d_input_21_axes_ends_max_opset1.prototxt"));

    auto tc = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(model);

    const Shape in_shape{1, 2, 3, 4};
    std::vector<float> in_values(shape_size(in_shape));
    std::iota(in_values.begin(), in_values.end(), 0);

    tc.add_input<float>(in_shape, in_values);
    tc.add_expected_output<float>(Shape{1, 1, 3, 3}, {13, 14, 15, 17, 18, 19, 21, 22, 23});
    tc.run();
}

View File

@@ -167,6 +167,8 @@ arg_min_no_keep_dims_dyn_shape
# Dynamic function 'get_shape was called on a descriptor::Tensor with dynamic shape'
onnx_upsample9_scales_input_nearest_infer
onnx_dyn_shapes_slice_1_3d_input_21_axes_ends_max
#-------------------------------------------------------------------------------
#

View File

@@ -127,3 +127,109 @@ TEST(type_prop, group_conv_invalid_groups)
FAIL() << "Deduced type check failed for unexpected reason";
}
}
// SAME_LOWER auto-padding, fully static shapes: 3x3 kernels keep the 5x5
// spatial dims and symmetric pads {1, 1}/{1, 1} are deduced.
TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower)
{
    const PartialShape data_pshape{1, 4, 5, 5};
    const PartialShape filters_pshape{2, 1, 2, 3, 3};
    const Strides strides{1, 1};
    const Strides dilations{1, 1};
    const CoordinateDiff pads_begin{0, 0};
    const CoordinateDiff pads_end{0, 0};

    const auto data = make_shared<op::Parameter>(element::f32, data_pshape);
    const auto filters = make_shared<op::Parameter>(element::f32, filters_pshape);
    const auto conv = make_shared<op::v1::GroupConvolution>(
        data, filters, strides, pads_begin, pads_end, dilations, op::PadType::SAME_LOWER);

    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, 5, 5}));
    ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
    ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// SAME_UPPER auto-padding, fully static shapes: 2x2 kernels keep the 5x5
// spatial dims with asymmetric pads {0, 0}/{1, 1}.
TEST(type_prop, group_conv_v1_partial_auto_padding_same_upper)
{
    const PartialShape data_pshape{1, 4, 5, 5};
    const PartialShape filters_pshape{2, 1, 2, 2, 2};
    const Strides strides{1, 1};
    const Strides dilations{1, 1};
    const CoordinateDiff pads_begin{0, 0};
    const CoordinateDiff pads_end{0, 0};

    const auto data = make_shared<op::Parameter>(element::f32, data_pshape);
    const auto filters = make_shared<op::Parameter>(element::f32, filters_pshape);
    const auto conv = make_shared<op::v1::GroupConvolution>(
        data, filters, strides, pads_begin, pads_end, dilations, op::PadType::SAME_UPPER);

    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, 5, 5}));
    ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
    ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// SAME_LOWER with dynamic batch and channel dims: padding can still be
// deduced from the static 5x5 spatial dims.
TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower_nc_dims_dynamic)
{
    const PartialShape data_pshape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
    const PartialShape filters_pshape{2, 1, 2, 3, 3};
    const Strides strides{1, 1};
    const Strides dilations{1, 1};
    const CoordinateDiff pads_begin{0, 0};
    const CoordinateDiff pads_end{0, 0};

    const auto data = make_shared<op::Parameter>(element::f32, data_pshape);
    const auto filters = make_shared<op::Parameter>(element::f32, filters_pshape);
    const auto conv = make_shared<op::v1::GroupConvolution>(
        data, filters, strides, pads_begin, pads_end, dilations, op::PadType::SAME_LOWER);

    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 2, 5, 5}));
    ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{1, 1}));
    ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// SAME_UPPER with dynamic batch and channel dims: asymmetric pads
// {0, 0}/{1, 1} are deduced from the static 5x5 spatial dims.
TEST(type_prop, group_conv_v1_partial_auto_padding_same_upper_nc_dims_dynamic)
{
    const PartialShape data_pshape{Dimension::dynamic(), Dimension::dynamic(), 5, 5};
    const PartialShape filters_pshape{2, 1, 2, 2, 2};
    const Strides strides{1, 1};
    const Strides dilations{1, 1};
    const CoordinateDiff pads_begin{0, 0};
    const CoordinateDiff pads_end{0, 0};

    const auto data = make_shared<op::Parameter>(element::f32, data_pshape);
    const auto filters = make_shared<op::Parameter>(element::f32, filters_pshape);
    const auto conv = make_shared<op::v1::GroupConvolution>(
        data, filters, strides, pads_begin, pads_end, dilations, op::PadType::SAME_UPPER);

    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme({Dimension::dynamic(), 2, 5, 5}));
    ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{0, 0}));
    ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{1, 1}));
}
// SAME_LOWER with a dynamic spatial dim: auto-padding cannot be applied, so
// spatial output dims stay dynamic and both pad vectors remain empty.
TEST(type_prop, group_conv_v1_partial_auto_padding_same_spatial_dims_dynamic)
{
    const PartialShape data_pshape{1, 4, Dimension::dynamic(), 5};
    const PartialShape filters_pshape{2, 1, 2, 3, 3};
    const Strides strides{1, 1};
    const Strides dilations{1, 1};
    const CoordinateDiff pads_begin{0, 0};
    const CoordinateDiff pads_end{0, 0};

    const auto data = make_shared<op::Parameter>(element::f32, data_pshape);
    const auto filters = make_shared<op::Parameter>(element::f32, filters_pshape);
    const auto conv = make_shared<op::v1::GroupConvolution>(
        data, filters, strides, pads_begin, pads_end, dilations, op::PadType::SAME_LOWER);

    ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(
        {1, 2, Dimension::dynamic(), Dimension::dynamic()}));
    ASSERT_EQ(conv->get_pads_begin(), (CoordinateDiff{}));
    ASSERT_EQ(conv->get_pads_end(), (CoordinateDiff{}));
}