Dynamic shape support for builder collapse (#1971)

This commit is contained in:
Gabriele Galiero Casay
2020-08-31 18:26:36 +02:00
committed by GitHub
parent b176578e13
commit deeb0577f2
5 changed files with 165 additions and 15 deletions

View File

@@ -86,6 +86,21 @@ namespace ngraph
NGRAPH_API
std::shared_ptr<Node> squeeze(const Output<Node>& value,
std::vector<std::size_t> axes = {0});
/// \brief Collapse specified axes into single one.
///
/// \note Collapsed axes create a continuous range starting from outermost axis.
///       The range is inclusive: axes start_axis..end_axis are merged into one
///       axis whose extent is the product of their extents. When
///       start_axis == end_axis the input value is returned unchanged.
///       Values with a dynamic shape are supported: the target shape is then
///       computed at runtime by an auxiliary subgraph feeding a Reshape node.
///
/// \param[in] value The value to be reshaped.
/// \param[in] start_axis The start axis index.
/// \param[in] end_axis The end axis (inclusive) index.
///
/// \return The node with collapsed specified axes.
///
NGRAPH_API
std::shared_ptr<Node> collapse(const Output<Node>& value,
const std::size_t start_axis,
const std::size_t end_axis);
}
} // namespace builder
} // namespace ngraph

View File

@@ -73,19 +73,6 @@ Output<Node> builder::MatmulFactory::get_right()
OutputVector builder::MatmulFactory::make_matmul_op()
{
auto collapse = [](const Output<Node>& value, const size_t start_axis, const size_t end_axis) {
auto shape = value.get_shape();
size_t collapsed_axis_size = accumulate(next(begin(shape), start_axis),
next(begin(shape), end_axis + 1),
1UL,
multiplies<size_t>());
Shape output_shape{collapsed_axis_size};
output_shape.insert(end(output_shape), next(begin(shape), end_axis + 1), end(shape));
return make_shared<op::Reshape>(
value, get_default_order(value.get_shape().size()), output_shape)
->add_provenance_group_members_above({value});
};
auto left = get_left();
auto right = get_right();
@@ -120,11 +107,11 @@ OutputVector builder::MatmulFactory::make_matmul_op()
// This will make easier further dot product calculations.
if (left_shape.size() > 3)
{
left = collapse(left, 0, left_shape.size() - 3);
left = builder::opset1::collapse(left, 0, left_shape.size() - 3);
}
if (right_shape.size() > 3)
{
right = collapse(right, 0, right_shape.size() - 3);
right = builder::opset1::collapse(right, 0, right_shape.size() - 3);
}
// Perform multiple small dot products

View File

@@ -202,3 +202,53 @@ shared_ptr<Node> builder::opset1::squeeze(const Output<Node>& value, vector<size
}
return builder::opset1::reshape(value, output_shape);
}
/// \brief  Collapse axes [start_axis, end_axis] (inclusive) of \p value into one axis.
///
/// For statically-shaped inputs the target shape is computed on the host and a
/// plain reshape is emitted. For dynamically-shaped inputs an auxiliary subgraph
/// (ShapeOf -> VariadicSplit -> ReduceProd -> Concat) computes the target shape
/// at runtime and feeds a Reshape node.
shared_ptr<Node> builder::opset1::collapse(const Output<Node>& value,
                                           const size_t start_axis,
                                           const size_t end_axis)
{
    // An empty axis range means there is nothing to collapse. Using `>=` (the
    // original used `==`) additionally guards against a reversed range, which
    // would otherwise form an invalid iterator range in the static branch and a
    // negative split length in the dynamic branch.
    if (start_axis >= end_axis)
    {
        return value.get_node_shared_ptr();
    }

    if (value.get_partial_shape().is_static())
    {
        auto shape = value.get_shape();
        // Multiply all elements of shape from start_axis to end_axis inclusive.
        // The init value must be size_t: std::accumulate's accumulator has the
        // type of init, and `1UL` is only 32 bits wide on LLP64 platforms.
        size_t collapsed_axis_size = accumulate(next(begin(shape), start_axis),
                                                next(begin(shape), end_axis + 1),
                                                size_t{1},
                                                multiplies<size_t>());
        Shape output_shape{};
        output_shape.insert(begin(output_shape), begin(shape), next(begin(shape), start_axis));
        output_shape.insert(end(output_shape), collapsed_axis_size);
        output_shape.insert(end(output_shape), next(begin(shape), end_axis + 1), end(shape));
        return builder::opset1::reshape(value, output_shape);
    }

    // Dynamic shape: compute the target shape at runtime from the input's shape.
    const auto shape = make_shared<ngraph::opset1::ShapeOf>(value);
    const auto rank = make_shared<ngraph::opset1::ShapeOf>(shape);

    // Split lengths used in VariadicSplit:
    // [start_axis, end_axis + 1 - start_axis, rank - (end_axis + 1)]
    const auto start_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {start_axis});
    const auto end_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {end_axis + 1});
    const auto collapsed_axis =
        make_shared<ngraph::opset1::Subtract>(end_axis_node, start_axis_node);
    const auto post_axis = make_shared<ngraph::opset1::Subtract>(rank, end_axis_node);
    const auto split_lengths = make_shared<ngraph::opset1::Concat>(
        OutputVector{start_axis_node, collapsed_axis, post_axis}, 0);
    const auto split_axis = ngraph::opset1::Constant::create(element::i64, {}, {0});
    const auto split_node =
        make_shared<ngraph::opset1::VariadicSplit>(shape, split_axis, split_lengths);

    // Product of the collapsed extents; keep_dims=true keeps it 1-D for the Concat.
    const auto reduced_axis = ngraph::opset1::Constant::create(element::i64, {1}, {0});
    const auto collapsed_axis_size =
        make_shared<ngraph::opset1::ReduceProd>(split_node->output(1), reduced_axis, true);

    // Target shape: [leading axes, collapsed extent, trailing axes].
    const auto collapsed_shape = make_shared<ngraph::opset1::Concat>(
        OutputVector{split_node->output(0), collapsed_axis_size, split_node->output(2)}, 0);
    return make_shared<ngraph::opset1::Reshape>(value, collapsed_shape, false);
}

View File

@@ -81,3 +81,94 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean_dynamic_2)
test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_5d_to_3d)
{
    // Collapsing axes 1..3 of a {1, 2, 3, 4, 5} tensor yields {1, 24, 5}.
    const Shape input_shape{1, 2, 3, 4, 5};
    const Shape expected_shape{1, 24, 5};

    const auto param = make_shared<op::Parameter>(element::f32, input_shape);
    const auto collapsed = builder::opset1::collapse(param, 1, input_shape.size() - 2);
    const auto function = make_shared<Function>(collapsed, ParameterVector{param});

    // Element values are irrelevant to the reshape; only the shapes are checked.
    const vector<float> ones(shape_size(input_shape), 1);

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<float>(input_shape, ones);
    test_case.add_expected_output<float>(expected_shape, ones);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_all_dims)
{
    // Collapsing axes 0..5 flattens the whole {1, 2, 3, 4, 5, 6} tensor to {720}.
    const Shape input_shape{1, 2, 3, 4, 5, 6};
    const Shape expected_shape{720};

    const auto param = make_shared<op::Parameter>(element::f32, input_shape);
    const auto collapsed = builder::opset1::collapse(param, 0, input_shape.size() - 1);
    const auto function = make_shared<Function>(collapsed, ParameterVector{param});

    // Element values are irrelevant to the reshape; only the shapes are checked.
    const vector<float> ones(shape_size(input_shape), 1);

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<float>(input_shape, ones);
    test_case.add_expected_output<float>(expected_shape, ones);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_none)
{
    // start_axis == end_axis (2 == 6 - 4), so collapse() must return the
    // value unchanged and the output shape equals the input shape.
    const Shape input_shape{1, 2, 3, 4, 5, 6};

    const auto param = make_shared<op::Parameter>(element::f32, input_shape);
    const auto collapsed = builder::opset1::collapse(param, 2, input_shape.size() - 4);
    const auto function = make_shared<Function>(collapsed, ParameterVector{param});

    // Element values are irrelevant; only the (unchanged) shape is checked.
    const vector<float> ones(shape_size(input_shape), 1);

    auto test_case = test::TestCase<TestEngine>(function);
    test_case.add_input<float>(input_shape, ones);
    test_case.add_expected_output<float>(input_shape, ones);
    test_case.run();
}
NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_dyn_shape)
{
    // Collapse axes 1..3 of a tensor whose last dimension is dynamic:
    // {1, 2, 3, 4, 5, ?} -> {1, 24, 5, ?}.
    PartialShape pshape_input{1, 2, 3, 4, 5, Dimension()};
    const auto A = make_shared<op::Parameter>(element::f32, pshape_input);
    EXPECT_TRUE(A->get_output_partial_shape(0).same_scheme(
        PartialShape{1, 2, 3, 4, 5, Dimension::dynamic()}));
    const auto builder_collapse = builder::opset1::collapse(A, 1, 3);
    const auto f = make_shared<Function>(builder_collapse, ParameterVector{A});
    auto test_case = test::TestCase<TestEngine, TestCaseType::DYNAMIC>(f);

    // Exercise several concrete sizes of the dynamic dimension.
    // NOTE(review): the loop covers dim = 1..NUM_DIMENSIONS_TO_TEST-1 (four
    // values); use `<=` if five values are intended — confirm intent.
    const size_t NUM_DIMENSIONS_TO_TEST = 5;
    for (size_t dim = 1; dim < NUM_DIMENSIONS_TO_TEST; dim++)
    {
        Shape shape_input{1, 2, 3, 4, 5, dim};
        Shape shape_output{1, 24, 5, dim};
        const auto elems_in_tensor = shape_size(shape_input);

        // Element values are irrelevant; only the resulting shape is checked,
        // so the same buffer serves as input and expected output.
        std::vector<float> input_values(elems_in_tensor, 1);
        test_case.add_input<float>(shape_input, input_values);
        test_case.add_expected_output<float>(shape_output, input_values);
        test_case.run();
    }
}

View File

@@ -79,6 +79,7 @@ bool_const_op
onnx_model_tile
onnx_model_tile_static
onnx_model_softmax_0D
builder_opset1_collapse_none
# nGraph function's output number 0 was not found in the CNNNetwork built from it.
onnx_model_split_equal_parts_2d
@@ -1078,6 +1079,9 @@ IE_CPU.atanh
IE_CPU.asinh
IE_CPU.acosh
# Unsupported collapse op with dynamic shape
IE_CPU.builder_opset1_collapse_dyn_shape
# Interpolate-1 in linear mode
# 2.666666507720947266 is not close to 3 at index 1
IE_CPU.interpolate_down_scales_const_linear
@@ -1434,6 +1438,9 @@ IE_GPU.matmul_2x3_3x3
IE_GPU.matmul_3x2_3x3_transpose
IE_GPU.matmul_3x2_2x3_transpose
# Unsupported collapse op with dynamic shape
IE_GPU.builder_opset1_collapse_dyn_shape
IE_GPU.onnx_model_fake_quantize_const_inputs_infer
IE_GPU.onnx_model_fake_quantize_nonconst_inputs_infer