From cb241a8e4a937a22c1cb37b9b1e276e0c1f960dd Mon Sep 17 00:00:00 2001
From: Pawel Raasz
Date: Thu, 16 Mar 2023 20:22:03 +0100
Subject: [PATCH] [Core] Non-constant support for b2s and s2b nodes (#16290)

* Fix shape inference for non-constant inputs of the operators:
  - batch to space
  - space to batch

* Evaluate of b2s and s2b supports all inputs provided as Parameters
  - update template plugin tests to use Parameters instead of Constants
---
 .../batch_to_space_shape_inference.hpp        | 70 ++++++++++++-------
 .../space_to_batch_shape_inference.hpp        | 59 ++++++++++------
 src/core/src/op/batch_to_space.cpp            | 63 ++++++-----------
 src/core/src/op/space_to_batch.cpp            | 23 +++++-
 src/core/tests/type_prop/batch_to_space.cpp   | 42 +++++++++--
 src/core/tests/type_prop/space_to_batch.cpp   | 51 +++++++++++---
 .../op_reference/batch_to_space.cpp           | 14 ++--
 .../op_reference/space_to_batch.cpp           | 14 ++--
 8 files changed, 223 insertions(+), 113 deletions(-)

diff --git a/src/core/shape_inference/include/batch_to_space_shape_inference.hpp b/src/core/shape_inference/include/batch_to_space_shape_inference.hpp
index fb725928079..33a603dd2c4 100644
--- a/src/core/shape_inference/include/batch_to_space_shape_inference.hpp
+++ b/src/core/shape_inference/include/batch_to_space_shape_inference.hpp
@@ -5,10 +5,11 @@
 #pragma once
 
 #include
-#include
-#include
-#include
 
+#include "dimension_util.hpp"
+#include "openvino/core/validation_util.hpp"
+#include "openvino/op/batch_to_space.hpp"
+#include "openvino/opsets/opset2.hpp"
 #include "utils.hpp"
 
 namespace ov {
@@ -19,6 +20,7 @@ template
 std::vector shape_infer(const BatchToSpace* op,
                         const std::vector& input_shapes,
                         const std::map& constant_data = {}) {
+    using namespace ov::util;
     using ValType = typename TShape::value_type::value_type;
 
     NODE_VALIDATION_CHECK(op, input_shapes.size() == 4);
@@ -43,13 +45,15 @@ std::vector shape_infer(const BatchToSpace* op,
                           "block_shape and crops inputs must have rank 1. Got: ",
                           inputs_same_ps.rank());
 
-    const ov::Rank data_rank = data_shape.rank();
+    const auto data_rank = data_shape.rank();
     if (data_rank.is_static()) {
         constexpr size_t spatial_dim_offset = 1;
+        const auto data_rank_size = data_shape.size();
+
         NODE_VALIDATION_CHECK(op,
-                              (data_shape.size() > spatial_dim_offset),
+                              (data_rank_size > spatial_dim_offset),
                               "data input must have rank greater or equal than 2. 
Got: ", - data_shape.size()); + data_rank_size); if (inputs_same_ps.is_static()) { NODE_VALIDATION_CHECK(op, data_rank.get_length() == inputs_same_ps[0].get_length(), @@ -60,38 +64,52 @@ std::vector shape_infer(const BatchToSpace* op, data_rank); } - auto out_shape = data_shape; - std::vector block_val, crops_begin_val, crops_end_val; + TShape out_shape; + out_shape.reserve(data_rank_size); - if (get_data_as_int64(1, op, block_val, constant_data) && - get_data_as_int64(2, op, crops_begin_val, constant_data) && - get_data_as_int64(3, op, crops_end_val, constant_data)) { + const auto blocks = get_input_const_data_as(op, 1, constant_data); + if (blocks) { NODE_VALIDATION_CHECK(op, - std::none_of(begin(block_val), end(block_val), cmp::Less(1)), + std::none_of(begin(*blocks), end(*blocks), cmp::Less(1)), "Elements of block_shape input must be greater or equal to one."); + const auto divisor = static_cast( + std::accumulate(begin(*blocks), end(*blocks), int64_t(1), std::multiplies())); + out_shape.push_back(data_shape[0] / divisor); + check_divided_result(op, out_shape[0], data_shape[0], divisor); + } else { + out_shape.emplace_back(dim::inf_bound); + } + std::vector crops_begin_val, crops_end_val; + if (get_data_as_int64(2, op, crops_begin_val, constant_data) && + get_data_as_int64(3, op, crops_end_val, constant_data)) { constexpr auto is_invalid_crop = cmp::Less(0); NODE_VALIDATION_CHECK(op, std::none_of(begin(crops_begin_val), end(crops_begin_val), is_invalid_crop) && std::none_of(begin(crops_end_val), end(crops_end_val), is_invalid_crop), "Elements of crops_begin and crops_end inputs must be greater or equal to zero."); - const auto divisor = static_cast( - std::accumulate(begin(block_val), end(block_val), int64_t(1), std::multiplies())); + if (blocks) { + for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { + auto d = data_shape[idx] * static_cast((*blocks)[idx]); + auto crop = static_cast(crops_begin_val[idx] + crops_end_val[idx]); + NODE_VALIDATION_CHECK( + op, + d.is_dynamic() || crop <= d.get_length(), + "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]"); - out_shape[0] /= divisor; - check_divided_result(op, out_shape[0], data_shape[0], divisor); - - for (auto idx = spatial_dim_offset; idx < out_shape.size(); ++idx) { - out_shape[idx] *= static_cast(block_val[idx]); - auto crop = static_cast(crops_begin_val[idx] + crops_end_val[idx]); - NODE_VALIDATION_CHECK( - op, - out_shape[idx].is_dynamic() || crop <= out_shape[idx].get_length(), - "crops_begin[i] + crops_end[i] must be less or equal to block_shape[i] * input_shape[i]"); - - out_shape[idx] = out_shape[idx] - crop; + out_shape.push_back(d - crop); + } + } else { + const auto block = Dimension(1, dim::inf_bound); + for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { + auto d = data_shape[idx] * block; + auto crop = static_cast(crops_begin_val[idx] + crops_end_val[idx]); + out_shape.push_back(d - crop); + } } + } else { + out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, Dimension::dynamic()); } return {out_shape}; } else { diff --git a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp index 792c7ddc776..7cc04ec0ac3 100644 --- a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp +++ b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp @@ -5,10 +5,11 @@ #pragma once #include -#include -#include -#include +#include 
"dimension_util.hpp" +#include "openvino/core/validation_util.hpp" +#include "openvino/op/space_to_batch.hpp" +#include "openvino/opsets/opset2.hpp" #include "utils.hpp" namespace ov { @@ -19,6 +20,7 @@ template std::vector shape_infer(const SpaceToBatch* op, const std::vector& input_shapes, const std::map& constant_data = {}) { + using namespace ov::util; using TVal = typename TShape::value_type::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); @@ -45,30 +47,45 @@ std::vector shape_infer(const SpaceToBatch* op, if (data_shape.rank().is_static()) { constexpr size_t spatial_dim_offset = 1; + const auto data_rank_size = data_shape.size(); NODE_VALIDATION_CHECK(op, - (data_shape.size() > spatial_dim_offset), + (data_rank_size > spatial_dim_offset), "The data tensor with rank lower than 2 is not supported (data rank: ", - data_shape.size(), + data_rank_size, ")"); - auto out_shape = data_shape; - std::vector block, pads_begin, pads_end; - if (get_data_as_int64(1, op, block, constant_data) && - get_data_as_int64(2, op, pads_begin, constant_data) && - get_data_as_int64(3, op, pads_end, constant_data)) { - TVal block_prod = std::accumulate(begin(block), end(block), 1, std::multiplies()); + TShape out_shape; + out_shape.reserve(data_rank_size); - out_shape[0] *= block_prod; - for (auto idx = spatial_dim_offset; idx < out_shape.size(); ++idx) { - NODE_VALIDATION_CHECK(op, block[idx] > 0, "block_shape values must be greater than 0"); - if (out_shape[idx].is_static() || out_shape[idx] != Dimension::dynamic()) { - const auto padded_dim = out_shape[idx] + static_cast(pads_begin[idx] + pads_end[idx]); - const auto divisor = static_cast(block[idx]); - out_shape[idx] = padded_dim / divisor; - check_divided_result(op, out_shape[idx], padded_dim, divisor); - } - } + auto blocks = get_input_const_data_as(op, 1, constant_data); + if (blocks) { + TVal block_prod = std::accumulate(begin(*blocks), end(*blocks), 1, std::multiplies()); + out_shape.push_back(data_shape[0] * block_prod); + } else { + out_shape.emplace_back(dim::inf_bound); } + + std::vector pads_begin, pads_end; + if (blocks && get_data_as_int64(2, op, pads_begin, constant_data) && + get_data_as_int64(3, op, pads_end, constant_data)) { + for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { + NODE_VALIDATION_CHECK(op, (*blocks)[idx] > 0, "block_shape values must be greater than 0"); + + const auto padded_dim = data_shape[idx] + static_cast(pads_begin[idx] + pads_end[idx]); + const auto divisor = static_cast((*blocks)[idx]); + + if (padded_dim.get_max_length() == dim::inf_bound) { + out_shape.emplace_back(ceil_div(padded_dim.get_min_length(), divisor), dim::inf_bound); + } else { + out_shape.push_back(padded_dim / divisor); + } + + check_divided_result(op, out_shape[idx], padded_dim, divisor); + } + } else { + out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, dim::inf_bound); + } + return {out_shape}; } else { return {PartialShape::dynamic()}; diff --git a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp index 6541a90765b..dfac266a0d0 100644 --- a/src/core/src/op/batch_to_space.cpp +++ b/src/core/src/op/batch_to_space.cpp @@ -78,55 +78,16 @@ bool ngraph::op::v1::BatchToSpace::visit_attributes(ngraph::AttributeVisitor& vi namespace { bool batch_to_space_evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) { auto data = inputs[0]; - size_t elem_size = data->get_element_type().size(); + const auto elem_size = data->get_element_type().size(); - if 
(data->get_partial_shape().is_dynamic()) { - return false; - } auto data_shape = data->get_shape(); - auto data_rank = data_shape.size(); - if (data_rank < 2) { - return false; - } - size_t block_values_size = shape_size(inputs[1]->get_shape()); - size_t crops_begin_size = shape_size(inputs[2]->get_shape()); - size_t crops_end_size = shape_size(inputs[3]->get_shape()); - NGRAPH_CHECK(block_values_size == data_rank && crops_begin_size == data_rank && crops_end_size == data_rank, - "Invalid block_shape/crops_begin/crops_end shape with respect to rank of data input"); + auto const block_values_size = shape_size(inputs[1]->get_shape()); const auto* block_values = inputs[1]->get_data_ptr(); const auto* crops_begin_values = inputs[2]->get_data_ptr(); const auto* crops_end_values = inputs[3]->get_data_ptr(); - const bool block_vals_valid = std::all_of(block_values, block_values + block_values_size, [](int64_t elem) { - return elem >= 1; - }); - NGRAPH_CHECK(block_vals_valid, "Invalid element values of block_shape input"); - - const bool crops_begin_vals_valid = - std::all_of(crops_begin_values, crops_begin_values + crops_begin_size, [](int64_t elem) { - return elem >= 0; - }); - const bool crops_end_vals_valid = - std::all_of(crops_end_values, crops_end_values + crops_end_size, [](int64_t elem) { - return elem >= 0; - }); - NGRAPH_CHECK(crops_begin_vals_valid && crops_end_vals_valid, - "Invalid element values of crops_begin/crops_end input/s"); - - const std::size_t block_prod = - std::accumulate(block_values, block_values + block_values_size, int64_t(1), std::multiplies()); - NGRAPH_CHECK(data_shape[0] % block_prod == 0, - "Invalid batch axis of data input with respect to block_shape values"); - - for (size_t i = 0; i < data_rank; i++) { - const bool is_valid_crops_and_shape = - crops_begin_values[i] + crops_end_values[i] <= block_values[i] * static_cast(data_shape[i]); - NGRAPH_CHECK(is_valid_crops_and_shape, - "Invalid crops values (out of bounds) with respect to the shape of data input"); - } - ov::Shape dispersed_shape(1); dispersed_shape.insert(dispersed_shape.end(), data_shape.begin(), data_shape.end()); std::vector axes_order(block_values_size + 1); @@ -214,6 +175,26 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, con OV_OP_SCOPE(v1_BatchToSpace_evaluate); NGRAPH_CHECK(validate_host_tensor_vector(inputs, 4)); NGRAPH_CHECK(validate_host_tensor_vector(outputs, 1)); + + if (outputs[0]->get_partial_shape().is_dynamic()) { + std::map constant_data; + std::vector input_shapes; + input_shapes.reserve(inputs.size()); + + for (size_t i = 0; i < inputs.size(); ++i) { + input_shapes.push_back(inputs[i]->get_partial_shape()); + if (input_shapes.back().is_dynamic()) { + return false; + } + constant_data.emplace(i, inputs[i]); + } + + const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape(); + + outputs[0]->set_element_type(inputs[0]->get_element_type()); + outputs[0]->set_shape(output_shape); + } + return batch_to_space_evaluate(outputs, inputs); } diff --git a/src/core/src/op/space_to_batch.cpp b/src/core/src/op/space_to_batch.cpp index a36cf37c752..92c9d95f20a 100644 --- a/src/core/src/op/space_to_batch.cpp +++ b/src/core/src/op/space_to_batch.cpp @@ -75,13 +75,29 @@ bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& vi bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVector& outputs, const HostTensorVector& inputs) const { + if (outputs[0]->get_partial_shape().is_dynamic()) { + 
std::map constant_data; + std::vector input_shapes; + input_shapes.reserve(inputs.size()); + + for (size_t i = 0; i < inputs.size(); ++i) { + input_shapes.push_back(inputs[i]->get_partial_shape()); + if (input_shapes.back().is_dynamic()) { + return false; + } + constant_data.emplace(i, inputs[i]); + } + + const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape(); + + outputs[0]->set_element_type(inputs[0]->get_element_type()); + outputs[0]->set_shape(output_shape); + } + const auto& data = inputs[0]; const auto& out = outputs[0]; size_t elem_size = data->get_element_type().size(); - if (data->get_partial_shape().is_dynamic()) { - return false; - } auto data_shape = data->get_shape(); if (!(data->get_shape().size() == 4 || data->get_shape().size() == 5)) { @@ -188,6 +204,7 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto bool ngraph::op::v1::SpaceToBatch::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { OV_OP_SCOPE(v1_SpaceToBatch_evaluate); + return evaluate_space_to_batch(outputs, inputs); } diff --git a/src/core/tests/type_prop/batch_to_space.cpp b/src/core/tests/type_prop/batch_to_space.cpp index 97a9fd57b88..f35afb75fc9 100644 --- a/src/core/tests/type_prop/batch_to_space.cpp +++ b/src/core/tests/type_prop/batch_to_space.cpp @@ -388,19 +388,19 @@ TEST(type_prop, batch_to_space_input_interval_shape_block_one) { } TEST(type_prop, batch_to_space_and_space_to_batch) { - auto data = make_shared(element::f32, Shape{4800, 9, 11, 2}); + auto data = make_shared(element::f32, PartialShape{4800, 9, {11, -1}, 2}); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); ASSERT_EQ(batch_to_space->get_element_type(), element::f32); - ASSERT_EQ(batch_to_space->get_shape(), - (Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1})); + ASSERT_EQ(batch_to_space->get_output_partial_shape(0), + (PartialShape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, {11 * 100 - 38 - 38, -1}, 2 * 2 - 1})); auto space_to_batch = make_shared(batch_to_space, block_shape, crops_begin, crops_end); ASSERT_EQ(space_to_batch->get_element_type(), element::f32); - ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2})); + ASSERT_EQ(space_to_batch->get_output_partial_shape(0), (PartialShape{4800, 9, {11, -1}, 2})); } TEST(type_prop, batch_to_space_dynamic_shape_static_rank) { @@ -441,3 +441,37 @@ TEST(type_prop, batch_to_space_default_ctor) { EXPECT_EQ(batch_to_space->get_element_type(), element::i16); EXPECT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); } + +TEST(type_prop, batch_to_space_non_const_inputs) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto crops_begin = make_shared(element::i64, PartialShape{4}); + auto crops_end = make_shared(element::i64, PartialShape{4}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + EXPECT_EQ(batch_to_space->get_element_type(), element::f32); + EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape::dynamic(4)); +} + +TEST(type_prop, batch_to_space_block_non_constant_only) { + auto data = make_shared(element::f32, PartialShape{100, 7, 
13, 3}); + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto crops_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto crops_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + EXPECT_EQ(batch_to_space->get_element_type(), element::f32); + EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {12, -1}, {3, -1}})); +} + +TEST(type_prop, batch_to_space_crops_non_constant_only) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 5, 1}); + auto crops_begin = make_shared(element::i64, PartialShape{4}); + auto crops_end = make_shared(element::i64, PartialShape{4}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + EXPECT_EQ(batch_to_space->get_element_type(), element::f32); + EXPECT_EQ(batch_to_space->get_output_partial_shape(0), PartialShape({10, -1, -1, -1})); +} diff --git a/src/core/tests/type_prop/space_to_batch.cpp b/src/core/tests/type_prop/space_to_batch.cpp index b3d5b9bcbb2..b58ca9ee729 100644 --- a/src/core/tests/type_prop/space_to_batch.cpp +++ b/src/core/tests/type_prop/space_to_batch.cpp @@ -49,7 +49,7 @@ TEST(type_prop, space_to_batch_output_shape_5D) { } TEST(type_prop, space_to_batch_and_batch_to_space) { - auto data = make_shared(element::f32, Shape{2, 100, 1024, 3}); + auto data = make_shared(element::f32, PartialShape{2, {100, -1}, 1024, 3}); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); @@ -57,12 +57,12 @@ TEST(type_prop, space_to_batch_and_batch_to_space) { auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); ASSERT_EQ(space_to_batch->get_element_type(), element::f32); - ASSERT_EQ(space_to_batch->get_shape(), - (Shape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2})); + ASSERT_EQ(space_to_batch->get_output_partial_shape(0), + (PartialShape{2 * 12 * 100 * 2, {(100 + 3 + 5) / 12, -1}, (1024 + 38 + 38) / 100, (3 + 1) / 2})); auto batch_to_space = make_shared(space_to_batch, block_shape, pads_begin, pads_end); ASSERT_EQ(batch_to_space->get_element_type(), element::f32); - ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3})); + ASSERT_EQ(batch_to_space->get_output_partial_shape(0), (PartialShape{2, {100, -1}, 1024, 3})); } TEST(type_prop, space_to_batch_when_space_is_static) { @@ -117,13 +117,13 @@ TEST(type_prop, space_to_batch_when_space_is_dynamic) { TEST(type_prop, space_to_batch_dynamic_shape_static_rank) { auto data = make_shared(element::f32, PartialShape::dynamic(4)); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); - auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 2, 0}); auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); ASSERT_EQ(space_to_batch->get_element_type(), element::f32); - ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4)); + ASSERT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape({-1, {1, -1}, {1, -1}, -1})); } TEST(type_prop, space_to_batch_dynamic_shape_dynamic_rank) { @@ 
-151,7 +151,7 @@ TEST(type_prop, space_to_batch_dynamic_rank_shape_block_and_pads_not_const) { } TEST(type_prop, space_to_batch_default_ctor) { - auto data = make_shared(element::f32, PartialShape{{2, 5}, 100, {100, 1024}, 3}); + auto data = make_shared(element::f32, PartialShape{{2, 5}, 100, {100, -1}, 3}); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 4, 1}); auto pads_begin = make_shared(element::i64, Shape{4}, vector{1, 1, 2, 0}); auto pads_end = make_shared(element::i64, Shape{4}, vector{1, 1, 6, 0}); @@ -164,7 +164,42 @@ TEST(type_prop, space_to_batch_default_ctor) { EXPECT_EQ(space_to_batch->get_output_size(), 1); EXPECT_EQ(space_to_batch->get_output_element_type(0), element::f32); EXPECT_EQ(space_to_batch->get_output_partial_shape(0), - PartialShape({{2 * 2 * 4, 5 * 2 * 4}, (100 + 2) / 2, {(100 + 2 + 6) / 4, (1024 + 2 + 6) / 4}, 3})); + PartialShape({{2 * 2 * 4, 5 * 2 * 4}, (100 + 2) / 2, {(100 + 2 + 6) / 4, -1}, 3})); +} + +TEST(type_prop, space_to_batch_non_const_inputs) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto pads_begin = make_shared(element::i64, PartialShape{4}); + auto pads_end = make_shared(element::i64, PartialShape{4}); + auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); + + EXPECT_EQ(space_to_batch->get_element_type(), element::f32); + EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4)); +} + +TEST(type_prop, space_to_batch_block_non_constant_only) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + auto block_shape = make_shared(element::i64, PartialShape{4}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); + + EXPECT_EQ(space_to_batch->get_element_type(), element::f32); + EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape::dynamic(4)); +} + +TEST(type_prop, space_to_batch_crops_non_constant_only) { + auto data = make_shared(element::f32, PartialShape{100, 7, 13, 3}); + + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 2, 5, 1}); + auto pads_begin = make_shared(element::i64, PartialShape{4}); + auto pads_end = make_shared(element::i64, PartialShape{4}); + auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); + + EXPECT_EQ(space_to_batch->get_element_type(), element::f32); + EXPECT_EQ(space_to_batch->get_output_partial_shape(0), PartialShape({1000, -1, -1, -1})); } TEST(type_prop, space_to_batch_invalid_element_type_block_shape) { diff --git a/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp b/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp index 3fd1e25ec7a..e9f5ce61384 100644 --- a/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp +++ b/src/plugins/template/tests/functional/op_reference/batch_to_space.cpp @@ -33,7 +33,10 @@ public: void SetUp() override { auto params = GetParam(); function = CreateFunction(params); - inputData = {params.dataTensor.data}; + inputData = {params.dataTensor.data, + params.blockShapeTensor.data, + params.cropsBeginTensor.data, + params.cropsEndTensor.data}; refOutData = {params.expectedTensor.data}; } @@ -61,11 +64,12 @@ public: private: static std::shared_ptr CreateFunction(const BatchToSpaceParams& params) { const auto data = 
std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape, params.blockShapeTensor.data.data()); - const auto cropsBegin = std::make_shared(element::i64, params.cropsBeginTensor.shape, params.cropsBeginTensor.data.data()); - const auto cropsEnd = std::make_shared(element::i64, params.cropsEndTensor.shape, params.cropsEndTensor.data.data()); + const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape); + const auto cropsBegin = std::make_shared(element::i64, params.cropsBeginTensor.shape); + const auto cropsEnd = std::make_shared(element::i64, params.cropsEndTensor.shape); const auto batchToSpace = std::make_shared(data, blockShape, cropsBegin, cropsEnd); - return std::make_shared(NodeVector {batchToSpace}, ParameterVector {data}); + return std::make_shared(NodeVector{batchToSpace}, + ParameterVector{data, blockShape, cropsBegin, cropsEnd}); } }; diff --git a/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp b/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp index 1050f7cd54d..38210a96d95 100644 --- a/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp +++ b/src/plugins/template/tests/functional/op_reference/space_to_batch.cpp @@ -34,7 +34,10 @@ public: void SetUp() override { auto params = GetParam(); function = CreateFunction(params); - inputData = {params.dataTensor.data}; + inputData = {params.dataTensor.data, + params.blockShapeTensor.data, + params.padsBeginTensor.data, + params.padsEndTensor.data}; refOutData = {params.expectedTensor.data}; } @@ -62,11 +65,12 @@ public: private: static std::shared_ptr CreateFunction(const SpaceToBatchParams& params) { const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); - const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape, params.blockShapeTensor.data.data()); - const auto padsBegin = std::make_shared(element::i64, params.padsBeginTensor.shape, params.padsBeginTensor.data.data()); - const auto padsEnd = std::make_shared(element::i64, params.padsEndTensor.shape, params.padsEndTensor.data.data()); + const auto blockShape = std::make_shared(element::i64, params.blockShapeTensor.shape); + const auto padsBegin = std::make_shared(element::i64, params.padsBeginTensor.shape); + const auto padsEnd = std::make_shared(element::i64, params.padsEndTensor.shape); const auto batchToSpace = std::make_shared(data, blockShape, padsBegin, padsEnd); - return std::make_shared(NodeVector {batchToSpace}, ParameterVector {data}); + return std::make_shared(NodeVector{batchToSpace}, + ParameterVector{data, blockShape, padsBegin, padsEnd}); } };
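
Illustration (not part of the patch): the standalone sketch below mirrors the new type_prop tests added above and shows what this change enables, namely building BatchToSpace/SpaceToBatch with block_shape, crops and pads supplied as Parameters instead of Constants while still getting a meaningful output shape. The variable names and the main() wrapper are ours, and the snippet assumes it is compiled against an OpenVINO build that already contains this patch; only public API calls (ov::op::v0::Parameter, ov::op::v0::Constant::create, ov::op::v1::BatchToSpace, ov::op::v1::SpaceToBatch, Node::get_output_partial_shape) are used.

#include <iostream>
#include <memory>

#include "openvino/core/partial_shape.hpp"
#include "openvino/op/batch_to_space.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/space_to_batch.hpp"

int main() {
    using namespace ov;

    // Data with a static shape; block_shape and crops are Parameters,
    // i.e. their values are not known at graph construction time.
    const auto data = std::make_shared<op::v0::Parameter>(element::f32, PartialShape{100, 7, 13, 3});
    const auto block = std::make_shared<op::v0::Parameter>(element::i64, PartialShape{4});
    const auto crops_begin = std::make_shared<op::v0::Parameter>(element::i64, PartialShape{4});
    const auto crops_end = std::make_shared<op::v0::Parameter>(element::i64, PartialShape{4});

    // With this patch, shape inference yields a fully dynamic rank-4 shape
    // instead of propagating the input shape unchanged (the old behaviour
    // visible in the removed code above).
    const auto b2s = std::make_shared<op::v1::BatchToSpace>(data, block, crops_begin, crops_end);
    std::cout << b2s->get_output_partial_shape(0) << std::endl;

    // If only block_shape is constant, the batch dimension is still resolved:
    // 100 / (1 * 2 * 5 * 1) = 10, while the spatial dimensions stay dynamic
    // (compare the new batch_to_space_crops_non_constant_only test).
    const auto const_block = op::v0::Constant::create(element::i64, Shape{4}, {1, 2, 5, 1});
    const auto b2s_partial = std::make_shared<op::v1::BatchToSpace>(data, const_block, crops_begin, crops_end);
    std::cout << b2s_partial->get_output_partial_shape(0) << std::endl;

    // SpaceToBatch behaves symmetrically: with a constant block_shape and
    // non-constant pads the batch dimension becomes 100 * (1 * 2 * 5 * 1) = 1000
    // and the spatial dimensions stay dynamic (space_to_batch_crops_non_constant_only).
    const auto pads_begin = std::make_shared<op::v0::Parameter>(element::i64, PartialShape{4});
    const auto pads_end = std::make_shared<op::v0::Parameter>(element::i64, PartialShape{4});
    const auto s2b = std::make_shared<op::v1::SpaceToBatch>(data, const_block, pads_begin, pads_end);
    std::cout << s2b->get_output_partial_shape(0) << std::endl;

    return 0;
}

For reference, the relations implemented by the updated shape_infer functions are: for BatchToSpace, out[0] = N / prod(block) and out[i] = in[i] * block[i] - crops_begin[i] - crops_end[i]; for SpaceToBatch, out[0] = N * prod(block) and out[i] = (in[i] + pads_begin[i] + pads_end[i]) / block[i]. When only some inputs are constant, the new code keeps the dimensions it can still derive (for example the batch dimension when block_shape is constant) and leaves the rest dynamic. The evaluate() changes follow the same pattern: when the output shape is not yet known, shape_infer is run on the host tensors themselves before the reference computation is performed.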