diff --git a/src/core/include/openvino/core/dimension.hpp b/src/core/include/openvino/core/dimension.hpp index 7f9156093b9..ea70483de34 100644 --- a/src/core/include/openvino/core/dimension.hpp +++ b/src/core/include/openvino/core/dimension.hpp @@ -132,6 +132,18 @@ public: /// \return Smallest interval dimension enclosing inputs Dimension operator-(const Dimension& dim) const; + /// \brief Division operator for Dimension divided by a value_type parameter. + /// \param divisor Right operand for division. + /// \return Smallest interval dimension enclosing inputs + Dimension operator/(const value_type divisor) const; + + /// \brief Divided-into operator for Dimension. + /// \param divisor Right operand for division. + /// \return A reference to `*this`, after updating `*this` to the value `*this / divisor`. + Dimension& operator/=(const value_type divisor) { + return (*this = *this / divisor); + } + /// \brief Multiplication operator for Dimension. /// \param dim Right operand for multiplicaiton. 
/// \return Smallest interval containing all "produces" which are 0 if either of `this` or diff --git a/src/core/include/openvino/op/depth_to_space.hpp b/src/core/include/openvino/op/depth_to_space.hpp index 062179d628c..253f068acfa 100644 --- a/src/core/include/openvino/op/depth_to_space.hpp +++ b/src/core/include/openvino/op/depth_to_space.hpp @@ -42,7 +42,7 @@ public: DepthToSpace(const Output& data, const std::string& mode, std::size_t block_size = 1); bool visit_attributes(AttributeVisitor& visitor) override; - std::size_t get_block_size() const { + const std::size_t& get_block_size() const { return m_blocksize; } DepthToSpaceMode get_mode() const { diff --git a/src/core/include/openvino/op/space_to_depth.hpp b/src/core/include/openvino/op/space_to_depth.hpp index 76de12d7ecc..b956a7a3dcf 100644 --- a/src/core/include/openvino/op/space_to_depth.hpp +++ b/src/core/include/openvino/op/space_to_depth.hpp @@ -40,7 +40,7 @@ public: SpaceToDepth(const Output& data, const std::string& mode, std::size_t block_size = 1); bool visit_attributes(AttributeVisitor& visitor) override; - std::size_t get_block_size() const { + const std::size_t& get_block_size() const { return m_blocksize; } SpaceToDepthMode get_mode() const { diff --git a/src/core/shape_inference/include/batch_to_space_shape_inference.hpp b/src/core/shape_inference/include/batch_to_space_shape_inference.hpp new file mode 100644 index 00000000000..c48bedda68e --- /dev/null +++ b/src/core/shape_inference/include/batch_to_space_shape_inference.hpp @@ -0,0 +1,119 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "utils.hpp" + +namespace ov { +namespace op { +namespace v1 { + +template +void shape_infer(const ov::op::v1::BatchToSpace* op, + const std::vector& input_shapes, + std::vector& output_shapes, + const std::map>& constant_data = {}) { + using ValType = typename 
std::iterator_traits::value_type::value_type; + NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 1); + const auto& data_shape = input_shapes[0]; + const auto& block_shape = input_shapes[1]; + const auto& crops_begin_shape = input_shapes[2]; + const auto& crops_end_shape = input_shapes[3]; + bool got_const_data = false; + + auto inputs_same_ps = crops_begin_shape; + NODE_VALIDATION_CHECK(op, + T::merge_into(inputs_same_ps, crops_end_shape) && T::merge_into(inputs_same_ps, block_shape), + "block_shape, crops_begin and crops_end inputs must have the same shape. Got: ", + block_shape, + ", ", + crops_begin_shape, + " and ", + crops_end_shape); + + NODE_VALIDATION_CHECK(op, + inputs_same_ps.rank().compatible(1), + "block_shape and crops inputs must have rank 1. Got: ", + inputs_same_ps.rank()); + + const ov::Rank data_rank = data_shape.rank(); + if (data_rank.is_static()) { + NODE_VALIDATION_CHECK(op, + (data_rank.get_length() >= 2), + "data input must have rank greater or equal than 2. Got: ", + data_rank.get_length()); + if (inputs_same_ps.is_static()) { + NODE_VALIDATION_CHECK(op, + data_rank.get_length() == inputs_same_ps[0].get_length(), + "block_shape and crop inputs must have same number of elements " + "as data input rank. 
Got: ", + inputs_same_ps[0], + " and ", + data_rank); + } + + auto& output_shape = output_shapes[0]; + output_shape.resize(data_shape.size()); + + std::vector block_val, crops_begin_val, crops_end_val; + + if (get_data_as_int64(1, op, block_val, constant_data) && + get_data_as_int64(2, op, crops_begin_val, constant_data) && + get_data_as_int64(3, op, crops_end_val, constant_data)) { + got_const_data = true; + bool block_vals_valid = std::all_of(begin(block_val), end(block_val), [](int64_t elem) { + return elem >= 1; + }); + NODE_VALIDATION_CHECK(op, + block_vals_valid, + "Elements of block_shape input must be greater or equal to one."); + + bool crops_begin_vals_valid = std::all_of(begin(crops_begin_val), end(crops_begin_val), [](int64_t elem) { + return elem >= 0; + }); + bool crops_end_vals_valid = std::all_of(begin(crops_end_val), end(crops_end_val), [](int64_t elem) { + return elem >= 0; + }); + NODE_VALIDATION_CHECK(op, + crops_begin_vals_valid && crops_end_vals_valid, + "Elements of crops_begin and crops_end inputs must be greater or equal to zero."); + if (data_shape.is_static()) { + for (size_t idx = 0; idx < data_shape.size(); idx++) { + const bool is_valid_crops_and_shape = + crops_begin_val[idx] + crops_end_val[idx] <= block_val[idx] * data_shape[idx].get_length(); + NODE_VALIDATION_CHECK(op, + is_valid_crops_and_shape, + "crops_begin[i] + crops_end[i] must be less or equal to " + "block_shape[i] * input_shape[i]"); + } + } + + int64_t block_prod = std::accumulate(begin(block_val), end(block_val), 1, std::multiplies()); + const auto divisor = static_cast(block_prod); + + output_shape[0] = data_shape[0] / divisor; + check_divided_result(op, output_shape[0], data_shape[0], divisor); + + for (size_t idx = 1; idx < data_shape.size(); idx++) { + output_shape[idx] = data_shape[idx] * static_cast(block_val[idx]) - + static_cast(crops_begin_val[idx]) - + static_cast(crops_end_val[idx]); + } + } + } + if (!got_const_data) + // For PartialShape, Set the output to 
be dynamic; + // For StaticShape, throw error caused by implicitly constructing StaticShape with PartialShape argument; + output_shapes[0] = ov::PartialShape::dynamic(data_rank); +} + +} // namespace v1 +} // namespace op +} // namespace ov \ No newline at end of file diff --git a/src/core/shape_inference/include/depth_to_space_shape_inference.hpp b/src/core/shape_inference/include/depth_to_space_shape_inference.hpp new file mode 100644 index 00000000000..7ac414fb58e --- /dev/null +++ b/src/core/shape_inference/include/depth_to_space_shape_inference.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "utils.hpp" +namespace ov { +namespace op { +namespace v0 { + +template +void shape_infer(const ov::op::v0::DepthToSpace* op, + const std::vector& input_shapes, + std::vector& output_shapes) { + using ValType = typename std::iterator_traits::value_type::value_type; + + NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1); + + const auto& data_shape = input_shapes[0]; + const ov::Rank data_rank = data_shape.rank(); + const auto& block_size = op->get_block_size(); + + if (data_rank.is_static()) { + NODE_VALIDATION_CHECK(op, + data_shape.size() >= 3, + "The input tensor with rank lower than 3 is not supported (input rank: ", + data_shape.size(), + ")"); + + const size_t divider = std::pow(block_size, data_shape.size() - 2); + NODE_VALIDATION_CHECK(op, (divider), "DepthToSpace: The divider must not be 0"); + + auto& output_shape = output_shapes[0]; + output_shape.resize(data_shape.size()); + + output_shape[0] = data_shape[0]; + const auto divisor = static_cast(divider); + output_shape[1] = data_shape[1] / divisor; + check_divided_result(op, output_shape[1], data_shape[1], divisor); + for (size_t i = 2; i < output_shape.size(); i++) { + output_shape[i] = data_shape[i] * static_cast(block_size); + } + + } else { + // For 
PartialShape, Set the output to be dynamic; + // For StaticShape, throw error caused by implicitly constructing StaticShape with PartialShape argument; + output_shapes[0] = ov::PartialShape::dynamic(data_rank); + } +} + +} // namespace v0 +} // namespace op +} // namespace ov \ No newline at end of file diff --git a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp new file mode 100644 index 00000000000..af78d4314db --- /dev/null +++ b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp @@ -0,0 +1,90 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "utils.hpp" + +namespace ov { +namespace op { +namespace v1 { + +template +void shape_infer(const ov::op::v1::SpaceToBatch* op, + const std::vector& input_shapes, + std::vector& output_shapes, + const std::map>& constant_data = {}) { + using ValType = typename std::iterator_traits::value_type::value_type; + NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 1); + + const auto& data_shape = input_shapes[0]; + const auto& block_shape = input_shapes[1]; + const auto& pads_begin_shape = input_shapes[2]; + const auto& pads_end_shape = input_shapes[3]; + const ov::Rank data_rank = data_shape.rank(); + bool got_const_data = false; + + auto inputs_same_ps = pads_begin_shape; + NODE_VALIDATION_CHECK(op, + T::merge_into(inputs_same_ps, pads_end_shape) && T::merge_into(inputs_same_ps, block_shape), + "block_shape, pads_begin and pads_end inputs must have the same shape. Got: ", + block_shape, + ", ", + pads_begin_shape, + " and ", + pads_end_shape); + + NODE_VALIDATION_CHECK(op, + inputs_same_ps.rank().compatible(1), + "block_shape and pads inputs must have rank 1. 
Got: ", + inputs_same_ps.rank()); + + if (data_rank.is_static()) { + NODE_VALIDATION_CHECK(op, + (data_shape.size() >= 2), + "The data tensor with rank lower than 2 is not supported (data rank: ", + data_shape.size(), + ")"); + + std::vector block_val, pads_begin_val, pads_end_val; + + auto& output_shape = output_shapes[0]; + output_shape.resize(data_shape.size()); + if (get_data_as_int64(1, op, block_val, constant_data) && + get_data_as_int64(2, op, pads_begin_val, constant_data) && + get_data_as_int64(3, op, pads_end_val, constant_data)) { + got_const_data = true; + int64_t block_prod = std::accumulate(begin(block_val), end(block_val), 1, std::multiplies()); + + output_shape[0] = data_shape[0] * static_cast(block_prod); + + for (size_t idx = 1; idx < output_shape.size(); ++idx) { + NODE_VALIDATION_CHECK(op, block_val[idx] > 0, "block_shape values must be greater than 0"); + if (data_shape[idx].is_dynamic() && data_shape[idx] == ov::Dimension::dynamic()) { + output_shape[idx] = ov::Dimension::dynamic(); + } else { + const auto divided = + data_shape[idx] + static_cast((pads_begin_val[idx] + pads_end_val[idx])); + const auto divisor = static_cast(block_val[idx]); + output_shape[idx] = divided / divisor; + check_divided_result(op, output_shape[idx], divided, divisor); + } + } + } + } + + if (!got_const_data) + // For PartialShape, Set the output to be dynamic; + // For StaticShape, throw error caused by implicitly constructing StaticShape with PartialShape argument; + output_shapes[0] = ov::PartialShape::dynamic(data_rank); +} + +} // namespace v1 +} // namespace op +} // namespace ov \ No newline at end of file diff --git a/src/core/shape_inference/include/space_to_depth_shape_inference.hpp b/src/core/shape_inference/include/space_to_depth_shape_inference.hpp new file mode 100644 index 00000000000..dcd90fc010a --- /dev/null +++ b/src/core/shape_inference/include/space_to_depth_shape_inference.hpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2021 Intel Corporation +// 
SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include + +#include "utils.hpp" +namespace ov { +namespace op { +namespace v0 { + +template +void shape_infer(const ov::op::v0::SpaceToDepth* op, + const std::vector& input_shapes, + std::vector& output_shapes) { + using ValType = typename std::iterator_traits::value_type::value_type; + + NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1); + + const auto& data_shape = input_shapes[0]; + const ov::Rank data_rank = data_shape.rank(); + if (data_rank.is_static()) { + NODE_VALIDATION_CHECK(op, + !(data_shape.size() < 3), + "The input tensor with rank lower than 3 is not supported (input rank: ", + data_shape.size(), + ")"); + + const auto& block_size = op->get_block_size(); + NODE_VALIDATION_CHECK(op, block_size > 0, "The block size must be greater than 0 ", block_size); + const ValType multiplier = std::pow(block_size, data_shape.size() - 2); + + auto& out_shape = output_shapes[0]; + out_shape.resize(data_shape.size()); + + out_shape[0] = data_shape[0]; + out_shape[1] = data_shape[1] * multiplier; + const auto divisor = static_cast(block_size); + for (size_t i = 2; i < out_shape.size(); i++) { + out_shape[i] = data_shape[i] / divisor; + check_divided_result(op, out_shape[i], data_shape[i], divisor); + } + } else { + // For PartialShape, Set the output to be dynamic; + // For StaticShape, will throw error caused by implicitly constructing StaticShape with PartialShape argument; + output_shapes[0] = ov::PartialShape::dynamic(data_rank); + } +} + +} // namespace v0 +} // namespace op +} // namespace ov \ No newline at end of file diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp index 91fcd5189f9..3ba1ff0c488 100644 --- a/src/core/shape_inference/include/utils.hpp +++ b/src/core/shape_inference/include/utils.hpp @@ -119,4 +119,36 @@ inline bool get_data_as_shape( } else { return 
ov::evaluate_as_partial_shape(op->input_value(idx), shape); } +} + +template +inline void check_divided_result(const ov::Node* op, + const T& res, + const T& divided, + const typename T::value_type& divisor) { + NODE_VALIDATION_CHECK(op, + res != T{}, + "Dimension value: [ ", + divided.get_min_length(), + ", ", + divided.get_max_length(), + "]", + " must be a multiple of divisor: ", + divisor); +} + +template <> +inline void check_divided_result(const ov::Node* op, + const ov::Dimension& res, + const ov::Dimension& divided, + const typename ov::Dimension::value_type& divisor) { + NODE_VALIDATION_CHECK(op, + !res.get_interval().empty(), + "Dimension value: [ ", + divided.get_min_length(), + ", ", + divided.get_max_length(), + "]", + " must be a multiple of divisor: ", + divisor); } \ No newline at end of file diff --git a/src/core/src/dimension.cpp b/src/core/src/dimension.cpp index 37f1fdcb1b4..f88e00508f6 100644 --- a/src/core/src/dimension.cpp +++ b/src/core/src/dimension.cpp @@ -41,6 +41,15 @@ Dimension Dimension::operator-(const Dimension& dim) const { return Dimension(m_dimension - dim.m_dimension); } +Dimension Dimension::operator/(const value_type divisor) const { + OPENVINO_ASSERT(divisor > 0, "divisor must be greater than 0"); + + if (m_dimension.get_max_val() == Interval::s_max && m_dimension.get_min_val() == 0) + return Dimension::dynamic(); + + return Dimension((m_dimension.get_min_val() + divisor - 1) / divisor, m_dimension.get_max_val() / divisor); +} + Dimension Dimension::operator*(const Dimension& dim) const { return Dimension(m_dimension * dim.m_dimension); } diff --git a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp index 3cee1ba4f42..1c2788f1776 100644 --- a/src/core/src/op/batch_to_space.cpp +++ b/src/core/src/op/batch_to_space.cpp @@ -4,6 +4,7 @@ #include "ngraph/op/batch_to_space.hpp" +#include #include #include #include @@ -61,103 +62,13 @@ void op::v1::BatchToSpace::validate_and_infer_types() { "block_shape and 
crops inputs must have integer element type. Got: ", inputs_integer_et); - const ov::PartialShape& data_pshape = get_input_partial_shape(0); - const ov::PartialShape& block_shape_ps = get_input_partial_shape(1); - const ov::PartialShape& crops_begin_ps = get_input_partial_shape(2); - const ov::PartialShape& crops_end_ps = get_input_partial_shape(3); - - ov::PartialShape inputs_same_ps{ov::PartialShape::dynamic()}; - NODE_VALIDATION_CHECK(this, - ov::PartialShape::merge_into(inputs_same_ps, crops_begin_ps) && - ov::PartialShape::merge_into(inputs_same_ps, crops_end_ps) && - ov::PartialShape::merge_into(inputs_same_ps, block_shape_ps), - "block_shape, crops_begin and crops_end inputs must have the same shape. Got: ", - block_shape_ps, - ", ", - crops_begin_ps, - " and ", - crops_end_ps); - - const Rank inputs_rank_one = inputs_same_ps.rank(); - NODE_VALIDATION_CHECK(this, - inputs_rank_one.compatible(1), - "block_shape and crops inputs must have rank 1. Got: ", - inputs_rank_one); - - const Rank data_rank = data_pshape.rank(); - if (data_rank.is_static()) { - NODE_VALIDATION_CHECK(this, - (data_rank.get_length() >= 2), - "data input must have rank greater or equal than 2. Got: ", - data_rank.get_length()); - - if (inputs_same_ps.is_static()) { - NODE_VALIDATION_CHECK(this, - data_rank.get_length() == inputs_same_ps[0].get_length(), - "block_shape and crop inputs must have same number of elements " - "as data input rank. 
Got: ", - inputs_same_ps[0], - " and ", - data_rank); - } - } - - const auto block_const = get_constant_from_source(input_value(1)); - const auto crops_begin_const = get_constant_from_source(input_value(2)); - const auto crops_end_const = get_constant_from_source(input_value(3)); - - if (block_const && crops_begin_const && crops_end_const && data_pshape.is_static()) { - const ov::Shape& data_sshape = data_pshape.to_shape(); - - auto block_val = block_const->cast_vector(); - auto crops_begin_val = crops_begin_const->cast_vector(); - auto crops_end_val = crops_end_const->cast_vector(); - - bool block_vals_valid = std::all_of(begin(block_val), end(block_val), [](int64_t elem) { - return elem >= 1; - }); - NODE_VALIDATION_CHECK(this, block_vals_valid, "Elements of block_shape input must be greater or equal to one."); - - bool crops_begin_vals_valid = std::all_of(begin(crops_begin_val), end(crops_begin_val), [](int64_t elem) { - return elem >= 0; - }); - bool crops_end_vals_valid = std::all_of(begin(crops_end_val), end(crops_end_val), [](int64_t elem) { - return elem >= 0; - }); - NODE_VALIDATION_CHECK(this, - crops_begin_vals_valid && crops_end_vals_valid, - "Elements of crops_begin and crops_end inputs must be greater or equal to zero."); - - int64_t block_prod = std::accumulate(begin(block_val), end(block_val), 1, std::multiplies()); - - NODE_VALIDATION_CHECK(this, - data_sshape[0] % block_prod == 0, - "The input data's 'batch' axis size: ", - data_sshape[0], - " must be a multiple of", - " product of block_shape values: ", - block_prod); - - for (size_t idx = 0; idx < data_sshape.size(); idx++) { - const bool is_valid_crops_and_shape = - crops_begin_val[idx] + crops_end_val[idx] <= block_val[idx] * static_cast(data_sshape[idx]); - NODE_VALIDATION_CHECK(this, - is_valid_crops_and_shape, - "crops_begin[i] + crops_end[i] must be less or equal to " - "block_shape[i] * input_shape[i]"); - } - - ov::Shape output_sshape = {static_cast(data_sshape[0] / block_prod)}; - for 
(size_t idx = 1; idx < data_sshape.size(); ++idx) { - output_sshape.push_back( - static_cast(data_sshape[idx] * block_val[idx] - crops_begin_val[idx] - crops_end_val[idx])); - } - - set_output_size(1); - set_output_type(0, data_et, output_sshape); - } else { - set_output_type(0, data_et, ov::PartialShape::dynamic(data_rank)); - } + std::vector output_shapes = {ov::PartialShape{}}; + const std::vector input_shapes = {get_input_partial_shape(0), + get_input_partial_shape(1), + get_input_partial_shape(2), + get_input_partial_shape(3)}; + shape_infer(this, input_shapes, output_shapes); + set_output_type(0, data_et, output_shapes[0]); } std::shared_ptr ngraph::op::v1::BatchToSpace::clone_with_new_inputs(const OutputVector& new_args) const { diff --git a/src/core/src/op/depth_to_space.cpp b/src/core/src/op/depth_to_space.cpp index cd58c04a5e7..c0d3e617ea2 100644 --- a/src/core/src/op/depth_to_space.cpp +++ b/src/core/src/op/depth_to_space.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -45,42 +46,12 @@ std::shared_ptr op::DepthToSpace::clone_with_new_inputs(const OutputVector void op::DepthToSpace::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_DepthToSpace_validate_and_infer_types); - ov::PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); - - auto data = input_value(0); - - if (data_pshape.is_static()) { - const auto& data_shape = data.get_shape(); - - NODE_VALIDATION_CHECK(this, - !(data_shape.size() < 3), - "The input tensor with rank lower than 3 is not supported (input rank: ", - data_shape.size(), - ")"); - - auto divider = std::pow(m_blocksize, data_shape.size() - 2); - NODE_VALIDATION_CHECK(this, (divider), "DepthToSpace: The divider must not be 0"); - - NODE_VALIDATION_CHECK(this, - m_blocksize > 0 && !(data_shape[1] % m_blocksize), - "DepthToSpace: The input data's 'channels' axis size: ", - data_shape[1], - " must be a equivalent to 'block_size'^'spatial_dims': ", - divider); 
- - auto out_shape = data_shape; - out_shape[1] /= divider; - for (size_t i = 2; i < out_shape.size(); i++) { - out_shape[i] *= m_blocksize; - } - - set_output_size(1); - set_output_type(0, data_type, out_shape); - } else { - set_output_type(0, data_type, ov::PartialShape::dynamic(data_pshape.rank())); - } + std::vector output_shapes = {ov::PartialShape{}}; + const std::vector input_shapes = {get_input_partial_shape(0)}; + shape_infer(this, input_shapes, output_shapes); + set_output_type(0, data_type, output_shapes[0]); } namespace { diff --git a/src/core/src/op/space_to_batch.cpp b/src/core/src/op/space_to_batch.cpp index cd11e09bd73..7f07d86b182 100644 --- a/src/core/src/op/space_to_batch.cpp +++ b/src/core/src/op/space_to_batch.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include "itt.hpp" #include "ngraph/builder/make_constant.hpp" @@ -37,7 +38,6 @@ ngraph::op::v1::SpaceToBatch::SpaceToBatch(const ngraph::Output& d void op::v1::SpaceToBatch::validate_and_infer_types() { NGRAPH_OP_SCOPE(v1_SpaceToBatch_validate_and_infer_types); - ov::PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); const auto& block_shape_type = get_input_element_type(1); const auto& pads_begin_type = get_input_element_type(2); @@ -60,54 +60,13 @@ void op::v1::SpaceToBatch::validate_and_infer_types() { "pads_end must be an integral number but got (", pads_end_type, ")."); - - auto data = input_value(0); - auto block = input_value(1); - auto pads_begin = input_value(2); - auto pads_end = input_value(3); - - const auto& block_const = get_constant_from_source(block); - const auto& pads_begin_const = get_constant_from_source(pads_begin); - const auto& pads_end_const = get_constant_from_source(pads_end); - - if (block_const && pads_begin_const && pads_end_const && data_pshape.is_static()) { - const auto& data_shape = data.get_shape(); - - NODE_VALIDATION_CHECK(this, - (data_shape.size() >= 2), - "The data tensor with rank lower 
than 2 is not supported (data rank: ", - data_shape.size(), - ")"); - - auto block_val = block_const->cast_vector(); - auto pads_begin_val = pads_begin_const->cast_vector(); - auto pads_end_val = pads_end_const->cast_vector(); - - int64_t block_prod = 1; - for (long idx : block_val) - block_prod *= idx; - - ov::Shape output_shape = {static_cast(data_shape[0] * block_prod)}; - for (size_t idx = 1; idx < data_shape.size(); ++idx) { - NODE_VALIDATION_CHECK(this, block_val.at(idx) > 0, "block_shape values must be greater than 0"); - NODE_VALIDATION_CHECK( - this, - (pads_begin_val.at(idx) + data_shape.at(idx) + pads_end_val.at(idx)) % block_val.at(idx) == 0, - "The dimension on position: ", - idx, - " equal to: ", - pads_begin_val.at(idx) + data_shape.at(idx) + pads_end_val.at(idx), - " must be a multiple of block_values[i]: ", - block_val.at(idx)); - output_shape.push_back(static_cast(pads_begin_val[idx] + data_shape[idx] + pads_end_val[idx]) / - block_val[idx]); - } - - set_output_size(1); - set_output_type(0, data_type, output_shape); - } else { - set_output_type(0, data_type, ov::PartialShape::dynamic(data_pshape.rank())); - } + std::vector output_shapes = {ov::PartialShape{}}; + const std::vector input_shapes = {get_input_partial_shape(0), + get_input_partial_shape(1), + get_input_partial_shape(2), + get_input_partial_shape(3)}; + shape_infer(this, input_shapes, output_shapes); + set_output_type(0, data_type, output_shapes[0]); } std::shared_ptr ngraph::op::v1::SpaceToBatch::clone_with_new_inputs(const OutputVector& new_args) const { diff --git a/src/core/src/op/space_to_depth.cpp b/src/core/src/op/space_to_depth.cpp index 14fff599f81..a10572bb5b1 100644 --- a/src/core/src/op/space_to_depth.cpp +++ b/src/core/src/op/space_to_depth.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include "itt.hpp" #include "ngraph/attribute_visitor.hpp" @@ -46,43 +47,12 @@ std::shared_ptr ov::op::v0::SpaceToDepth::clone_with_new_inputs(const Outp void 
ngraph::op::v0::SpaceToDepth::validate_and_infer_types() { NGRAPH_OP_SCOPE(v0_SpaceToDepth_validate_and_infer_types); - ov::PartialShape data_pshape = get_input_partial_shape(0); const auto& data_type = get_input_element_type(0); - - auto data = input_value(0); - - if (data_pshape.is_static()) { - const auto& data_shape = data.get_shape(); - - NODE_VALIDATION_CHECK(this, - !(data_shape.size() < 3), - "The input tensor with rank lower than 3 is not supported (input rank: ", - data_shape.size(), - ")"); - - auto multiplier = std::pow(m_blocksize, data_shape.size() - 2); - - auto out_shape = data_shape; - out_shape[1] *= multiplier; - for (size_t i = 2; i < out_shape.size(); i++) { - NODE_VALIDATION_CHECK(this, - m_blocksize > 0 && !(out_shape[i] % m_blocksize), - "The dimension on position: ", - i, - " equal to: ", - out_shape[i], - " must be a multiple of m_blocksize: ", - m_blocksize); - - out_shape[i] /= m_blocksize; - } - - set_output_size(1); - set_output_type(0, data_type, out_shape); - } else { - set_output_type(0, data_type, ov::PartialShape::dynamic(data_pshape.rank())); - } + std::vector output_shapes = {ov::PartialShape{}}; + const std::vector input_shapes = {get_input_partial_shape(0)}; + shape_infer(this, input_shapes, output_shapes); + set_output_type(0, data_type, output_shapes[0]); } namespace { diff --git a/src/core/tests/onnx/onnx_import_exceptions.cpp b/src/core/tests/onnx/onnx_import_exceptions.cpp index 7fe37cf29f3..958d1153738 100644 --- a/src/core/tests/onnx/onnx_import_exceptions.cpp +++ b/src/core/tests/onnx/onnx_import_exceptions.cpp @@ -27,8 +27,7 @@ TEST(onnx_importer, exception_msg_ngraph_error) { // Should have thrown, so fail if it didn't FAIL() << "ONNX Importer did not detected incorrect model!"; } catch (const ngraph_error& e) { - EXPECT_HAS_SUBSTRING(e.what(), std::string("While validating ONNX node '(data, block_shape, crops_begin, crops_end); FAIL() << "Incompatible data shape and block_shape input values not detected"; - } catch 
(const NodeValidationFailure& error) { - EXPECT_HAS_SUBSTRING( - error.what(), - "The input data's 'batch' axis size: 80 must be a multiple of product of block_shape values: 50"); + } catch (const ov::Exception& error) { + EXPECT_HAS_SUBSTRING(error.what(), "[ 80, 80] must be a multiple of divisor: 50"); } catch (...) { FAIL() << "Data shape and block_shape input values check failed for unexpected reason"; } @@ -340,6 +339,37 @@ TEST(type_prop, batch_to_space_output_shape_5D) { ASSERT_EQ(batch_to_space->get_shape(), (Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16})); } +TEST(type_prop, batch_to_space_output_dynamicshape_5D_when_batch_is_static) { + auto data = make_shared(element::f32, PartialShape{960, {2, 20}, {12, 14}, {100, 150}, {10, 20}}); + auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + auto crops_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + auto crops_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + ASSERT_EQ(batch_to_space->get_output_partial_shape(0), + (PartialShape{960 / (6 * 5 * 16), + {2 * 6 - 2 - 2, 20 * 6 - 2 - 2}, + {12 * 5 - 1, 14 * 5 - 1}, + {100, 150}, + {10 * 16, 20 * 16}})); +} + +TEST(type_prop, batch_to_space_output_dynamicshape_5D_when_batch_is_dynamic) { + auto data = + make_shared(element::f32, PartialShape{{959, 962}, {2, 34}, {9, 21}, {100, 162}, {1, 1999}}); + auto block_shape = make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + auto crops_begin = make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + auto crops_end = make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + auto batch_to_space = make_shared(data, block_shape, crops_begin, crops_end); + + ASSERT_EQ(batch_to_space->get_output_partial_shape(0), + (PartialShape{{DIV_ROUND_UP(959, (6 * 5 * 16)), 962 / (6 * 5 * 16)}, + {2 * 6 - 2 - 2, 34 * 6 - 2 - 2}, + {9 * 5 - 1, 21 * 5 - 
1}, + {100, 162}, + {1 * 16, 1999 * 16}})); +} + TEST(type_prop, batch_to_space_and_space_to_batch) { auto data = make_shared(element::f32, Shape{4800, 9, 11, 2}); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); diff --git a/src/core/tests/type_prop/depth_to_space.cpp b/src/core/tests/type_prop/depth_to_space.cpp index 9f68d17db05..e02c225b587 100644 --- a/src/core/tests/type_prop/depth_to_space.cpp +++ b/src/core/tests/type_prop/depth_to_space.cpp @@ -9,6 +9,29 @@ using namespace std; using namespace ngraph; +#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) + +TEST(type_prop, depth_to_space_output_dynamicshape_block_first_5D_when_depth_is_static) { + auto A = make_shared(element::f32, PartialShape{{2, 10}, 24, {3, 7}, {423, 3000}, {235, 1345}}); + auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); + + ASSERT_EQ(space_to_depth->get_output_partial_shape(0), + (PartialShape{{2, 10}, 3, {3 * 2, 7 * 2}, {423 * 2, 3000 * 2}, {235 * 2, 1345 * 2}})); +} + +TEST(type_prop, depth_to_space_output_dynamicshape_block_first_5D_when_depth_is_dynamic) { + auto A = + make_shared(element::f32, PartialShape{{2, 10}, {81, 82}, {3, 7}, {423, 3000}, {235, 1345}}); + auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 3); + + ASSERT_EQ(space_to_depth->get_output_partial_shape(0), + (PartialShape{{2, 10}, + {DIV_ROUND_UP(81, 27), 82 / 27}, + {3 * 3, 7 * 3}, + {423 * 3, 3000 * 3}, + {235 * 3, 1345 * 3}})); +} + TEST(type_prop, depth_to_space_output_shape_block_first_4D) { auto A = make_shared(element::f32, Shape{1, 128, 8, 8}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 8); @@ -67,9 +90,7 @@ TEST(type_prop, depth_to_space_blocksize_not_matched) { auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); FAIL() << "Not matched blocksize for DepthToSpace exception not thrown"; } catch (const ngraph_error& error) 
{ - EXPECT_HAS_SUBSTRING(error.what(), - "DepthToSpace: The input data's 'channels' axis size: 7" - " must be a equivalent to 'block_size'^'spatial_dims': 4"); + EXPECT_HAS_SUBSTRING(error.what(), "Dimension value: [ 7, 7] must be a multiple of divisor: 4"); } catch (...) { FAIL() << "DepthToSpace decomposition failed for unexpected reason"; } diff --git a/src/core/tests/type_prop/space_to_batch.cpp b/src/core/tests/type_prop/space_to_batch.cpp index 2dd27ba5b65..806dba91937 100644 --- a/src/core/tests/type_prop/space_to_batch.cpp +++ b/src/core/tests/type_prop/space_to_batch.cpp @@ -9,6 +9,8 @@ using namespace std; using namespace ngraph; +#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) + TEST(type_prop, space_to_batch_output_shape_2D) { auto data = make_shared(element::f32, Shape{2, 128}); auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); @@ -62,6 +64,34 @@ TEST(type_prop, space_to_batch_and_batch_to_space) { ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3})); } +TEST(type_prop, space_to_batch_when_space_is_static) { + auto data = make_shared(element::f32, PartialShape{{2, 5}, 100, 1024, 3}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + + auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); + + ASSERT_EQ( + space_to_batch->get_output_partial_shape(0), + (PartialShape{{2 * 12 * 100 * 2, 5 * 12 * 100 * 2}, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2})); +} + +TEST(type_prop, space_to_batch_when_space_is_dynamic) { + auto data = make_shared(element::f32, PartialShape{{2, 5}, {5, 100}, {100, 1024}, {3, 10}}); + auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + auto pads_begin = make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = make_shared(element::i64, 
Shape{4}, vector{0, 5, 38, 0}); + + auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); + + ASSERT_EQ(space_to_batch->get_output_partial_shape(0), + (PartialShape{{2 * 12 * 100 * 2, 5 * 12 * 100 * 2}, + {DIV_ROUND_UP((5 + 5 + 3), 12), (100 + 5 + 3) / 12}, + {DIV_ROUND_UP((100 + 38 + 38), 100), (1024 + 38 + 38) / 100}, + {DIV_ROUND_UP((3 + 1), 2), (10 + 1) / 2}})); +} + TEST(type_prop, space_to_batch_dynamic_shape_static_rank) { auto data = make_shared(element::f32, PartialShape::dynamic(4)); auto block_shape = make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); diff --git a/src/core/tests/type_prop/space_to_depth.cpp b/src/core/tests/type_prop/space_to_depth.cpp index 6231f060e0c..e177fdf0a1b 100644 --- a/src/core/tests/type_prop/space_to_depth.cpp +++ b/src/core/tests/type_prop/space_to_depth.cpp @@ -9,6 +9,8 @@ using namespace std; using namespace ngraph; +#define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d)) + TEST(type_prop, space_to_depth_output_shape_block_first_4D) { auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; @@ -45,6 +47,27 @@ TEST(type_prop, space_to_depth_output_shape_depth_first_5D) { ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2})); } +TEST(type_prop, space_to_depth_output_shape_when_space_is_static) { + auto A = make_shared(element::f32, PartialShape{{1, 4}, {12, 36}, 1080, 1616}); + const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 2); + + ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_output_partial_shape(0), + (PartialShape{{1, 4}, {12 * 4, 36 * 4}, 1080 / 2, 1616 / 2})); +} + +TEST(type_prop, space_to_depth_output_shape_when_space_is_dynamic) { + auto A = make_shared(element::f32, PartialShape{{1, 4}, {12, 36}, {100, 1081}, {99, 1616}}); + const auto mode = 
ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; + auto space_to_depth = make_shared(A, mode, 2); + + ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ( + space_to_depth->get_output_partial_shape(0), + (PartialShape{{1, 4}, {12 * 4, 36 * 4}, {DIV_ROUND_UP(100, 2), 1081 / 2}, {DIV_ROUND_UP(99, 2), 1616 / 2}})); +} + TEST(type_prop, space_to_depth_dynamic_shape_static_rank) { auto A = make_shared(element::f32, PartialShape::dynamic(4)); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; @@ -81,8 +104,7 @@ TEST(type_prop, space_to_depth_blocksize_not_matched) { auto space_to_depth = make_shared(A, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 4); FAIL() << "Not matched blocksize SpaceToDepth exception not thrown"; } catch (const ngraph_error& error) { - EXPECT_HAS_SUBSTRING(error.what(), - "The dimension on position: 3 equal to: 7 must be a multiple of m_blocksize: 4"); + EXPECT_HAS_SUBSTRING(error.what(), "Dimension value: [ 7, 7] must be a multiple of divisor: 4"); } catch (...) 
{ FAIL() << "SpaceToDepth decomposition failed for unexpected reason"; } diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp index 2595fd39961..cf5a9ac20e9 100644 --- a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp @@ -38,6 +38,10 @@ #include "shape_inference.hpp" #include "shape_nodes.hpp" #include "fake_quantize.hpp" +#include "batch_to_space_shape_inference.hpp" +#include "depth_to_space_shape_inference.hpp" +#include "space_to_batch_shape_inference.hpp" +#include "space_to_depth_shape_inference.hpp" #include "experimental_detectron_detection_output_shape_inference.hpp" #include "bucketize_shape_inference.hpp" #include "embedding_segments_sum_shape_inference.hpp" @@ -230,6 +234,14 @@ void shape_inference(ov::Node* op, shape_infer(node, input_shapes, output_shapes); } else if (auto node = ov::as_type(op)) { shape_infer(node, input_shapes, output_shapes); + } else if (auto node = ov::as_type(op)) { + shape_infer(node, input_shapes, output_shapes, constant_data); + } else if (auto node = ov::as_type(op)) { + shape_infer(node, input_shapes, output_shapes, constant_data); + } else if (auto node = ov::as_type(op)) { + shape_infer(node, input_shapes, output_shapes); + } else if (auto node = ov::as_type(op)) { + shape_infer(node, input_shapes, output_shapes); } else if (auto node = ov::as_type(op)) { shape_infer(node, input_shapes, output_shapes, constant_data); } else if (auto node = ov::as_type(op)) { diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.cpp b/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.cpp index 1b528eed821..213e8b6083c 100644 --- a/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.cpp +++ b/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.cpp @@ -41,6 +41,19 @@ StaticDimension& 
StaticDimension::operator*=(const StaticDimension& dim) { return (*this = *this * dim); } +StaticDimension StaticDimension::operator/(const value_type divisor) const { + OPENVINO_ASSERT(divisor > 0, "divisor must be greater than 0"); + + if (m_dimension % divisor) { + return StaticDimension{}; + } + return StaticDimension(m_dimension / divisor); +} + +StaticDimension& StaticDimension::operator/=(const value_type divisor) { + return (*this = *this / divisor); +} + StaticDimension StaticDimension::operator&(const StaticDimension& dim) const { return (*this == dim) ? dim : 0; } diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.hpp b/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.hpp index 20efc49cdc7..590739d65e8 100644 --- a/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.hpp +++ b/src/plugins/intel_cpu/src/utils/shape_inference/static_dimension.hpp @@ -60,6 +60,8 @@ public: StaticDimension& operator+=(const StaticDimension& dim); StaticDimension& operator*=(const StaticDimension& dim); StaticDimension& operator&=(const StaticDimension& dim); + StaticDimension operator/(const value_type divisor) const; + StaticDimension &operator/=(const value_type divisor); private: value_type m_dimension = 0; diff --git a/src/tests/functional/inference_engine/transformations/batch_to_space_fusion.cpp b/src/tests/functional/inference_engine/transformations/batch_to_space_fusion.cpp index 19827b64104..74414edd36e 100644 --- a/src/tests/functional/inference_engine/transformations/batch_to_space_fusion.cpp +++ b/src/tests/functional/inference_engine/transformations/batch_to_space_fusion.cpp @@ -127,7 +127,7 @@ TEST_F(TransformationTestsF, NegativeBatchToSpaceFusionInvalidMode) { TEST_F(TransformationTestsF, NegativeBatchToSpaceFusionInvalidRank) { { - auto data = std::make_shared(element::f32, Shape{12, 3, 4, 8, 8}); + auto data = std::make_shared(element::f32, Shape{16, 3, 4, 8, 8}); auto trans_before =
std::make_shared(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4})); auto depth_to_space = std::make_shared(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); auto slice = std::make_shared(depth_to_space, @@ -139,7 +139,7 @@ TEST_F(TransformationTestsF, NegativeBatchToSpaceFusionInvalidRank) { manager.register_pass(); } { - auto data = std::make_shared(element::f32, Shape{12, 3, 4, 8, 8}); + auto data = std::make_shared(element::f32, Shape{16, 3, 4, 8, 8}); auto trans_before = std::make_shared(data, op::Constant::create(element::i64, Shape{5}, {1, 0, 2, 3, 4})); auto depth_to_space = std::make_shared(trans_before, opset6::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); auto slice = std::make_shared(depth_to_space, diff --git a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp index 16419827f71..935acd365cd 100644 --- a/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp +++ b/src/tests/ngraph_helpers/lpt_ngraph_functions/src/transformations_after_split_function.cpp @@ -17,7 +17,7 @@ namespace builder { namespace subgraph { std::shared_ptr TransformationsAfterSplitFunction::get(const std::string transformationName) { - const auto input = std::make_shared(element::u8, Shape{ 1, 3, 16, 16 }); + const auto input = std::make_shared(element::u8, Shape{ 1, 9, 16, 16 }); const size_t outputSize = 2ul; const auto axis = opset1::Constant::create(element::i64, Shape{}, { 2 }); @@ -68,7 +68,7 @@ std::shared_ptr TransformationsAfterSplitFunction::getLayerByTransformatio } if (transformationName == "ConvolutionTransformation") { const auto dequantizationOnData = makeDequantization(parent, { {element::f32}, {}, { 0.1f } }); - const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 3, 1, 1 }, { 2 }); + const auto weights = 
opset1::Constant::create(element::i8, Shape{ 3, 9, 1, 1 }, { 2 }); const auto dequantizationOnWeights = makeDequantization(weights, { {element::f32}, {}, {0.3f} }); return std::make_shared( dequantizationOnData, @@ -80,7 +80,7 @@ std::shared_ptr TransformationsAfterSplitFunction::getLayerByTransformatio } if (transformationName == "AsymmetricConvolutionTransformation") { const auto dequantizationOnData = makeDequantization(parent, { {element::f32}, { 128.f }, { 0.1f } }); - const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 3, 1, 1 }, { 2 }); + const auto weights = opset1::Constant::create(element::i8, Shape{ 3, 9, 1, 1 }, { 2 }); const auto dequantizationOnWeights = makeDequantization(weights, { {element::f32}, {}, {0.3f} }); return std::make_shared( dequantizationOnData, diff --git a/src/tests/unit/cpu/shape_inference_test/batch_to_space_shape_inference.cpp b/src/tests/unit/cpu/shape_inference_test/batch_to_space_shape_inference.cpp new file mode 100644 index 00000000000..a0e1628a2bd --- /dev/null +++ b/src/tests/unit/cpu/shape_inference_test/batch_to_space_shape_inference.cpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +using namespace ov; + +static std::shared_ptr make_batch_to_space( + PartialShape data_shape = PartialShape::dynamic(ov::Rank(2)), + PartialShape block_shape = PartialShape::dynamic(), + PartialShape crops_begin_shape = PartialShape::dynamic(), + PartialShape crops_end_shape = PartialShape::dynamic()) { + auto data = std::make_shared(element::f32, data_shape); + auto block = std::make_shared(element::i32, block_shape); + auto crops_begin = std::make_shared(element::i32, crops_begin_shape); + auto crops_end = std::make_shared(element::i32, crops_end_shape); + + const auto batch_to_space = std::make_shared(data, block, crops_begin, crops_end); + return batch_to_space; +} + 
+TEST(StaticShapeInferenceTest, BatchToSpaceWithHostTensorData) { + auto space_to_batch = make_batch_to_space(); + int32_t block_val[] = {1, 6, 5, 1, 16}; + int32_t pads_begin_val[] = {0, 2, 0, 0, 0}; + int32_t pads_end_val[] = {0, 2, 1, 0, 0}; + auto block = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{5}, block_val); + auto crops_begin = std::make_shared(element::i32, ov::Shape{5}, pads_begin_val); + auto crops_end = std::make_shared(element::i32, ov::Shape{5}, pads_end_val); + + const std::vector input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}}; + std::vector output_shapes = {{}}; + + std::map> constant_data; + constant_data[1] = block; + constant_data[2] = crops_begin; + constant_data[3] = crops_end; + + shape_inference(space_to_batch.get(), input_shapes, output_shapes, constant_data); + ASSERT_EQ(output_shapes[0], (StaticShape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16})); +} + +TEST(StaticShapeInferenceTest, BatchToSpaceWithMissingTensorData) { + auto batch_to_space = make_batch_to_space(); + int32_t block_val[] = {1, 6, 5, 1, 16}; + int32_t pads_end_val[] = {0, 2, 1, 0, 0}; + auto block = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{5}, block_val); + auto crops_end = std::make_shared(element::i32, ov::Shape{5}, pads_end_val); + + const std::vector input_shapes = {{960, 6, 13, 128, 16}, {5}, {5}, {5}}; + std::vector output_shapes = {{}}; + + std::map> constant_data; + constant_data[1] = block; + constant_data[3] = crops_end; + + EXPECT_THROW(shape_inference(batch_to_space.get(), input_shapes, output_shapes, constant_data), NodeValidationFailure); +} + +TEST(StaticShapeInferenceTest, batch_to_space_output_with_const_inputs) { + auto data = std::make_shared(element::f32, ov::PartialShape{-1, -1, -1, -1}); + auto block_shape = std::make_shared(element::i64, ov::Shape{4}, std::vector{1, 10, 5, 1}); + auto crops_begin = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 3, 1, 0}); + auto crops_end = 
std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 3, 0, 0}); + const auto batch_to_space = std::make_shared(data, block_shape, crops_begin, crops_end); + std::vector input_shapes = {{100, 7, 13, 3}, {4}, {4}, {4}}; + std::vector output_shapes = {{}}; + shape_inference(batch_to_space.get(), input_shapes, output_shapes); + + ASSERT_EQ(output_shapes[0], (StaticShape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); +} diff --git a/src/tests/unit/cpu/shape_inference_test/depth_to_space_shape_inference.cpp b/src/tests/unit/cpu/shape_inference_test/depth_to_space_shape_inference.cpp new file mode 100644 index 00000000000..7e7f92b33f5 --- /dev/null +++ b/src/tests/unit/cpu/shape_inference_test/depth_to_space_shape_inference.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include + +TEST(StaticShapeInferenceTest, DepthToSpaceTest) { + auto A = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(ov::Rank(4))); + auto depth_to_space = + std::make_shared(A, ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); + const std::vector input_shapes = {ov::StaticShape{1, 16, 3, 1080, 1616}}; + std::vector output_shapes = {ov::StaticShape{}}; + shape_inference(depth_to_space.get(), input_shapes, output_shapes); + ASSERT_EQ(output_shapes[0], (ov::StaticShape{1, 2, 2 * 3, 2 * 1080, 2 * 1616})); +} diff --git a/src/tests/unit/cpu/shape_inference_test/space_to_batch_shape_inference.cpp b/src/tests/unit/cpu/shape_inference_test/space_to_batch_shape_inference.cpp new file mode 100644 index 00000000000..6272c33d03e --- /dev/null +++ b/src/tests/unit/cpu/shape_inference_test/space_to_batch_shape_inference.cpp @@ -0,0 +1,87 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include +#include + +using namespace ov; + +static std::shared_ptr build_space_to_batch( + 
PartialShape data_shape = PartialShape::dynamic(ov::Rank(2)), + PartialShape block_shape = PartialShape::dynamic(), + PartialShape pads_begin_shape = PartialShape::dynamic(), + PartialShape pad_end_shape = PartialShape::dynamic()) { + auto data = std::make_shared(element::f32, data_shape); + auto block = std::make_shared(element::i32, block_shape); + auto pads_begin = std::make_shared(element::i32, pads_begin_shape); + auto pads_end = std::make_shared(element::i32, pad_end_shape); + + auto space_to_batch = std::make_shared(data, block, pads_begin, pads_end); + return space_to_batch; +} + +TEST(StaticShapeInferenceTest, SpaceToBatchTest) { + auto space_to_batch = build_space_to_batch(); + int32_t block_val[] = {1, 6, 5, 1, 16}; + int32_t pads_begin_val[] = {0, 2, 0, 0, 0}; + int32_t pads_end_val[] = {0, 2, 1, 0, 0}; + auto block = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{5}, block_val); + auto pads_begin = std::make_shared(element::i32, ov::Shape{5}, pads_begin_val); + auto pads_end = std::make_shared(element::i32, ov::Shape{5}, pads_end_val); + + const std::vector input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}}; + std::vector output_shapes = {{}}; + + std::map> constant_data; + constant_data[1] = block; + constant_data[2] = pads_begin; + constant_data[3] = pads_end; + + shape_inference(space_to_batch.get(), input_shapes, output_shapes, constant_data); + ASSERT_EQ(output_shapes[0], (StaticShape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16})); +} + +TEST(StaticShapeInferenceTest, SpaceToBatchThrowExceptionWithoutHostTensorData) { + auto space_to_batch = build_space_to_batch(); + + std::map> constant_data; + const std::vector input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}}; + std::vector output_shapes = {{}}; + + EXPECT_THROW(shape_inference(space_to_batch.get(), input_shapes, output_shapes), NodeValidationFailure); +} + +TEST(StaticShapeInferenceTest, SpaceToBatchThrowExceptionWithMissingPadsHostTensorData) { + auto 
space_to_batch = build_space_to_batch(); + + int32_t block_val[] = {1, 6, 5, 1, 16}; + auto block = std::make_shared(ngraph::element::Type_t::i32, ov::Shape{5}, block_val); + + std::map> constant_data; + constant_data[1] = block; + + const std::vector input_shapes = {{2, 32, 64, 128, 256}, {5}, {5}, {5}}; + std::vector output_shapes = {{}}; + + EXPECT_THROW(shape_inference(space_to_batch.get(), input_shapes, output_shapes), NodeValidationFailure); +} + +TEST(StaticShapeInferenceTest, space_to_batch_output_with_const_inputs) { + auto data = std::make_shared(element::f32, ov::PartialShape{-1, -1, -1, -1}); + auto block_shape = std::make_shared(element::i64, ov::Shape{4}, std::vector{1, 12, 100, 2}); + auto pads_begin = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 3, 38, 1}); + auto pads_end = std::make_shared(element::i64, ov::Shape{4}, std::vector{0, 5, 38, 0}); + const auto space_to_batch = std::make_shared(data, block_shape, pads_begin, pads_end); + std::vector input_shapes = {{2, 100, 1024, 3}, {4}, {4}, {4}}; + std::vector output_shapes = {{}}; + shape_inference(space_to_batch.get(), input_shapes, output_shapes); + + ASSERT_EQ(output_shapes[0], (StaticShape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2})); +} diff --git a/src/tests/unit/cpu/shape_inference_test/space_to_depth_shape_inference.cpp b/src/tests/unit/cpu/shape_inference_test/space_to_depth_shape_inference.cpp new file mode 100644 index 00000000000..65da7fc4c23 --- /dev/null +++ b/src/tests/unit/cpu/shape_inference_test/space_to_depth_shape_inference.cpp @@ -0,0 +1,20 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include + +TEST(StaticShapeInferenceTest, SpaceToDepthTest) { + auto A = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(ov::Rank(4))); + auto space_to_depth = + std::make_shared(A, ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, 2); + 
const std::vector input_shapes = {ov::StaticShape{1, 12, 4, 1080, 1616}}; + std::vector output_shapes = {ov::StaticShape{}}; + shape_inference(space_to_depth.get(), input_shapes, output_shapes); + ASSERT_EQ(output_shapes[0], (ov::StaticShape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2})); +}