diff --git a/src/common/transformations/src/ov_ops/augru_cell.cpp b/src/common/transformations/src/ov_ops/augru_cell.cpp index c98d9f77b6b..91b757910f7 100644 --- a/src/common/transformations/src/ov_ops/augru_cell.cpp +++ b/src/common/transformations/src/ov_ops/augru_cell.cpp @@ -66,8 +66,7 @@ void ov::op::internal::AUGRUCell::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes = {ov::PartialShape::dynamic(2)}; - shape_infer(this, input_shapes, output_shapes); + std::vector output_shapes = shape_infer(this, input_shapes); // Set output type and shape set_output_type(0, result_et, output_shapes[0]); diff --git a/src/common/transformations/src/ov_ops/augru_sequence.cpp b/src/common/transformations/src/ov_ops/augru_sequence.cpp index f83b8028474..4dc918d26c4 100644 --- a/src/common/transformations/src/ov_ops/augru_sequence.cpp +++ b/src/common/transformations/src/ov_ops/augru_sequence.cpp @@ -64,8 +64,7 @@ void ov::op::internal::AUGRUSequence::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes = {ov::PartialShape::dynamic(4), ov::PartialShape::dynamic(3)}; - shape_infer(this, input_shapes, output_shapes); + std::vector output_shapes = shape_infer(this, input_shapes); // Set output size, type and shape set_output_size(2); diff --git a/src/core/include/openvino/op/assign.hpp b/src/core/include/openvino/op/assign.hpp index 2ba16d46195..fb703e8c3f6 100644 --- a/src/core/include/openvino/op/assign.hpp +++ b/src/core/include/openvino/op/assign.hpp @@ -34,8 +34,6 @@ public: private: std::string m_variable_id; - template - friend void shape_infer(const Assign* op, const std::vector& input_shapes, std::vector& output_shapes); }; } // namespace v3 @@ -72,10 +70,6 @@ public: OPENVINO_SUPPRESS_DEPRECATED_END 
bool has_evaluate() const override; bool constant_fold(OutputVector& output_values, const OutputVector& inputs_values) override; - -private: - template - friend void shape_infer(const Assign* op, const std::vector& input_shapes, std::vector& output_shapes); }; } // namespace v6 } // namespace op diff --git a/src/core/include/openvino/op/experimental_detectron_roi_feature.hpp b/src/core/include/openvino/op/experimental_detectron_roi_feature.hpp index 25411b5c29d..b1f0e29f975 100644 --- a/src/core/include/openvino/op/experimental_detectron_roi_feature.hpp +++ b/src/core/include/openvino/op/experimental_detectron_roi_feature.hpp @@ -58,11 +58,6 @@ public: private: Attributes m_attrs; - - template - friend void shape_infer(const ExperimentalDetectronROIFeatureExtractor* op, - const std::vector& input_shapes, - std::vector& output_shapes); }; } // namespace v6 } // namespace op diff --git a/src/core/include/openvino/op/extractimagepatches.hpp b/src/core/include/openvino/op/extractimagepatches.hpp index 37d506fe189..fc4977e862d 100644 --- a/src/core/include/openvino/op/extractimagepatches.hpp +++ b/src/core/include/openvino/op/extractimagepatches.hpp @@ -67,10 +67,6 @@ private: Strides m_patch_movement_strides; Shape m_patch_selection_rates; PadType m_padding{PadType::EXPLICIT}; - template - friend void shape_infer(const ExtractImagePatches* op, - const std::vector& input_shapes, - std::vector& output_shapes); }; } // namespace v3 } // namespace op diff --git a/src/core/include/openvino/op/lstm_cell.hpp b/src/core/include/openvino/op/lstm_cell.hpp index 249b3dccdc2..f1798385047 100644 --- a/src/core/include/openvino/op/lstm_cell.hpp +++ b/src/core/include/openvino/op/lstm_cell.hpp @@ -240,11 +240,6 @@ private: /// \brief The order of gates in weights tensors. 
/// LSTMWeightsFormat m_weights_format; - - static constexpr std::size_t s_gates_count{4}; - static constexpr std::size_t s_peepholes_count{3}; - template - friend void shape_infer(const LSTMCell* op, const std::vector& input_shapes, std::vector& output_shapes); }; } // namespace v0 @@ -380,10 +375,6 @@ private: /// \brief The Activation function h. /// util::ActivationFunction m_activation_h; - - static constexpr std::size_t s_gates_count{4}; - template - friend void shape_infer(const LSTMCell* op, const std::vector& input_shapes, std::vector& output_shapes); }; } // namespace v4 } // namespace op diff --git a/src/core/include/openvino/op/region_yolo.hpp b/src/core/include/openvino/op/region_yolo.hpp index 1a0078eefa0..917353e737d 100644 --- a/src/core/include/openvino/op/region_yolo.hpp +++ b/src/core/include/openvino/op/region_yolo.hpp @@ -105,9 +105,6 @@ private: std::vector m_anchors{}; int m_axis; int m_end_axis; - - template - friend void shape_infer(const RegionYolo* op, const std::vector& input_shapes, std::vector& output_shapes); }; } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/adaptive_avg_pool_shape_inference.hpp b/src/core/shape_inference/include/adaptive_avg_pool_shape_inference.hpp index 50ebf376144..cc07e36197d 100644 --- a/src/core/shape_inference/include/adaptive_avg_pool_shape_inference.hpp +++ b/src/core/shape_inference/include/adaptive_avg_pool_shape_inference.hpp @@ -12,19 +12,11 @@ namespace ov { namespace op { namespace v8 { -template -std::vector shape_infer(const AdaptiveAvgPool* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { - return {pooling::out_shape_infer(op, input_shapes, constant_data)}; -} - -template -void shape_infer(const AdaptiveAvgPool* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); +template > +std::vector shape_infer(const 
AdaptiveAvgPool* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + return {pooling::out_shape_infer(op, input_shapes, tensor_accessor)}; } } // namespace v8 } // namespace op diff --git a/src/core/shape_inference/include/adaptive_max_pool_shape_inference.hpp b/src/core/shape_inference/include/adaptive_max_pool_shape_inference.hpp index 4e111d897a2..7afda410724 100644 --- a/src/core/shape_inference/include/adaptive_max_pool_shape_inference.hpp +++ b/src/core/shape_inference/include/adaptive_max_pool_shape_inference.hpp @@ -12,19 +12,11 @@ namespace ov { namespace op { namespace v8 { -template -std::vector shape_infer(const AdaptiveMaxPool* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { - return {2, pooling::out_shape_infer(op, input_shapes, constant_data)}; -} - -template -void shape_infer(const AdaptiveMaxPool* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); +template > +std::vector shape_infer(const AdaptiveMaxPool* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + return {2, pooling::out_shape_infer(op, input_shapes, tensor_accessor)}; } } // namespace v8 } // namespace op diff --git a/src/core/shape_inference/include/assign_shape_inference.hpp b/src/core/shape_inference/include/assign_shape_inference.hpp index ecdd01f3863..307e71b8774 100644 --- a/src/core/shape_inference/include/assign_shape_inference.hpp +++ b/src/core/shape_inference/include/assign_shape_inference.hpp @@ -5,18 +5,20 @@ #include #include +#include "copy_shape_inference.hpp" #include "utils.hpp" namespace ov { namespace op { namespace v3 { -template -void shape_infer(const Assign* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() ==
1); +template > +std::vector shape_infer(const Assign* op, const std::vector& input_shapes) { + auto output_shapes = ov::op::copy_shape_infer(op, input_shapes); + const auto& input_shape = input_shapes[0]; - const auto& variable_info = op->m_variable->get_info(); + const auto& variable_info = op->get_variable()->get_info(); NODE_VALIDATION_CHECK(op, - op->m_variable_id == variable_info.variable_id, + op->get_variable_id() == variable_info.variable_id, "Variables identifiers are inconsistent."); const auto& arg_t = op->get_input_element_type(0); NODE_VALIDATION_CHECK(op, arg_t == variable_info.data_type, "Variables types are inconsistent."); @@ -26,16 +28,8 @@ void shape_infer(const Assign* op, const std::vector& input_shapes, std::vect input_shape.to_shape() == variable_info.data_shape.to_shape(), "Variables output shapes are inconsistent."); } - copy_shape_infer(op, input_shapes, output_shapes); + return output_shapes; } } // namespace v3 - -namespace v6 { - -template -void shape_infer(const Assign* op, const std::vector& input_shapes, std::vector& output_shapes) { - copy_shape_infer(op, input_shapes, output_shapes); -} -} // namespace v6 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/augru_cell_shape_inference.hpp b/src/core/shape_inference/include/augru_cell_shape_inference.hpp index a320b6668fc..10a9282ac55 100644 --- a/src/core/shape_inference/include/augru_cell_shape_inference.hpp +++ b/src/core/shape_inference/include/augru_cell_shape_inference.hpp @@ -12,10 +12,8 @@ namespace ov { namespace op { namespace internal { -template -void shape_infer(const ov::op::internal::AUGRUCell* op, - const std::vector& input_shapes, - std::vector& output_shapes) { +template > +std::vector shape_infer(const ov::op::internal::AUGRUCell* op, const std::vector& input_shapes) { constexpr size_t expected_in_shapes_count = 6; NODE_VALIDATION_CHECK(op, input_shapes.size() == expected_in_shapes_count, @@ -27,7 +25,7 @@ void shape_infer(const 
ov::op::internal::AUGRUCell* op, constexpr auto num_gates = 3; constexpr auto num_state_nodes = 1; - output_shapes = rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes); + auto output_shapes = rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes); // `A` input shape validation // [batch_size, 1] const auto& a_shape = input_shapes.back(); @@ -41,6 +39,7 @@ void shape_infer(const ov::op::internal::AUGRUCell* op, } NODE_VALIDATION_CHECK(op, a_shape[1].compatible(1), "The last dimension of `A` shape must be equal to `1`."); } + return output_shapes; } } // namespace internal } // namespace op diff --git a/src/core/shape_inference/include/augru_sequence_shape_inference.hpp b/src/core/shape_inference/include/augru_sequence_shape_inference.hpp index f02d7499ea8..c76e491a5bb 100644 --- a/src/core/shape_inference/include/augru_sequence_shape_inference.hpp +++ b/src/core/shape_inference/include/augru_sequence_shape_inference.hpp @@ -11,10 +11,9 @@ namespace ov { namespace op { namespace internal { -template -void shape_infer(const ov::op::internal::AUGRUSequence* op, - const std::vector& input_shapes, - std::vector& output_shapes) { +template > +std::vector shape_infer(const ov::op::internal::AUGRUSequence* op, + const std::vector& input_shapes) { constexpr size_t expected_in_shapes_count = 7; NODE_VALIDATION_CHECK(op, input_shapes.size() == expected_in_shapes_count, @@ -26,12 +25,12 @@ void shape_infer(const ov::op::internal::AUGRUSequence* op, constexpr auto num_gates = 3; constexpr auto num_state_nodes = 1; - output_shapes = rnn::seq_base_shape_infer(op, - input_shapes, - num_gates, - num_state_nodes, - op->get_direction(), - op->get_linear_before_reset()); + auto output_shapes = rnn::seq_base_shape_infer(op, + input_shapes, + num_gates, + num_state_nodes, + op->get_direction(), + op->get_linear_before_reset()); // A input shape validation // [batch_size, seq_length, 1] const auto& a_shape = input_shapes.back(); @@ -48,6 +47,7 @@ 
void shape_infer(const ov::op::internal::AUGRUSequence* op, } NODE_VALIDATION_CHECK(op, a_shape[2].compatible(1), "The last dimension of `A` shape must be equal to `1`."); } + return output_shapes; } } // namespace internal } // namespace op diff --git a/src/core/shape_inference/include/avg_pool_shape_inference.hpp b/src/core/shape_inference/include/avg_pool_shape_inference.hpp index fe6df898366..ae45b3cb8bd 100644 --- a/src/core/shape_inference/include/avg_pool_shape_inference.hpp +++ b/src/core/shape_inference/include/avg_pool_shape_inference.hpp @@ -36,12 +36,11 @@ inline void valid_dilated_kernel_with_padding(const v1::AvgPool* op, } // namespace pooling namespace v1 { -template -std::vector shape_infer(const AvgPool* op, - const std::vector& input_shapes, - TContainer& pads_begin, - TContainer& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const AvgPool* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); const auto& data_shape = input_shapes[0]; const auto dilations = Strides(op->get_kernel().size(), 1); diff --git a/src/core/shape_inference/include/batch_to_space_shape_inference.hpp b/src/core/shape_inference/include/batch_to_space_shape_inference.hpp index ac2facdd4c9..94010660716 100644 --- a/src/core/shape_inference/include/batch_to_space_shape_inference.hpp +++ b/src/core/shape_inference/include/batch_to_space_shape_inference.hpp @@ -16,10 +16,10 @@ namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const BatchToSpace* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const BatchToSpace* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { using namespace ov::util; using ValType = typename TShape::value_type::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); @@ 
-29,10 +29,10 @@ std::vector shape_infer(const BatchToSpace* op, const auto& crops_begin_shape = input_shapes[2]; const auto& crops_end_shape = input_shapes[3]; - auto inputs_same_ps = crops_begin_shape; + TRShape inputs_same_ps = crops_begin_shape; NODE_VALIDATION_CHECK( op, - TShape::merge_into(inputs_same_ps, crops_end_shape) && TShape::merge_into(inputs_same_ps, block_shape), + TRShape::merge_into(inputs_same_ps, crops_end_shape) && TRShape::merge_into(inputs_same_ps, block_shape), "block_shape, crops_begin and crops_end inputs must have the same shape. Got: ", block_shape, ", ", @@ -45,6 +45,9 @@ std::vector shape_infer(const BatchToSpace* op, "block_shape and crops inputs must have rank 1. Got: ", inputs_same_ps.rank()); + auto output_shapes = std::vector(1); + auto& out_shape = output_shapes[0]; + const auto data_rank = data_shape.rank(); if (data_rank.is_static()) { constexpr size_t spatial_dim_offset = 1; @@ -65,10 +68,9 @@ std::vector shape_infer(const BatchToSpace* op, data_rank); } - TShape out_shape; out_shape.reserve(data_rank_size); - const auto blocks = get_input_const_data_as(op, 1, constant_data); + const auto blocks = get_input_const_data_as(op, 1, tensor_accessor); if (blocks) { NODE_VALIDATION_CHECK(op, std::none_of(begin(*blocks), end(*blocks), cmp::Less(1)), @@ -81,9 +83,12 @@ std::vector shape_infer(const BatchToSpace* op, out_shape.emplace_back(dim::inf_bound); } - std::vector crops_begin_val, crops_end_val; - if (get_data_as_int64(2, op, crops_begin_val, constant_data) && - get_data_as_int64(3, op, crops_end_val, constant_data)) { + const auto crops_begin = get_input_const_data_as(op, 2, tensor_accessor); + const auto crops_end = get_input_const_data_as(op, 3, tensor_accessor); + if (crops_begin && crops_end) { + auto& crops_begin_val = *crops_begin; + auto& crops_end_val = *crops_end; + constexpr auto is_invalid_crop = cmp::Less(0); NODE_VALIDATION_CHECK(op, std::none_of(begin(crops_begin_val), end(crops_begin_val), is_invalid_crop) && @@ 
-112,20 +117,12 @@ std::vector shape_infer(const BatchToSpace* op, } else { out_shape.insert(out_shape.end(), data_rank_size - spatial_dim_offset, Dimension::dynamic()); } - return {out_shape}; } else { - return {PartialShape::dynamic()}; + out_shape = PartialShape::dynamic(); } -} -template -void shape_infer(const ov::op::v1::BatchToSpace* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); + return output_shapes; } - } // namespace v1 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/broadcast_shape_inference.hpp b/src/core/shape_inference/include/broadcast_shape_inference.hpp index ca7bc8a46f4..1a0da2d0370 100644 --- a/src/core/shape_inference/include/broadcast_shape_inference.hpp +++ b/src/core/shape_inference/include/broadcast_shape_inference.hpp @@ -15,11 +15,11 @@ namespace ov { namespace op { namespace util { -template +template > void validate_target_shape_none(const ov::Node* op, const T& arg_shape, const AxisVector& axes_mapping_val, - const T& target_input_shape) { + const TRShape& target_input_shape) { if (arg_shape.rank().is_static() && target_input_shape.rank().is_static()) { const auto target_rank_length = target_input_shape.size(); // axes_mapping needs to be in sorted order @@ -62,8 +62,8 @@ void validate_target_shape_none(const ov::Node* op, } } -template -void validate_target_shape_numpy(const ov::Node* op, const T& arg_shape, const T& target_input_shape) { +template > +void validate_target_shape_numpy(const ov::Node* op, const T& arg_shape, const TRShape& target_input_shape) { if (arg_shape.rank().is_dynamic() || target_input_shape.rank().is_dynamic()) { return; } @@ -91,13 +91,13 @@ void validate_target_shape_numpy(const ov::Node* op, const T& arg_shape, const T } } -template +template > void set_result_shape_pdpd(const ov::Node* op, const T& arg0_shape, - const T& target_input_shape, - T& 
result_shape, + const TRShape& target_input_shape, + TRShape& result_shape, const ov::op::BroadcastModeSpec& broadcast_spec) { - using DimType = typename std::iterator_traits::value_type; + using DimType = typename T::value_type; if (arg0_shape.rank().is_dynamic() || target_input_shape.rank().is_dynamic()) { result_shape = PartialShape::dynamic(target_input_shape.rank()); return; @@ -126,14 +126,17 @@ void set_result_shape_pdpd(const ov::Node* op, } } -template -void set_result_shape_bidirectional(const ov::Node* op, const T& arg_shape, T& target_input_shape, T& result_shape) { - using DimType = typename std::iterator_traits::value_type; +template > +void set_result_shape_bidirectional(const ov::Node* op, + const T& arg_shape, + TRShape& target_input_shape, + TRShape& result_shape) { + using DimType = typename T::value_type; if (arg_shape.rank().is_dynamic() || target_input_shape.rank().is_dynamic()) { result_shape = PartialShape::dynamic(); return; } - auto arg_shape_vec = arg_shape; + auto arg_shape_vec = static_cast(arg_shape); // Add left padding to the shape with smaller rank, if the ranks are not equal if (arg_shape_vec.size() < target_input_shape.size()) { @@ -154,12 +157,10 @@ void set_result_shape_bidirectional(const ov::Node* op, const T& arg_shape, T& t } } -template -void broadcast_base_shape_infer( - const ov::op::util::BroadcastBase* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { +template > +std::vector broadcast_base_shape_infer(const ov::op::util::BroadcastBase* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { // shape node should produce a one dimensional shape. 
auto broadcast_shape_rank = input_shapes[1].rank(); NODE_VALIDATION_CHECK(op, @@ -177,35 +178,35 @@ void broadcast_base_shape_infer( axes_shape_rank); } + auto output_shapes = std::vector(1); auto& result_shape = output_shapes[0]; const auto& data_input_shape = input_shapes[0]; const auto& target_input_shape = input_shapes[1]; const bool is_target_input_shape_static = target_input_shape.is_static(); - T target_as_shape; - bool is_target_shape_defined = get_data_as_shape(1, op, target_as_shape, constant_data); + auto target_as_shape = get_input_const_data_as_shape(op, 1, ta); - if (!is_target_shape_defined) { + if (!target_as_shape) { if (auto concat = ov::as_type_ptr(op->get_input_node_shared_ptr(1))) { const auto concat_inputs = concat->input_values(); if (concat->get_output_partial_shape(0).is_static() && concat->get_shape().size() == 1 && concat_inputs.size() == shape_size(concat->get_shape())) { + target_as_shape.emplace(); for (const auto& concat_input : concat_inputs) { auto source_node_ptr = concat_input.get_node_shared_ptr(); if (auto source_const_ptr = ov::as_type_ptr(source_node_ptr)) { - target_as_shape.push_back(source_const_ptr->get_axis_vector_val()[0]); + target_as_shape->push_back(source_const_ptr->get_axis_vector_val()[0]); } else { - target_as_shape.push_back(Dimension::dynamic()); + target_as_shape->push_back(Dimension::dynamic()); } } - is_target_shape_defined = true; } } } if (mode.m_type == BroadcastType::NONE) { - if (is_target_shape_defined) { - result_shape = target_as_shape; + if (target_as_shape) { + result_shape = *target_as_shape; } else if (is_target_input_shape_static) { result_shape = PartialShape::dynamic(target_input_shape[0].get_length()); } else { @@ -223,54 +224,54 @@ void broadcast_base_shape_infer( axes_shape, " doesn't match rank of input tensor ", input_rank); - std::vector axes_mapping_val; - if (is_target_shape_defined && get_data_as_int64(2, op, axes_mapping_val, constant_data)) { - AxisVector axes_mapping = - 
AxisVector(std::vector(axes_mapping_val.begin(), axes_mapping_val.end())); - validate_target_shape_none(op, data_input_shape, axes_mapping, target_as_shape); + + if (target_as_shape) { + if (auto axes_mapping = get_input_const_data_as(op, 2, ta)) { + validate_target_shape_none(op, data_input_shape, *axes_mapping, *target_as_shape); + } } } } else if (mode.m_type == BroadcastType::NUMPY) { - if (is_target_shape_defined) { - result_shape = target_as_shape; - validate_target_shape_numpy(op, data_input_shape, target_as_shape); + if (target_as_shape) { + result_shape = *target_as_shape; + validate_target_shape_numpy(op, data_input_shape, *target_as_shape); } else if (is_target_input_shape_static) { result_shape = PartialShape::dynamic(target_input_shape[0].get_length()); } else { result_shape = PartialShape::dynamic(); } } else if (mode.m_type == BroadcastType::PDPD) { - if (is_target_shape_defined) { - set_result_shape_pdpd(op, data_input_shape, target_as_shape, result_shape, mode); + if (target_as_shape) { + set_result_shape_pdpd(op, data_input_shape, *target_as_shape, result_shape, mode); } else if (is_target_input_shape_static) { result_shape = PartialShape::dynamic(target_input_shape[0].get_length()); } else { result_shape = PartialShape::dynamic(); } } else if (mode.m_type == BroadcastType::BIDIRECTIONAL) { - if (is_target_shape_defined) { - set_result_shape_bidirectional(op, data_input_shape, target_as_shape, result_shape); + if (target_as_shape) { + set_result_shape_bidirectional(op, data_input_shape, *target_as_shape, result_shape); } else if (data_input_shape.rank().is_static() && is_target_input_shape_static) { result_shape = PartialShape::dynamic(target_input_shape[0].get_length()); // The logic of BroadcastType::BIDIRECTIONAL matches broadcast_merge_into with AutoBroadcastType::NUMPY - NODE_VALIDATION_CHECK(op, - T::broadcast_merge_into(result_shape, data_input_shape, op::AutoBroadcastType::NUMPY), - "Broadcast shape inference failed, output shape 
calculation with " - "'broadcast_merge_into' was unsuccessful."); + NODE_VALIDATION_CHECK( + op, + TRShape::broadcast_merge_into(result_shape, data_input_shape, op::AutoBroadcastType::NUMPY), + "Broadcast shape inference failed, output shape calculation with " + "'broadcast_merge_into' was unsuccessful."); } else { result_shape = PartialShape::dynamic(); } } + return output_shapes; } } // namespace util namespace v3 { -template -void shape_infer(const ov::op::v3::Broadcast* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - NODE_VALIDATION_CHECK(op, output_shapes.size() == 1); +template > +std::vector shape_infer(const ov::op::v3::Broadcast* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { auto& mode = op->get_broadcast_spec(); if (mode.m_type == BroadcastType::NONE) { NODE_VALIDATION_CHECK(op, @@ -281,19 +282,18 @@ void shape_infer(const ov::op::v3::Broadcast* op, input_shapes.size() == 2, "axes_mapping input should not be provided for mode other than explicit"); } - broadcast_base_shape_infer(op, input_shapes, output_shapes, constant_data); + return broadcast_base_shape_infer(op, input_shapes, ta); } } // namespace v3 namespace v1 { -template -void shape_infer(const ov::op::v1::Broadcast* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - NODE_VALIDATION_CHECK(op, output_shapes.size() == 1 && (input_shapes.size() == 2 || input_shapes.size() == 3)); +template > +std::vector shape_infer(const ov::op::v1::Broadcast* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3)); - broadcast_base_shape_infer(op, input_shapes, output_shapes, constant_data); + return broadcast_base_shape_infer(op, input_shapes, ta); } } // namespace v1 diff --git 
a/src/core/shape_inference/include/bucketize_shape_inference.hpp b/src/core/shape_inference/include/bucketize_shape_inference.hpp index decbc894fec..e16bcfe755e 100644 --- a/src/core/shape_inference/include/bucketize_shape_inference.hpp +++ b/src/core/shape_inference/include/bucketize_shape_inference.hpp @@ -4,14 +4,15 @@ #pragma once -#include +#include "openvino/op/bucketize.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v3 { -template -std::vector shape_infer(const Bucketize* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const Bucketize* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2)); const auto& data_shape = input_shapes[0]; @@ -23,11 +24,6 @@ std::vector shape_infer(const Bucketize* op, const std::vector& buckets_shape); return {data_shape}; } - -template -void shape_infer(const Bucketize* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v3 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/compare.hpp b/src/core/shape_inference/include/compare.hpp index 9b20fb861d9..b3c10c630c7 100644 --- a/src/core/shape_inference/include/compare.hpp +++ b/src/core/shape_inference/include/compare.hpp @@ -6,6 +6,7 @@ #include +#include "openvino/core/type/bfloat16.hpp" #include "openvino/core/type/float16.hpp" namespace ov { @@ -108,9 +109,10 @@ public: */ template ::value || std::is_same::value) && - (std::is_signed::value || std::is_same::value)) || - (std::is_unsigned::value && std::is_unsigned::value)>::type* = nullptr> + typename std::enable_if< + ((std::is_signed::value || std::is_same::value || std::is_same::value) && + (std::is_signed::value || std::is_same::value || std::is_same::value)) || + (std::is_unsigned::value && std::is_unsigned::value)>::type* = nullptr> constexpr bool lt(T a, U b) noexcept { return a < b; } @@ -125,7 +127,8 @@ constexpr bool lt(T a, 
U b) noexcept { template ::value || std::is_same::value) && + typename std::enable_if<(std::is_floating_point::value || std::is_same::value || + std::is_same::value) && std::is_unsigned::value>::type* = nullptr> constexpr bool lt(T a, U b) noexcept { return a < 0 ? true : a < b; @@ -141,8 +144,9 @@ constexpr bool lt(T a, U b) noexcept { template ::value && (std::is_floating_point::value || - std::is_same::value)>::type* = nullptr> + typename std::enable_if::value && + (std::is_floating_point::value || std::is_same::value || + std::is_same::value)>::type* = nullptr> constexpr bool lt(T a, U b) noexcept { return b < 0 ? false : a < b; } diff --git a/src/core/shape_inference/include/concat_shape_inference.hpp b/src/core/shape_inference/include/concat_shape_inference.hpp index d571b9417c4..e6a72f9b441 100644 --- a/src/core/shape_inference/include/concat_shape_inference.hpp +++ b/src/core/shape_inference/include/concat_shape_inference.hpp @@ -12,14 +12,15 @@ namespace ov { namespace op { namespace v0 { -template -void shape_infer(const Concat* op, const std::vector& input_shapes, std::vector& output_shapes) { - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const Concat* op, const std::vector& input_shapes) { + using DimType = typename T::value_type; const auto concat_axis = op->get_concatenation_axis(); const auto empty_dim = DimType{}; auto concat_dim = DimType{0}; + auto output_shapes = std::vector(1); auto& output_shape = output_shapes.front(); if (std::is_same::value) { @@ -29,16 +30,16 @@ void shape_infer(const Concat* op, const std::vector& input_shapes, std::vect output_shape[concat_axis] = empty_dim; } - for (auto input : input_shapes) { + for (auto& input : input_shapes) { if (input.rank().is_static()) { - concat_dim += input[concat_axis]; - input[concat_axis] = empty_dim; + auto in_copy = TRShape(input); + concat_dim += in_copy[concat_axis]; + in_copy[concat_axis] = empty_dim; NODE_VALIDATION_CHECK(op, - 
T::merge_into(output_shape, input), + TRShape::merge_into(output_shape, in_copy), "Argument shapes are inconsistent; they must have the same rank, and must " - "have ", - "equal dimension everywhere except on the concatenation axis (axis ", + "have equal dimension everywhere except on the concatenation axis (axis ", concat_axis, ")."); } else { @@ -49,6 +50,7 @@ void shape_infer(const Concat* op, const std::vector& input_shapes, std::vect if (output_shape.rank().is_static()) { output_shape[concat_axis] = concat_dim; } + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp b/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp index b4585434dc2..ee6ae9baa71 100644 --- a/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp +++ b/src/core/shape_inference/include/convolution_backprop_shape_inference.hpp @@ -11,43 +11,46 @@ namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const ConvolutionBackpropData* op, - const std::vector& input_shapes, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const ConvolutionBackpropData* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end, + const ITensorAccessor& ta = make_tensor_accessor()) { const auto inputs_count = input_shapes.size(); const auto has_spatial_shape = inputs_count >= 3; NODE_VALIDATION_CHECK(op, inputs_count >= 2); using namespace ov::util; - TShape out_spatial_shape; + ov::optional out_spatial_shape; if (has_spatial_shape) { const auto& spatial_shape = input_shapes[2]; NODE_VALIDATION_CHECK(op, spatial_shape.rank().compatible(1), "Input delivering output shape must have rank 1."); - - if (!get_data_as_shape(2, op, out_spatial_shape, constant_data)) { + out_spatial_shape = get_input_const_data_as_shape(op, 2, ta); + if 
(!out_spatial_shape) { if (spatial_shape.is_static()) { - out_spatial_shape.resize(spatial_shape[0].get_length()); + out_spatial_shape.emplace(); + out_spatial_shape->resize(spatial_shape[0].get_length()); } else { out_spatial_shape = PartialShape::dynamic(); } } + } else { + out_spatial_shape.emplace(); } - const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, out_spatial_shape); + const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, *out_spatial_shape); - TShape output_shape; + TRShape output_shape; if (num_spatial != convolution::num_spatial_undefined) { const auto& data_shape = input_shapes[0]; const auto& filters_shape = input_shapes[1]; NODE_VALIDATION_CHECK( op, - !has_spatial_shape || out_spatial_shape.rank().is_dynamic() || out_spatial_shape.size() == num_spatial, + !has_spatial_shape || out_spatial_shape->rank().is_dynamic() || out_spatial_shape->size() == num_spatial, "Output shape should be defined for all and only spatial dimensions."); convolution::resize_empty_padding(num_spatial, pads_begin, pads_end); @@ -56,7 +59,7 @@ std::vector shape_infer(const ConvolutionBackpropData* op, convolution::validate::data_shape(op, data_shape); convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end); } - convolution::apply_padding(op, input_shapes, out_spatial_shape, pads_begin, pads_end); + convolution::apply_padding(op, input_shapes, *out_spatial_shape, pads_begin, pads_end); output_shape.reserve(util::spatial_dim_offset + num_spatial); output_shape.emplace_back(data_shape.rank().is_static() ? 
data_shape[0] : dim::inf_bound); @@ -64,8 +67,8 @@ std::vector shape_infer(const ConvolutionBackpropData* op, if (has_spatial_shape) { output_shape.insert(output_shape.end(), - std::make_move_iterator(out_spatial_shape.begin()), - std::make_move_iterator(out_spatial_shape.end())); + std::make_move_iterator(out_spatial_shape->begin()), + std::make_move_iterator(out_spatial_shape->end())); } else { convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape); } diff --git a/src/core/shape_inference/include/convolution_backprop_shape_inference_util.hpp b/src/core/shape_inference/include/convolution_backprop_shape_inference_util.hpp index eaa8b86748a..d9d2a8eb664 100644 --- a/src/core/shape_inference/include/convolution_backprop_shape_inference_util.hpp +++ b/src/core/shape_inference/include/convolution_backprop_shape_inference_util.hpp @@ -40,7 +40,9 @@ void filter_shape(const ov::op::util::ConvolutionBackPropBase* op, template ::value>::type* = nullptr> -size_t calculate_num_spatial(const TOp* op, const std::vector& input_shapes, const TShape& out_spatial_shape) { +size_t calculate_num_spatial(const TOp* op, + const std::vector& input_shapes, + const result_shape_t& out_spatial_shape) { NODE_VALIDATION_CHECK(op, input_shapes.size() > 1); auto num_spatial = util::get_num_spatial(op); @@ -77,7 +79,7 @@ template void apply_auto_pad(const TOp* op, const TShape& data_shape, const TShape& filters_shape, - const TShape& out_spatial_shape, + const result_shape_t& out_spatial_shape, TIter pads_begin, TIter pads_end) { const auto& strides = op->get_strides(); @@ -94,12 +96,12 @@ void apply_auto_pad(const TOp* op, for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim, ++filter_dim) { using namespace ov::util; - if (data_dim->is_static() && filter_dim->is_static() && out_spatial_shape[i].is_static()) { + if (dim::is_static(*data_dim) && dim::is_static(*filter_dim) && out_spatial_shape[i].is_static()) { const auto 
dilated_filter = dim::dilated(*filter_dim, dilations[i]); - const auto dim_len = static_cast(data_dim->get_length() - 1); - const auto padding = std::max( - dim_len * strides[i] + dilated_filter.get_length() - out_spatial_shape[i].get_length() + out_padding[i], - 0); + const auto dim_len = static_cast(dim::get_length(*data_dim) - 1); + const auto padding = std::max(dim_len * strides[i] + dim::get_length(dilated_filter) - + out_spatial_shape[i].get_length() + out_padding[i], + 0); *pad_b = padding / 2; *pad_e = padding - *pad_b; @@ -122,7 +124,7 @@ void apply_auto_pad(const TOp* op, template void apply_padding(const util::ConvolutionBackPropBase* op, const std::vector& input_shapes, - const TShape& out_spatial_shape, + const result_shape_t& out_spatial_shape, CoordinateDiff& pads_begin, CoordinateDiff& pads_end) { const auto& data_shape = input_shapes[0]; @@ -165,7 +167,7 @@ void append_spatial_shape(const TOp* op, const TShape& filters_shape, const TContainer& pads_begin, const TContainer& pads_end, - TShape& out_shape) { + result_shape_t& out_shape) { using namespace ov::util; const auto& strides = op->get_strides(); diff --git a/src/core/shape_inference/include/convolution_shape_inference.hpp b/src/core/shape_inference/include/convolution_shape_inference.hpp index b1bdeb4317e..5cd6ab4052e 100644 --- a/src/core/shape_inference/include/convolution_shape_inference.hpp +++ b/src/core/shape_inference/include/convolution_shape_inference.hpp @@ -10,18 +10,21 @@ namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const TFrowardConv* op, - const std::vector& input_shapes, - TContainer& pads_begin, - TContainer& pads_end, - const std::map& constant_data = {}) { +template , + typename std::enable_if::value || + std::is_same::value>::type* = nullptr> +std::vector shape_infer(const TOp* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2); using 
namespace ov::util; const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes); - TShape output_shape; + TRShape output_shape; if (num_spatial != util::num_spatial_undefined) { const auto& data_shape = input_shapes[0]; const auto& filters_shape = input_shapes[1]; diff --git a/src/core/shape_inference/include/convolution_shape_inference_util.hpp b/src/core/shape_inference/include/convolution_shape_inference_util.hpp index c1a26671e3e..e26569868b9 100644 --- a/src/core/shape_inference/include/convolution_shape_inference_util.hpp +++ b/src/core/shape_inference/include/convolution_shape_inference_util.hpp @@ -189,8 +189,8 @@ void apply_auto_pad(const TOp* op, for (size_t i = 0; i < num_spatial; ++i, ++pad_b, ++pad_e, ++data_dim, ++kernel_dim) { using namespace ov::util; - if (kernel_dim->is_static()) { - std::tie(*pad_b, *pad_e) = dim::padding(*data_dim, kernel_dim->get_length(), dilations[i], strides[i]); + if (dim::is_static(*kernel_dim)) { + std::tie(*pad_b, *pad_e) = dim::padding(*data_dim, dim::get_length(*kernel_dim), dilations[i], strides[i]); } else { *pad_b = 0; *pad_e = 0; @@ -241,6 +241,7 @@ void apply_padding(const TOp* op, */ template , typename std::enable_if::value || std::is_base_of::value>::type* = nullptr> void append_spatial_shape(const TOp* op, @@ -248,7 +249,7 @@ void append_spatial_shape(const TOp* op, const TShape& filters_shape, CoordinateDiff& pads_begin, CoordinateDiff& pads_end, - TShape& out_shape) { + TRShape& out_shape) { using namespace ov::util; using TDim = typename TShape::value_type; @@ -266,8 +267,8 @@ void append_spatial_shape(const TOp* op, const auto& dilations = op->get_dilations(); for (size_t i = 0; i < spatial_num; ++i, ++data_dim, ++filters_dim) { - auto dim = *data_dim + (pads_begin[i] + pads_end[i]); - const auto filter_dilated = dim::dilated(*filters_dim, dilations[i]); + TDim dim = *data_dim + (pads_begin[i] + pads_end[i]); + const TDim filter_dilated = dim::dilated(*filters_dim, dilations[i]); if 
(dim.is_static() && filter_dilated.is_static()) { // Use check from pooling op as it is same. diff --git a/src/core/shape_inference/include/copy_shape_inference.hpp b/src/core/shape_inference/include/copy_shape_inference.hpp new file mode 100644 index 00000000000..21dac2399b7 --- /dev/null +++ b/src/core/shape_inference/include/copy_shape_inference.hpp @@ -0,0 +1,17 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "utils.hpp" + +namespace ov { +namespace op { +template > +std::vector copy_shape_infer(const Node* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 1, "Incorrect number of input shapes"); + return {input_shapes[0]}; +} +} // namespace op +} // namespace ov diff --git a/src/core/shape_inference/include/ctc_greedy_decoder_seq_len_shape_inference.hpp b/src/core/shape_inference/include/ctc_greedy_decoder_seq_len_shape_inference.hpp index e8896dbfb1d..db85cebe8fa 100644 --- a/src/core/shape_inference/include/ctc_greedy_decoder_seq_len_shape_inference.hpp +++ b/src/core/shape_inference/include/ctc_greedy_decoder_seq_len_shape_inference.hpp @@ -3,13 +3,14 @@ // #pragma once #include "openvino/op/ctc_greedy_decoder_seq_len.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v6 { -template -std::vector shape_infer(const CTCGreedyDecoderSeqLen* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const CTCGreedyDecoderSeqLen* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 || input_shapes.size() == 3); using DimType = typename TShape::value_type; @@ -41,14 +42,7 @@ std::vector shape_infer(const CTCGreedyDecoderSeqLen* op, const std::vec "The first dimensions of input tensors must match."); } - return {TShape{batch_size, time_size}, TShape{batch_size}}; -} - -template -void shape_infer(const CTCGreedyDecoderSeqLen* op, - const std::vector& input_shapes, - std::vector& 
output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {TRShape{batch_size, time_size}, TRShape{batch_size}}; } } // namespace v6 } // namespace op diff --git a/src/core/shape_inference/include/ctc_greedy_decoder_shape_inference.hpp b/src/core/shape_inference/include/ctc_greedy_decoder_shape_inference.hpp index 81e4b1cf678..396a38e0c54 100644 --- a/src/core/shape_inference/include/ctc_greedy_decoder_shape_inference.hpp +++ b/src/core/shape_inference/include/ctc_greedy_decoder_shape_inference.hpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // #pragma once -#include +#include "openvino/op/ctc_greedy_decoder.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const CTCGreedyDecoder* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const CTCGreedyDecoder* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); using DimType = typename TShape::value_type; @@ -46,14 +47,7 @@ std::vector shape_infer(const CTCGreedyDecoder* op, const std::vector -void shape_infer(const CTCGreedyDecoder* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {TRShape(std::move(output_dims))}; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/ctc_loss_shape_inference.hpp b/src/core/shape_inference/include/ctc_loss_shape_inference.hpp index 859df54fc43..b282ec9aa7a 100644 --- a/src/core/shape_inference/include/ctc_loss_shape_inference.hpp +++ b/src/core/shape_inference/include/ctc_loss_shape_inference.hpp @@ -4,7 +4,9 @@ #pragma once #include -#include + +#include "openvino/op/ctc_loss.hpp" +#include "utils.hpp" namespace ov { namespace op { @@ -16,8 +18,8 @@ constexpr auto shape_names = constexpr auto shape_ranks = std::array{3, 1, 2, 1}; } // namespace ctc_loss -template -std::vector shape_infer(const CTCLoss* op, const std::vector& 
input_shapes) { +template > +std::vector shape_infer(const CTCLoss* op, const std::vector& input_shapes) { using DimType = typename TShape::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 || input_shapes.size() == 5); @@ -89,14 +91,8 @@ std::vector shape_infer(const CTCLoss* op, const std::vector& in " and: ", batch_size); - return {TShape{batch_size}}; + return {TRShape{batch_size}}; } - -template -void shape_infer(const CTCLoss* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v4 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/deformable_convolution_shape_inference.hpp b/src/core/shape_inference/include/deformable_convolution_shape_inference.hpp index d78924ca615..5d91bace7e5 100644 --- a/src/core/shape_inference/include/deformable_convolution_shape_inference.hpp +++ b/src/core/shape_inference/include/deformable_convolution_shape_inference.hpp @@ -63,19 +63,18 @@ void deformable_group_divisible_dimension(const TDeformableConv* op, const TDim& } // namespace deformable_conv namespace util { -template -std::vector shape_infer(const DeformableConvolutionBase* op, - const std::vector& input_shapes, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const DeformableConvolutionBase* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { static constexpr std::array names{"Input", "Offsets", "Filters", "Mask"}; using namespace ov::util; using TDim = typename TShape::value_type; const auto num_spatial = deformable_conv::calculate_num_spatial(op, input_shapes); - TShape output_shape; + TRShape output_shape; if (num_spatial != convolution::num_spatial_undefined) { const auto& data_shape = input_shapes[0]; const auto& offsets_shape = input_shapes[1]; @@ -168,24 +167,22 @@ std::vector shape_infer(const 
DeformableConvolutionBase* op, } // namespace util namespace v1 { -template -std::vector shape_infer(const DeformableConvolution* op, - const std::vector& input_shapes, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const DeformableConvolution* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); - return util::shape_infer(op, input_shapes, pads_begin, pads_end, constant_data); + return util::shape_infer(op, input_shapes, pads_begin, pads_end); } } // namespace v1 namespace v8 { -template -std::vector shape_infer(const DeformableConvolution* op, - const std::vector& input_shapes, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const DeformableConvolution* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { const auto has_mask_shape = input_shapes.size() == 4; NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || has_mask_shape); using TDim = typename TShape::value_type; @@ -227,7 +224,7 @@ std::vector shape_infer(const DeformableConvolution* op, } } - auto output_shapes = util::shape_infer(op, input_shapes, pads_begin, pads_end, constant_data); + auto output_shapes = util::shape_infer(op, input_shapes, pads_begin, pads_end); // post infer checks if (has_mask_shape && input_shapes[3].rank().is_static() && output_shapes[0].rank().is_static()) { auto mask_dim = input_shapes[3].begin() + util::spatial_dim_offset; diff --git a/src/core/shape_inference/include/deformable_psroi_pooling_shape_inference.hpp b/src/core/shape_inference/include/deformable_psroi_pooling_shape_inference.hpp index db4a8812085..3c90472e532 100644 --- a/src/core/shape_inference/include/deformable_psroi_pooling_shape_inference.hpp +++ 
b/src/core/shape_inference/include/deformable_psroi_pooling_shape_inference.hpp @@ -3,13 +3,14 @@ // #pragma once #include "openvino/op/deformable_psroi_pooling.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const DeformablePSROIPooling* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const DeformablePSROIPooling* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 || input_shapes.size() == 3); const auto& input_pshape = input_shapes[0]; @@ -42,17 +43,10 @@ std::vector shape_infer(const DeformablePSROIPooling* op, const std::vec using DimType = typename TShape::value_type; using DimTypeVal = typename DimType::value_type; // The output shape: [num_rois, output_dim, group_size, group_size] - return {TShape{box_coords_pshape.rank().is_static() ? box_coords_pshape[0] : DimType{}, - static_cast(op->get_output_dim()), - static_cast(op->get_group_size()), - static_cast(op->get_group_size())}}; -} - -template -void shape_infer(const DeformablePSROIPooling* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {TRShape{box_coords_pshape.rank().is_static() ? 
box_coords_pshape[0] : DimType{}, + static_cast(op->get_output_dim()), + static_cast(op->get_group_size()), + static_cast(op->get_group_size())}}; } } // namespace v1 } // namespace op diff --git a/src/core/shape_inference/include/depth_to_space_shape_inference.hpp b/src/core/shape_inference/include/depth_to_space_shape_inference.hpp index bda94acd5a5..428af3ae426 100644 --- a/src/core/shape_inference/include/depth_to_space_shape_inference.hpp +++ b/src/core/shape_inference/include/depth_to_space_shape_inference.hpp @@ -14,10 +14,10 @@ namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const DepthToSpace* op, const std::vector& input_shapes) { - using TDim = typename TShape::value_type; - using TVal = typename TDim::value_type; +template > +std::vector shape_infer(const DepthToSpace* op, const std::vector& input_shapes) { + using TDim = typename std::iterator_traits::value_type; + using TVal = typename TShape::value_type::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); const auto& data_shape = input_shapes[0]; @@ -34,7 +34,7 @@ std::vector shape_infer(const DepthToSpace* op, const std::vector(std::pow(block_size, data_shape.size() - spatial_dim_offset)); NODE_VALIDATION_CHECK(op, divisor != 0, "DepthToSpace: The divisor must not be 0"); - auto out_shape = data_shape; + auto out_shape = TRShape(data_shape); out_shape[1] /= divisor; check_divided_result(op, out_shape[1], data_shape[1], divisor); std::for_each(out_shape.begin() + spatial_dim_offset, out_shape.end(), [&block_size](TDim& d) { @@ -45,12 +45,6 @@ std::vector shape_infer(const DepthToSpace* op, const std::vector -void shape_infer(const DepthToSpace* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v0 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/detection_output_shape_inference.hpp 
b/src/core/shape_inference/include/detection_output_shape_inference.hpp index 3ba93d6fd4e..a24c96fa28e 100644 --- a/src/core/shape_inference/include/detection_output_shape_inference.hpp +++ b/src/core/shape_inference/include/detection_output_shape_inference.hpp @@ -6,6 +6,8 @@ #include +#include "utils.hpp" + namespace ov { namespace op { namespace util { @@ -118,17 +120,17 @@ void compute_num_classes(const DetectionOutputBase* op, } } -template -void shape_infer_base(const DetectionOutputBase* op, - const DetectionOutputBase::AttributesBase& attrs, - const std::vector& input_shapes, - std::vector& output_shapes, - int64_t attribute_num_classes) { - using dim_t = typename std::iterator_traits::value_type; +template > +std::vector shape_infer_base(const DetectionOutputBase* op, + const DetectionOutputBase::AttributesBase& attrs, + const std::vector& input_shapes, + int64_t attribute_num_classes) { + using dim_t = typename T::value_type; using val_type = typename dim_t::value_type; - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 5) && output_shapes.size() == 1); + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 5)); + auto output_shapes = std::vector(1); auto& ret_output_shape = output_shapes[0]; ret_output_shape.resize(4); @@ -310,6 +312,7 @@ void shape_infer_base(const DetectionOutputBase* op, } else { ret_output_shape[2] = dim_num_images * dim_num_prior_boxes * dim_num_classes; } + return output_shapes; } } // namespace util @@ -319,12 +322,11 @@ void shape_infer_base(const DetectionOutputBase* op, namespace ov { namespace op { namespace v0 { -template -void shape_infer(const DetectionOutput* op, const std::vector& input_shapes, std::vector& output_shapes) { +template > +std::vector shape_infer(const DetectionOutput* op, const std::vector& input_shapes) { const auto& attrs = op->get_attrs(); - ov::op::util::shape_infer_base(op, attrs, input_shapes, output_shapes, attrs.num_classes); + return 
ov::op::util::shape_infer_base(op, attrs, input_shapes, attrs.num_classes); } - } // namespace v0 } // namespace op } // namespace ov @@ -332,12 +334,11 @@ void shape_infer(const DetectionOutput* op, const std::vector& input_shapes, namespace ov { namespace op { namespace v8 { - -template -void shape_infer(const DetectionOutput* op, const std::vector& input_shapes, std::vector& output_shapes) { - ov::op::util::shape_infer_base(op, op->get_attrs(), input_shapes, output_shapes, -1); +template > +std::vector shape_infer(const DetectionOutput* op, const std::vector& input_shapes) { + const auto& attrs = op->get_attrs(); + return ov::op::util::shape_infer_base(op, attrs, input_shapes, -1); } - } // namespace v8 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/dimension_util.hpp b/src/core/shape_inference/include/dimension_util.hpp index f6b0403cbcf..8416efce6f2 100644 --- a/src/core/shape_inference/include/dimension_util.hpp +++ b/src/core/shape_inference/include/dimension_util.hpp @@ -12,6 +12,27 @@ namespace ov { namespace util { namespace dim { +template ::value>::type* = nullptr> +constexpr bool is_static(const TDim) { + return true; +} + +template ::value>::type* = nullptr> +constexpr bool is_static(const TDim& d) { + return d.is_static(); +} + +template +constexpr typename std::enable_if::value, TDim>::type get_length(const TDim& d) { + return d; +} + +template +constexpr typename std::enable_if::value, typename TDim::value_type>::type get_length( + const TDim& d) { + return d.get_length(); +} + constexpr int64_t inf_bound = -1; //!< Infinite bound value for dimension. /** @@ -132,10 +153,12 @@ typename std::enable_if::value, TDim>::type padded(const TDi * @param stride Kernel stride. * @return Pair of left, right padding values for input dimension. 
*/ -template +template < + class TDim, + class T = typename std::conditional::value, size_t, typename Dimension::value_type>::type> inline std::pair padding(const TDim& dim, const int64_t kernel_size, const int64_t dilation, int64_t stride) { - if (dim.is_static()) { - const auto dim_size = static_cast(dim.get_length()); + if (dim::is_static(dim)) { + const auto dim_size = static_cast(dim::get_length(dim)); const auto dilated_kernel = dilated(kernel_size, dilation); const int64_t tmp = (dim_size + stride - 1) / stride; diff --git a/src/core/shape_inference/include/einsum_shape_inference.hpp b/src/core/shape_inference/include/einsum_shape_inference.hpp index 770e76565aa..6cb265d9d6a 100644 --- a/src/core/shape_inference/include/einsum_shape_inference.hpp +++ b/src/core/shape_inference/include/einsum_shape_inference.hpp @@ -12,9 +12,9 @@ namespace ov { namespace op { namespace v7 { -template -void shape_infer(const Einsum* op, const std::vector& input_shapes, std::vector& output_shapes) { - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const Einsum* op, const std::vector& input_shapes) { + using DimType = typename T::value_type; // check that equation has correct format and extract input and output subscripts std::vector input_subscripts; @@ -25,11 +25,10 @@ void shape_infer(const Einsum* op, const std::vector& input_shapes, std::vect NODE_VALIDATION_CHECK(op, input_subscripts.size() == input_shapes.size(), "Equation must contain a number of subscripts equal to a number of Einsum inputs."); - NODE_VALIDATION_CHECK(op, output_shapes.size() == 1); // create a dictionary with dimension sizes (or ranges in case of dynamic shapes) for each label // and check their compatibility in case of repeating labels - std::unordered_map label_to_shape; + std::unordered_map label_to_shape; for (size_t input_idx = 0; input_idx < input_shapes.size(); ++input_idx) { const auto& pshape = input_shapes[input_idx]; @@ -53,9 +52,9 @@ void 
shape_infer(const Einsum* op, const std::vector& input_shapes, std::vect if (label_to_shape.find(label) == label_to_shape.end()) { label_to_shape[label] = current_sub_pshape; } else { - bool is_broadcast_success = T::broadcast_merge_into(label_to_shape[label], - current_sub_pshape, - op::AutoBroadcastType::NUMPY); + bool is_broadcast_success = TRShape::broadcast_merge_into(label_to_shape[label], + current_sub_pshape, + op::AutoBroadcastType::NUMPY); NODE_VALIDATION_CHECK(op, is_broadcast_success, "Input dimensions labeled with ellipsis for Einsum " @@ -64,13 +63,13 @@ void shape_infer(const Einsum* op, const std::vector& input_shapes, std::vect dim_ind += num_broadcasted_dims; } else { if (label_to_shape.find(label) == label_to_shape.end()) { - label_to_shape[label] = T{pshape[dim_ind]}; + label_to_shape[label] = TRShape{pshape[dim_ind]}; } else { NODE_VALIDATION_CHECK(op, - label_to_shape[label].compatible(T{pshape[label_ind]}), + label_to_shape[label].compatible(TRShape{pshape[label_ind]}), "Different input dimensions indicated by the same labels for Einsum " "must be compatible."); - OPENVINO_ASSERT(T::merge_into(label_to_shape[label], T{pshape[dim_ind]})); + OPENVINO_ASSERT(TRShape::merge_into(label_to_shape[label], TRShape{pshape[dim_ind]})); } ++dim_ind; } @@ -91,10 +90,9 @@ void shape_infer(const Einsum* op, const std::vector& input_shapes, std::vect // compute the output shape const auto output_labels = Einsum::extract_labels(output_subscript); + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; - output_shape.resize(0); - for (auto const& output_label : output_labels) { NODE_VALIDATION_CHECK(op, label_to_shape.find(output_label) != label_to_shape.end(), @@ -104,6 +102,7 @@ void shape_infer(const Einsum* op, const std::vector& input_shapes, std::vect label_to_shape[output_label].begin(), label_to_shape[output_label].end()); } + return output_shapes; } } // namespace v7 } // namespace op diff --git 
a/src/core/shape_inference/include/eltwise_shape_inference.hpp b/src/core/shape_inference/include/eltwise_shape_inference.hpp new file mode 100644 index 00000000000..cfae0ad3443 --- /dev/null +++ b/src/core/shape_inference/include/eltwise_shape_inference.hpp @@ -0,0 +1,35 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/util/binary_elementwise_arithmetic.hpp" +#include "openvino/op/util/binary_elementwise_comparison.hpp" +#include "openvino/op/util/binary_elementwise_logical.hpp" +#include "utils.hpp" + +namespace ov { +namespace op { +template > +std::vector eltwise_shape_infer(const OpType* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 2, "Incorrect number of input/output shapes"); + + auto output_shapes = std::vector{input_shapes[0]}; + auto& output_shape = output_shapes[0]; + const auto& autob = op->get_autob(); + if (autob.m_type == AutoBroadcastType::NONE) { + NODE_VALIDATION_CHECK(op, + TRShape::merge_into(output_shape, input_shapes[1]), + "Argument shapes are inconsistent."); + } else if (autob.m_type == AutoBroadcastType::NUMPY || autob.m_type == AutoBroadcastType::PDPD) { + NODE_VALIDATION_CHECK(op, + TRShape::broadcast_merge_into(output_shape, input_shapes[1], autob), + "Argument shapes are inconsistent."); + } else { + NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification"); + } + return output_shapes; +} +} // namespace op +} // namespace ov diff --git a/src/core/shape_inference/include/embedding_segments_sum_shape_inference.hpp b/src/core/shape_inference/include/embedding_segments_sum_shape_inference.hpp index df219931faa..f4aeec8164e 100644 --- a/src/core/shape_inference/include/embedding_segments_sum_shape_inference.hpp +++ b/src/core/shape_inference/include/embedding_segments_sum_shape_inference.hpp @@ -12,19 +12,11 @@ namespace ov { namespace op { namespace v3 { -template -void shape_infer(const 
EmbeddingSegmentsSum* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); -} - -template -std::vector shape_infer( - const EmbeddingSegmentsSum* op, - const std::vector& input_shapes, - const std::map>& constant_data = {}) { +template > +std::vector shape_infer(const EmbeddingSegmentsSum* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + // const std::map>& constant_data = {}) { const auto input_size = input_shapes.size(); NODE_VALIDATION_CHECK(op, input_size >= 4 && input_size <= 6); @@ -58,12 +50,11 @@ std::vector shape_infer( "INDICES and PER_SAMPLE_WEIGHTS shape must be same."); } const auto& emb_table_shape = input_shapes[EMB_TABLE]; - TShape result_shape = emb_table_shape; + TRShape result_shape = emb_table_shape; if (emb_table_shape.rank().is_static()) { NODE_VALIDATION_CHECK(op, emb_table_shape.size() > 0, "EMB_TABLE can't be a scalar."); - TShape segments_value; - if (get_data_as_shape(NUM_SEGMENTS, op, segments_value, constant_data)) { - result_shape[0] = segments_value[0]; + if (auto segments_value = get_input_const_data_as_shape(op, NUM_SEGMENTS, ta)) { + result_shape[0] = (*segments_value)[0]; } else { result_shape[0] = Dimension::dynamic(); } diff --git a/src/core/shape_inference/include/embedding_shape_infer_utils.hpp b/src/core/shape_inference/include/embedding_shape_infer_utils.hpp index 8f25671dff3..b6db2001059 100644 --- a/src/core/shape_inference/include/embedding_shape_infer_utils.hpp +++ b/src/core/shape_inference/include/embedding_shape_infer_utils.hpp @@ -4,6 +4,7 @@ #pragma once #include "openvino/core/node.hpp" +#include "utils.hpp" namespace ov { namespace op { @@ -22,11 +23,11 @@ namespace embedding { * \return The copy of the `emb_table_shape` with the first dimsnsion overwritten by `dim_shape_src[0]` if the rank is * static, otherwise fully dynamic shape with dynamic 
rank. */ -template -TShape out_shape_infer(const ov::Node* op, const TShape& emb_table_shape, const TShape& dim_shape_src) { +template > +TRShape out_shape_infer(const ov::Node* op, const TShape& emb_table_shape, const TShape& dim_shape_src) { if (emb_table_shape.rank().is_static()) { NODE_VALIDATION_CHECK(op, emb_table_shape.size() > 0, "EMB_TABLE can't be a scalar."); - auto out_shape = emb_table_shape; + auto out_shape = TRShape(emb_table_shape); out_shape[0] = dim_shape_src.rank().is_static() ? dim_shape_src[0] : Dimension::dynamic(); return out_shape; } diff --git a/src/core/shape_inference/include/embeddingbag_offsets_shape_inference.hpp b/src/core/shape_inference/include/embeddingbag_offsets_shape_inference.hpp index c4f03075948..f69dc1bddec 100644 --- a/src/core/shape_inference/include/embeddingbag_offsets_shape_inference.hpp +++ b/src/core/shape_inference/include/embeddingbag_offsets_shape_inference.hpp @@ -13,9 +13,9 @@ namespace ov { namespace op { namespace util { -template -std::vector shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op, - const std::vector& input_shapes) { +template > +std::vector shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op, + const std::vector& input_shapes) { const auto input_size = input_shapes.size(); NODE_VALIDATION_CHECK(op, (input_size >= 3 && input_size <= 5)); @@ -45,14 +45,6 @@ std::vector shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op, return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], input_shapes[OFFSETS])}; } - -template -void shape_infer(const ov::op::util::EmbeddingBagOffsetsBase* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace util } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/embeddingbag_packed_shape_inference.hpp b/src/core/shape_inference/include/embeddingbag_packed_shape_inference.hpp index 95736a246d1..288583559cf 100644 --- 
a/src/core/shape_inference/include/embeddingbag_packed_shape_inference.hpp +++ b/src/core/shape_inference/include/embeddingbag_packed_shape_inference.hpp @@ -13,9 +13,9 @@ namespace ov { namespace op { namespace util { -template -std::vector shape_infer(const ov::op::util::EmbeddingBagPackedBase* op, - const std::vector& input_shapes) { +template > +std::vector shape_infer(const ov::op::util::EmbeddingBagPackedBase* op, + const std::vector& input_shapes) { const auto input_size = input_shapes.size(); NODE_VALIDATION_CHECK(op, input_size == 2 || input_size == 3); @@ -23,7 +23,7 @@ std::vector shape_infer(const ov::op::util::EmbeddingBagPackedBase* op, constexpr size_t INDICES = 1; constexpr size_t PER_SAMPLE_WEIGHTS = 2; - auto indices_shape = input_shapes[INDICES]; + auto indices_shape = TRShape(input_shapes[INDICES]); NODE_VALIDATION_CHECK(op, indices_shape.rank().compatible(2), "INDICES must be 2D."); if (input_size == 3) { @@ -32,18 +32,10 @@ std::vector shape_infer(const ov::op::util::EmbeddingBagPackedBase* op, "PER_SAMPLE_WEIGHTS must be 2D."); NODE_VALIDATION_CHECK(op, - TShape::merge_into(indices_shape, input_shapes[PER_SAMPLE_WEIGHTS]), + TRShape::merge_into(indices_shape, input_shapes[PER_SAMPLE_WEIGHTS]), "INDICES and PER_SAMPLE_WEIGHTS shape must be same."); } - - return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], indices_shape)}; -} - -template -void shape_infer(const ov::op::util::EmbeddingBagPackedBase* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return {embedding::out_shape_infer(op, input_shapes[EMB_TABLE], TShape(indices_shape))}; } } // namespace util } // namespace op diff --git a/src/core/shape_inference/include/experimental_detectron_detection_output_shape_inference.hpp b/src/core/shape_inference/include/experimental_detectron_detection_output_shape_inference.hpp index 9db747e6fda..e4983d79af1 100644 --- 
a/src/core/shape_inference/include/experimental_detectron_detection_output_shape_inference.hpp +++ b/src/core/shape_inference/include/experimental_detectron_detection_output_shape_inference.hpp @@ -5,14 +5,14 @@ #pragma once #include "openvino/op/experimental_detectron_detection_output.hpp" - +#include "utils.hpp" namespace ov { namespace op { namespace v6 { -template -std::vector shape_infer(const ExperimentalDetectronDetectionOutput* op, - const std::vector& input_shapes) { +template > +std::vector shape_infer(const ExperimentalDetectronDetectionOutput* op, + const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); using TDim = typename TShape::value_type; @@ -55,7 +55,7 @@ std::vector shape_infer(const ExperimentalDetectronDetectionOutput* op, } NODE_VALIDATION_CHECK(op, - im_info_shape.compatible({1, 3}), + im_info_shape.compatible(TRShape{1, 3}), "Input image info shape must be compatible with [1,3]."); if (rois_shape_rank_is_static && deltas_shape_rank_is_static && scores_shape_is_static) { @@ -76,19 +76,11 @@ std::vector shape_infer(const ExperimentalDetectronDetectionOutput* op, num_batches_scores); } - auto output_shapes = std::vector(3, TShape{TDim(attrs.max_detections_per_image)}); + auto output_shapes = std::vector(3, TRShape{TDim(attrs.max_detections_per_image)}); output_shapes[0].push_back(4); return output_shapes; } - -template -void shape_infer(const ExperimentalDetectronDetectionOutput* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v6 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/experimental_detectron_generate_proposals_shape_inference.hpp b/src/core/shape_inference/include/experimental_detectron_generate_proposals_shape_inference.hpp index e55f9047a8b..003a4f7b806 100644 --- a/src/core/shape_inference/include/experimental_detectron_generate_proposals_shape_inference.hpp +++ 
b/src/core/shape_inference/include/experimental_detectron_generate_proposals_shape_inference.hpp @@ -5,14 +5,15 @@ #pragma once #include "openvino/op/experimental_detectron_generate_proposals.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v6 { -template -std::vector shape_infer(const ExperimentalDetectronGenerateProposalsSingleImage* op, - const std::vector& input_shapes) { +template > +std::vector shape_infer(const ExperimentalDetectronGenerateProposalsSingleImage* op, + const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); const auto& im_info_shape = input_shapes[0]; @@ -69,18 +70,10 @@ std::vector shape_infer(const ExperimentalDetectronGenerateProposalsSing } auto post_nms_count = static_cast(op->get_attrs().post_nms_count); - auto output_shapes = std::vector(2, TShape{post_nms_count}); + auto output_shapes = std::vector(2, TRShape{post_nms_count}); output_shapes[0].push_back(4); return output_shapes; } - -template -void shape_infer(const ExperimentalDetectronGenerateProposalsSingleImage* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v6 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/experimental_detectron_prior_grid_generator_shape_inference.hpp b/src/core/shape_inference/include/experimental_detectron_prior_grid_generator_shape_inference.hpp index 621866f8b05..98a532e3eae 100644 --- a/src/core/shape_inference/include/experimental_detectron_prior_grid_generator_shape_inference.hpp +++ b/src/core/shape_inference/include/experimental_detectron_prior_grid_generator_shape_inference.hpp @@ -4,14 +4,15 @@ #pragma once #include "openvino/op/experimental_detectron_prior_grid_generator.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v6 { -template -std::vector shape_infer(const ExperimentalDetectronPriorGridGenerator* op, - const std::vector& input_shapes) { +template > 
+std::vector shape_infer(const ExperimentalDetectronPriorGridGenerator* op, + const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); const auto& priors_shape = input_shapes[0]; const auto& featmap_shape = input_shapes[1]; @@ -20,7 +21,7 @@ std::vector shape_infer(const ExperimentalDetectronPriorGridGenerator* o const auto is_flatten = op->get_attrs().flatten; const size_t output_size = is_flatten ? 2 : 4; - TShape output_shape; + TRShape output_shape; output_shape.resize(output_size); output_shape[output_size - 1] = 4; @@ -70,14 +71,6 @@ std::vector shape_infer(const ExperimentalDetectronPriorGridGenerator* o return {output_shape}; } - -template -void shape_infer(const ExperimentalDetectronPriorGridGenerator* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v6 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/experimental_detectron_roi_feature_shape_inference.hpp b/src/core/shape_inference/include/experimental_detectron_roi_feature_shape_inference.hpp index 143070a5a8a..325da43c0e6 100644 --- a/src/core/shape_inference/include/experimental_detectron_roi_feature_shape_inference.hpp +++ b/src/core/shape_inference/include/experimental_detectron_roi_feature_shape_inference.hpp @@ -16,14 +16,14 @@ namespace v6 { // outputs: // 1. out_shape = [number_of_ROIs, number_of_channels, output_size, output_size] // 2. 
out_rois_shape = [number_of_ROIs, 4] -template -std::vector shape_infer(const ExperimentalDetectronROIFeatureExtractor* op, - const std::vector& input_shapes) { - using TDim = typename TShape::value_type; +template > +std::vector shape_infer(const ExperimentalDetectronROIFeatureExtractor* op, + const std::vector& input_shapes) { + using TDim = typename TRShape::value_type; using namespace ov::util; NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2); - auto output_shapes = std::vector(); + auto output_shapes = std::vector(); output_shapes.reserve(2); const auto& rois_shape = input_shapes[0]; diff --git a/src/core/shape_inference/include/experimental_detectron_topkrois_shape_inference.hpp b/src/core/shape_inference/include/experimental_detectron_topkrois_shape_inference.hpp index c4f0c571921..6eafd451050 100644 --- a/src/core/shape_inference/include/experimental_detectron_topkrois_shape_inference.hpp +++ b/src/core/shape_inference/include/experimental_detectron_topkrois_shape_inference.hpp @@ -2,14 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "openvino/op/experimental_detectron_topkrois.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v6 { -template -std::vector shape_infer(ExperimentalDetectronTopKROIs* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(ExperimentalDetectronTopKROIs* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); const auto& input_rois_shape = input_shapes[0]; @@ -45,13 +46,6 @@ std::vector shape_infer(ExperimentalDetectronTopKROIs* op, const std::ve return {{static_cast(op->get_max_rois()), 4}}; } - -template -void shape_infer(ExperimentalDetectronTopKROIs* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v6 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/extract_image_patches_shape_inference.hpp 
b/src/core/shape_inference/include/extract_image_patches_shape_inference.hpp index 6bc3c769e46..331dd06a975 100644 --- a/src/core/shape_inference/include/extract_image_patches_shape_inference.hpp +++ b/src/core/shape_inference/include/extract_image_patches_shape_inference.hpp @@ -4,6 +4,8 @@ #pragma once #include +#include "utils.hpp" + namespace ov { namespace op { namespace v3 { @@ -21,44 +23,45 @@ int32_t inline calc_shape_padding(const int32_t input, return out < 0 ? 0 : out; } -template -void shape_infer(const ExtractImagePatches* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1); - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const ExtractImagePatches* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); + using DimType = typename T::value_type; const auto& input_shape = input_shapes[0]; + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; output_shape.resize(4); NODE_VALIDATION_CHECK(op, input_shape.rank().compatible(4), "input tensor must be 4D tensor."); NODE_VALIDATION_CHECK(op, - op->m_patch_sizes.size() == 2, + op->get_sizes().size() == 2, "Attribute sizes should be in [size_rows, size_cols] format."); NODE_VALIDATION_CHECK(op, - op->m_patch_movement_strides.size() == 2, + op->get_strides().size() == 2, "Attribute strides should be in [stride_rows, stride_cols] format."); NODE_VALIDATION_CHECK(op, - op->m_patch_movement_strides[0] > 0 && op->m_patch_movement_strides[1] > 0, + op->get_strides()[0] > 0 && op->get_strides()[1] > 0, "Attribute strides should be strictly greater than zeros in values."); NODE_VALIDATION_CHECK(op, - op->m_patch_selection_rates.size() == 2, + op->get_rates().size() == 2, "Attribute rates should be in [rate_rows, rate_cols] format."); NODE_VALIDATION_CHECK(op, - op->m_patch_selection_rates[0] > 0 && 
op->m_patch_selection_rates[1] > 0, + op->get_rates()[0] > 0 && op->get_rates()[1] > 0, "Attribute rates should be strictly greater than zeros in values."); - NODE_VALIDATION_CHECK( - op, - op->m_padding == PadType::VALID || op->m_padding == PadType::SAME_LOWER || op->m_padding == PadType::SAME_UPPER, - "Attribute padding should be in either valid or same_lower or same_upper."); + NODE_VALIDATION_CHECK(op, + op->get_auto_pad() == PadType::VALID || op->get_auto_pad() == PadType::SAME_LOWER || + op->get_auto_pad() == PadType::SAME_UPPER, + "Attribute padding should be in either valid or same_lower or same_upper."); if (input_shape.rank().is_static()) { // Determine batch & output_depth output_shape[0] = input_shape[0]; - output_shape[1] = input_shape[1] * op->m_patch_sizes[0] * op->m_patch_sizes[1]; + output_shape[1] = input_shape[1] * op->get_sizes()[0] * op->get_sizes()[1]; // Determine spatial shape if (input_shape[2].is_static() && input_shape[3].is_static()) { int32_t input_rows = static_cast(input_shape[2].get_length()); @@ -67,19 +70,19 @@ void shape_infer(const ExtractImagePatches* op, const std::vector& input_shap int32_t out_cols(0); if (input_rows == 0 || input_cols == 0) { output_shape = input_shape; - return; + return output_shapes; } out_rows = calc_shape_padding(input_rows, - static_cast(op->m_patch_selection_rates[0]), - static_cast(op->m_patch_movement_strides[0]), - static_cast(op->m_patch_sizes[0]), - op->m_padding); + static_cast(op->get_rates()[0]), + static_cast(op->get_strides()[0]), + static_cast(op->get_sizes()[0]), + op->get_auto_pad()); out_cols = calc_shape_padding(input_cols, - static_cast(op->m_patch_selection_rates[1]), - static_cast(op->m_patch_movement_strides[1]), - static_cast(op->m_patch_sizes[1]), - op->m_padding); + static_cast(op->get_rates()[1]), + static_cast(op->get_strides()[1]), + static_cast(op->get_sizes()[1]), + op->get_auto_pad()); auto out_rows_cast = static_cast(out_rows); auto out_cols_cast = static_cast(out_cols); 
@@ -88,6 +91,7 @@ void shape_infer(const ExtractImagePatches* op, const std::vector& input_shap output_shape[3] = out_cols_cast; } } + return output_shapes; } } // namespace v3 } // namespace op diff --git a/src/core/shape_inference/include/eye_shape_inference.hpp b/src/core/shape_inference/include/eye_shape_inference.hpp index 8d764fd8234..2d6989b40a1 100644 --- a/src/core/shape_inference/include/eye_shape_inference.hpp +++ b/src/core/shape_inference/include/eye_shape_inference.hpp @@ -3,9 +3,9 @@ // #pragma once #include -#include -#include +#include "openvino/core/validation_util.hpp" +#include "openvino/op/eye.hpp" #include "utils.hpp" namespace ov { @@ -19,7 +19,8 @@ void check_1D_or_scalar_shape(const ov::op::v9::Eye* op, const T& input_shape, c NODE_VALIDATION_CHECK(op, num_rows_rank <= 1, name, " value must be a scalar or 1D tensor."); if (num_rows_rank == 1) { - NODE_VALIDATION_CHECK(op, input_shape.compatible(T{1}), name, " value input should have 1 element."); + using TRShape = result_shape_t; + NODE_VALIDATION_CHECK(op, input_shape.compatible(TRShape{1}), name, " value input should have 1 element."); } } } @@ -38,16 +39,16 @@ namespace v9 { * * \param op Pointer to Eye operator. * \param input_shapes Input shapes of Eye. - * \param constant_data Map of constant data. Default empty. - * \return * template + * \param ta Tensor accessor to constant data. + * \return Vector with output shapes. 
*/ -template -std::vector shape_infer(const Eye* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const Eye* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { const auto& inputs_count = input_shapes.size(); NODE_VALIDATION_CHECK(op, (inputs_count == 3 || inputs_count == 4)); - TShape output_shape; + TRShape output_shape; for (size_t i = 0; i < 3; ++i) { util::check_1D_or_scalar_shape(op, input_shapes[i], eye::shape_names[i]); @@ -57,10 +58,11 @@ std::vector shape_infer(const Eye* op, const auto& batch_shape = input_shapes[3]; NODE_VALIDATION_CHECK(op, batch_shape.rank().compatible(1), eye::shape_names[3], " input must be a 1D tensor."); if (batch_shape.is_static()) { - if (get_data_as_shape(3, op, output_shape, constant_data)) { + if (auto batch_as_shape = get_input_const_data_as_shape(op, 3, ta)) { NODE_VALIDATION_CHECK(op, static_cast(batch_shape[0].get_length()) == - static_cast(output_shape.rank().get_length())); + static_cast(batch_as_shape->rank().get_length())); + output_shape = std::move(*batch_as_shape); } else { output_shape = PartialShape::dynamic(batch_shape[0].get_length()); } @@ -73,7 +75,7 @@ std::vector shape_infer(const Eye* op, constexpr auto get_non_negatives = ov::util::InTypeRange(0, std::numeric_limits::max()); for (size_t i = 0; i < 2; ++i) { - if (auto eye_dim = get_input_const_data_as_shape(op, i, constant_data, get_non_negatives)) { + if (auto eye_dim = get_input_const_data_as_shape(op, i, ta, get_non_negatives)) { NODE_VALIDATION_CHECK(op, eye_dim->size() == 1, eye::shape_names[i], @@ -87,14 +89,6 @@ std::vector shape_infer(const Eye* op, return {output_shape}; } - -template -void shape_infer(const Eye* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); -} } // namespace v9 } // namespace op } // namespace ov 
diff --git a/src/core/shape_inference/include/fake_quantize.hpp b/src/core/shape_inference/include/fake_quantize.hpp index 0a3a32c1ec7..62ec9141818 100644 --- a/src/core/shape_inference/include/fake_quantize.hpp +++ b/src/core/shape_inference/include/fake_quantize.hpp @@ -7,22 +7,25 @@ #include "utils.hpp" -template -void shape_infer(const ov::op::v0::FakeQuantize* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 5 && output_shapes.size() == 1); +namespace ov { +namespace op { +namespace v0 { +template > +std::vector shape_infer(const FakeQuantize* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 5); - T data_pshape = input_shapes[0]; + TRShape data_pshape = input_shapes[0]; ov::op::AutoBroadcastSpec auto_broadcast = op->get_auto_broadcast(); for (size_t i = 1; i <= 4; ++i) { if (auto_broadcast.m_type == ov::op::AutoBroadcastType::NONE) { - NODE_VALIDATION_CHECK(op, T::merge_into(data_pshape, input_shapes[i]), "Argument shapes are inconsistent."); + NODE_VALIDATION_CHECK(op, + TRShape::merge_into(data_pshape, input_shapes[i]), + "Argument shapes are inconsistent."); } else if (auto_broadcast.m_type == ov::op::AutoBroadcastType::NUMPY || auto_broadcast.m_type == ov::op::AutoBroadcastType::PDPD) { NODE_VALIDATION_CHECK(op, - T::broadcast_merge_into(data_pshape, input_shapes[i], auto_broadcast), + TRShape::broadcast_merge_into(data_pshape, input_shapes[i], auto_broadcast), "Argument shapes are inconsistent."); } else { NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification"); @@ -35,5 +38,8 @@ void shape_infer(const ov::op::v0::FakeQuantize* op, // input[1].shape = [1, 3, 4, 5] // This controversial behavior is kept here due to backward-compatibility and the fact that // frameworks do not allow such behavior too -- so the chance to have such FQ configuration is minimal - first_input_passthrough_infer(op, input_shapes, output_shapes); + 
return {data_pshape}; } +} // namespace v0 +} // namespace op +} // namespace ov diff --git a/src/core/shape_inference/include/fft_base_shape_inference.hpp b/src/core/shape_inference/include/fft_base_shape_inference.hpp index fbe72f54da2..9a79db5cb81 100644 --- a/src/core/shape_inference/include/fft_base_shape_inference.hpp +++ b/src/core/shape_inference/include/fft_base_shape_inference.hpp @@ -9,20 +9,19 @@ namespace ov { namespace op { -namespace util { -template -void shape_infer(const ov::op::util::FFTBase* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - using DimType = typename std::iterator_traits::value_type; - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3) && output_shapes.size() == 1); +template > +std::vector shape_infer(const util::FFTBase* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + using DimType = typename T::value_type; + + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3)); const auto& input_shape = input_shapes[0]; const auto& axes_shape = input_shapes[1]; + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; - std::vector axes; - bool axes_are_known = get_data_as_int64(1, op, axes, constant_data); + auto axes = get_input_const_data_as(op, 1, ta); if (input_shape.rank().is_static()) { const auto input_rank = input_shape.size(); @@ -52,11 +51,11 @@ void shape_infer(const ov::op::util::FFTBase* op, // 'r - 1 + a'. The reason is the following: real input tensor of the shape // [n_0, ..., n_{r - 1}, 2] is interpreted as a complex tensor with the shape // [n_0, ..., n_{r - 1}]. 
- if (axes_shape.rank().is_static() && axes_are_known) { + if (axes_shape.rank().is_static() && axes) { const auto axis_min_value = -static_cast(input_rank); const auto axis_max_value = static_cast(input_rank) - 1; ov::AxisSet axes_set; - for (int64_t& axis : axes) { + for (int64_t& axis : *axes) { NODE_VALIDATION_CHECK(op, axis_min_value < axis && axis < axis_max_value, "FFT op axis ", @@ -72,7 +71,7 @@ void shape_infer(const ov::op::util::FFTBase* op, axes_set.insert(static_cast(axis)); } - NODE_VALIDATION_CHECK(op, axes.size() == axes_set.size(), "FFT op axes must be unique."); + NODE_VALIDATION_CHECK(op, axes->size() == axes_set.size(), "FFT op axes must be unique."); } } @@ -97,31 +96,30 @@ void shape_infer(const ov::op::util::FFTBase* op, } output_shape = input_shape; - if (input_shape.rank().is_static() && axes_shape.rank().is_static() && input_shapes.size() == 3 && axes_are_known) { + if (input_shape.rank().is_static() && axes_shape.rank().is_static() && input_shapes.size() == 3 && axes) { const auto& signal_size_shape = input_shapes[2]; - std::vector signal_size; - bool status_signal_size = get_data_as_int64(2, op, signal_size, constant_data); + auto signal_size = get_input_const_data_as(op, 2, ta); - if (signal_size_shape.rank().is_static() && status_signal_size) { - size_t num_of_axes = axes.size(); + if (signal_size_shape.rank().is_static() && signal_size) { + size_t num_of_axes = axes->size(); for (size_t i = 0; i < num_of_axes; ++i) { - if (signal_size[i] == -1) { + if ((*signal_size)[i] == -1) { continue; } - output_shape[axes[i]] = DimType(signal_size[i]); + output_shape[(*axes)[i]] = DimType((*signal_size)[i]); } } else if (signal_size_shape.rank().is_static()) { - for (int64_t& axis : axes) { + for (int64_t& axis : *axes) { output_shape[axis] = ov::Dimension::dynamic(); } } - } else if (input_shape.rank().is_static() && (axes_shape.rank().is_dynamic() || !axes_are_known)) { + } else if (input_shape.rank().is_static() && 
(axes_shape.rank().is_dynamic() || !axes)) { const auto input_rank = input_shape.size(); for (size_t i = 0; i < input_rank - 1; ++i) { output_shape[i] = ov::Dimension::dynamic(); } } + return output_shapes; } -} // namespace util } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/gather_elements_shape_inference.hpp b/src/core/shape_inference/include/gather_elements_shape_inference.hpp index 8d3fedd0f31..2447ca40eea 100644 --- a/src/core/shape_inference/include/gather_elements_shape_inference.hpp +++ b/src/core/shape_inference/include/gather_elements_shape_inference.hpp @@ -9,15 +9,16 @@ namespace ov { namespace op { namespace v6 { -template -void shape_infer(const GatherElements* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const GatherElements* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); + using DimType = typename T::value_type; const auto& data_pshape = input_shapes[0]; const auto& indices_pshape = input_shapes[1]; auto data_rank = data_pshape.rank(); auto indices_rank = indices_pshape.rank(); + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; int64_t axis = op->get_axis(); @@ -36,16 +37,16 @@ void shape_infer(const GatherElements* op, const std::vector& input_shapes, s // output has the same rank of data output_shape = data_pshape; output_shape[axis] = DimType(); - return; + return output_shapes; } if (data_rank.is_dynamic()) { if (indices_rank.is_dynamic()) { output_shape = PartialShape::dynamic(); - return; + return output_shapes; } output_shape = indices_pshape; - return; + return output_shapes; } // left only case when data_rank.is_static() && indices_rank.is_static() @@ -70,6 +71,7 @@ void shape_infer(const GatherElements* op, const std::vector& 
input_shapes, s " are not consistent, `data` and `indices` must have equal or " "intersecting dimensions, except for the dimension at axis index.", axis); + return output_shapes; } } // namespace v6 } // namespace op diff --git a/src/core/shape_inference/include/gather_nd_shape_inference.hpp b/src/core/shape_inference/include/gather_nd_shape_inference.hpp index 210e6706628..d4302521c27 100644 --- a/src/core/shape_inference/include/gather_nd_shape_inference.hpp +++ b/src/core/shape_inference/include/gather_nd_shape_inference.hpp @@ -9,8 +9,8 @@ namespace ov { namespace op { namespace gather_nd { -template -std::vector gather_nd_base_shape_infer(const TOp* op, const std::vector& input_shapes) { +template > +std::vector gather_nd_base_shape_infer(const TOp* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); const auto& data_pshape = input_shapes[0]; @@ -59,17 +59,17 @@ std::vector gather_nd_base_shape_infer(const TOp* op, const std::vector< for (auto dim_idx = batch_dims + indices_tuple_length; dim_idx < data_pshape.size(); ++dim_idx) { output_dims.emplace_back(data_pshape[dim_idx]); } - return {TShape(std::move(output_dims))}; + return {TRShape(std::move(output_dims))}; } else { return {ov::PartialShape::dynamic()}; } } } // namespace gather_nd namespace v5 { -template -void shape_infer(const GatherND* op, const std::vector& input_shapes, std::vector& output_shapes) { +template > +std::vector shape_infer(const GatherND* op, const std::vector& input_shapes) { using DimType = typename TShape::value_type; - output_shapes = gather_nd::gather_nd_base_shape_infer(op, input_shapes); + auto output_shapes = gather_nd::gather_nd_base_shape_infer(op, input_shapes); // If batch_dims > 1, batch dimensions are need to be fused auto batch_dims = op->get_batch_dims(); @@ -82,15 +82,16 @@ void shape_infer(const GatherND* op, const std::vector& input_shapes, st output_dims[0] *= dim; }); output_dims.insert(output_dims.begin() + 1, 
output_base_shape.begin() + batch_dims, output_base_shape.end()); - output_shapes[0] = TShape(std::move(output_dims)); + output_shapes[0] = TRShape(std::move(output_dims)); } + return output_shapes; } } // namespace v5 namespace v8 { -template -void shape_infer(const GatherND* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = gather_nd::gather_nd_base_shape_infer(op, input_shapes); +template > +std::vector shape_infer(const GatherND* op, const std::vector& input_shapes) { + return gather_nd::gather_nd_base_shape_infer(op, input_shapes); } } // namespace v8 } // namespace op diff --git a/src/core/shape_inference/include/gather_shape_inference.hpp b/src/core/shape_inference/include/gather_shape_inference.hpp index b229ef5b882..f7440798d1c 100644 --- a/src/core/shape_inference/include/gather_shape_inference.hpp +++ b/src/core/shape_inference/include/gather_shape_inference.hpp @@ -8,19 +8,18 @@ namespace ov { namespace op { -namespace util { -template -void shape_infer(const GatherBase* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 && output_shapes.size() == 1); +template > +std::vector shape_infer(const util::GatherBase* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); const auto& data_pshape = input_shapes[0]; const auto& indices_pshape = input_shapes[1]; const auto& axis_pshape = input_shapes[2]; auto data_rank = data_pshape.rank(); auto indices_rank = indices_pshape.rank(); auto axis_rank = axis_pshape.rank(); + auto output_shapes = std::vector(1); auto& output_pshape = output_shapes[0]; if (axis_rank.is_static() && axis_pshape.is_static()) { @@ -37,12 +36,11 @@ void shape_infer(const GatherBase* op, batch_dims += indices_rank.get_length(); } - std::vector axes_val; - bool axis_is_set = 
get_data_as_int64(2, op, axes_val, constant_data); - int64_t axis = 0; - - if (axis_is_set) { - axis = axes_val[0]; + bool axis_is_set; + int64_t axis; + if (const auto axes_val = get_input_const_data_as(op, 2, tensor_accessor)) { + axis = (*axes_val)[0]; + axis_is_set = true; if (data_rank.is_static()) { OPENVINO_SUPPRESS_DEPRECATED_START @@ -58,6 +56,9 @@ void shape_infer(const GatherBase* op, batch_dims, ", axis = ", axis); + } else { + axis_is_set = false; + axis = 0; } if (indices_rank.is_static() && batch_dims >= 0) { @@ -108,7 +109,7 @@ void shape_infer(const GatherBase* op, out_rank = out_rank - indices_rank.get_max_length(); output_pshape = PartialShape::dynamic(out_rank); } + return output_shapes; } -} // namespace util } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/gather_tree_shape_inference.hpp b/src/core/shape_inference/include/gather_tree_shape_inference.hpp index 4b1d5756d20..2a28a5453d2 100644 --- a/src/core/shape_inference/include/gather_tree_shape_inference.hpp +++ b/src/core/shape_inference/include/gather_tree_shape_inference.hpp @@ -2,23 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 // #pragma once -#include + +#include "openvino/op/gather_tree.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const GatherTree* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const GatherTree* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); - using DimType = typename std::iterator_traits::value_type; + using DimType = typename TShape::value_type; + const auto& step_ids_shape = input_shapes[0]; const auto& parent_idx_shape = input_shapes[1]; const auto& max_seq_len_pshape = input_shapes[2]; const auto& end_token_pshape = input_shapes[3]; - auto result_shape = step_ids_shape; + TRShape result_shape = step_ids_shape; NODE_VALIDATION_CHECK(op, - TShape::merge_into(result_shape, parent_idx_shape) && 
result_shape.rank().compatible(3), + TRShape::merge_into(result_shape, parent_idx_shape) && result_shape.rank().compatible(3), "step_ids and parent_idx inputs must have the same shape with rank 3. Got: ", step_ids_shape, " and ", @@ -47,11 +50,6 @@ std::vector shape_infer(const GatherTree* op, const std::vector& end_token_pshape); return {result_shape}; } - -template -void shape_infer(const GatherTree* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v1 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/generate_proposals_shape_inference.hpp b/src/core/shape_inference/include/generate_proposals_shape_inference.hpp index d159f4ef198..e5f5a7bff56 100644 --- a/src/core/shape_inference/include/generate_proposals_shape_inference.hpp +++ b/src/core/shape_inference/include/generate_proposals_shape_inference.hpp @@ -6,13 +6,15 @@ #include +#include "utils.hpp" + namespace ov { namespace op { namespace v9 { -template -void shape_infer(const GenerateProposals* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 3); +template > +std::vector shape_infer(const GenerateProposals* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); const auto& im_info_shape = input_shapes[0]; const auto& anchors_shape = input_shapes[1]; @@ -123,9 +125,7 @@ void shape_infer(const GenerateProposals* op, const std::vector& input_shapes } auto num_rois = Dimension(0, (num_batches * op->get_attrs().post_nms_count).get_max_length()); - output_shapes[0] = ov::PartialShape({num_rois, 4}); - output_shapes[1] = ov::PartialShape({num_rois}); - output_shapes[2] = ov::PartialShape({num_batches}); + return {TRShape{num_rois, 4}, TRShape{num_rois}, TRShape{num_batches}}; } } // namespace v9 diff --git 
a/src/core/shape_inference/include/grid_sample_shape_inference.hpp b/src/core/shape_inference/include/grid_sample_shape_inference.hpp index 58f15830dd5..d799fd2f867 100644 --- a/src/core/shape_inference/include/grid_sample_shape_inference.hpp +++ b/src/core/shape_inference/include/grid_sample_shape_inference.hpp @@ -8,21 +8,24 @@ #include #include +#include "utils.hpp" + namespace ov { namespace op { namespace v9 { -template -void shape_infer(const GridSample* op, const std::vector& input_shapes, std::vector& output_shapes) { +template > +std::vector shape_infer(const GridSample* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, - input_shapes.size() == 2 && output_shapes.size() == 1, - "Incorrect number of input/output shapes in GridSample's shape inference function"); + input_shapes.size() == 2, + "Incorrect number of input shapes in GridSample's shape inference function"); const auto& data_shape = input_shapes[0]; NODE_VALIDATION_CHECK(op, data_shape.rank().compatible(4), "The supported shape of the input data tensor is 4D."); const auto& grid_shape = input_shapes[1]; NODE_VALIDATION_CHECK(op, grid_shape.rank().compatible(4), "The supported shape of the grid tensor is 4D."); - shape_t output_shape; + auto output_shapes = std::vector(1); + auto& output_shape = output_shapes.front(); output_shape.resize(4); auto& batch_dim = output_shape[0]; @@ -41,7 +44,7 @@ void shape_infer(const GridSample* op, const std::vector& input_shapes, if (data_shape.rank().is_static()) { NODE_VALIDATION_CHECK( op, - shape_t::value_type::merge(batch_dim, grid_shape[0], data_shape[0]), + TShape::value_type::merge(batch_dim, grid_shape[0], data_shape[0]), "The batch dimension in the input data tensor's shape doesn't match the batch dimension in " "the grid tensor's shape."); channel_dim = data_shape[1]; @@ -50,8 +53,7 @@ void shape_infer(const GridSample* op, const std::vector& input_shapes, batch_dim = data_shape[0]; channel_dim = data_shape[1]; } - - output_shapes[0] = 
std::move(output_shape); + return output_shapes; } } // namespace v9 diff --git a/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp b/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp index d0c4844c0cf..fc69b3af245 100644 --- a/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp +++ b/src/core/shape_inference/include/group_convolution_backprop_shape_inference.hpp @@ -22,36 +22,38 @@ constexpr size_t filter_non_spatial_dims_count } // namespace convolution namespace v1 { -template -std::vector shape_infer(const GroupConvolutionBackpropData* op, - const std::vector& input_shapes, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const GroupConvolutionBackpropData* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end, + const ITensorAccessor& ta = make_tensor_accessor()) { const auto inputs_count = input_shapes.size(); const auto has_spatial_shape = inputs_count >= 3; NODE_VALIDATION_CHECK(op, inputs_count >= 2); using namespace ov::util; - TShape out_spatial_shape; + ov::optional out_spatial_shape; if (has_spatial_shape) { const auto& spatial_shape = input_shapes[2]; NODE_VALIDATION_CHECK(op, spatial_shape.rank().compatible(1), "Input delivering output shape must have rank 1."); - - if (!get_data_as_shape(2, op, out_spatial_shape, constant_data)) { + out_spatial_shape = get_input_const_data_as_shape(op, 2, ta); + if (!out_spatial_shape) { if (spatial_shape.is_static()) { - out_spatial_shape.resize(spatial_shape[0].get_length()); + out_spatial_shape.emplace(); + out_spatial_shape->resize(spatial_shape[0].get_length()); } else { out_spatial_shape = PartialShape::dynamic(); } } + } else { + out_spatial_shape.emplace(); } + const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes, *out_spatial_shape); - const auto num_spatial = 
convolution::calculate_num_spatial(op, input_shapes, out_spatial_shape); - - TShape output_shape; + TRShape output_shape; if (num_spatial != util::num_spatial_undefined) { const auto& data_shape = input_shapes[0]; const auto& filters_shape = input_shapes[1]; @@ -60,9 +62,8 @@ std::vector shape_infer(const GroupConvolutionBackpropData* op, NODE_VALIDATION_CHECK( op, - !has_spatial_shape || out_spatial_shape.rank().is_dynamic() || out_spatial_shape.size() == num_spatial, + !has_spatial_shape || out_spatial_shape->rank().is_dynamic() || out_spatial_shape->size() == num_spatial, "Output shape should be defined for all and only spatial dimensions."); - convolution::resize_empty_padding(num_spatial, pads_begin, pads_end); if (is_attr_validation_required(op)) { convolution::validate::data_shape(op, data_shape); @@ -77,8 +78,7 @@ std::vector shape_infer(const GroupConvolutionBackpropData* op, convolution::validate::common_attributes(op, num_spatial, pads_begin, pads_end); } - convolution::apply_padding(op, input_shapes, out_spatial_shape, pads_begin, pads_end); - + convolution::apply_padding(op, input_shapes, *out_spatial_shape, pads_begin, pads_end); output_shape.reserve(util::spatial_dim_offset + num_spatial); output_shape.emplace_back(data_rank.is_static() ? 
data_shape[0] : dim::inf_bound); @@ -102,8 +102,8 @@ std::vector shape_infer(const GroupConvolutionBackpropData* op, // add spatial dimensions if (has_spatial_shape) { output_shape.insert(output_shape.end(), - std::make_move_iterator(out_spatial_shape.begin()), - std::make_move_iterator(out_spatial_shape.end())); + std::make_move_iterator(out_spatial_shape->begin()), + std::make_move_iterator(out_spatial_shape->end())); } else { convolution::append_spatial_shape(op, data_shape, filters_shape, pads_begin, pads_end, output_shape); } diff --git a/src/core/shape_inference/include/group_convolution_shape_inference.hpp b/src/core/shape_inference/include/group_convolution_shape_inference.hpp index f8f5bfe690d..5a0171306a3 100644 --- a/src/core/shape_inference/include/group_convolution_shape_inference.hpp +++ b/src/core/shape_inference/include/group_convolution_shape_inference.hpp @@ -22,18 +22,17 @@ constexpr size_t filter_non_spatial_dims_count() { } // namespace convolution namespace v1 { -template -std::vector shape_infer(const GroupConvolution* op, - const std::vector& input_shapes, - CoordinateDiff& pads_begin, - CoordinateDiff& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const GroupConvolution* op, + const std::vector& input_shapes, + CoordinateDiff& pads_begin, + CoordinateDiff& pads_end) { NODE_VALIDATION_CHECK(op, input_shapes.size() >= 2); using namespace ov::util; const auto num_spatial = convolution::calculate_num_spatial(op, input_shapes); - TShape output_shape; + TRShape output_shape; if (num_spatial != convolution::num_spatial_undefined) { const auto& data_shape = input_shapes[0]; const auto& filters_shape = input_shapes[1]; diff --git a/src/core/shape_inference/include/group_normalization_shape_inference.hpp b/src/core/shape_inference/include/group_normalization_shape_inference.hpp index 0b36f306d2d..a08d0762e70 100644 --- a/src/core/shape_inference/include/group_normalization_shape_inference.hpp +++ 
b/src/core/shape_inference/include/group_normalization_shape_inference.hpp @@ -50,13 +50,6 @@ std::vector shape_infer(const GroupNormalization* op, const std::vector< return {input_shapes[0]}; } - -template -void shape_infer(const GroupNormalization* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v12 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/gru_cell_shape_inference.hpp b/src/core/shape_inference/include/gru_cell_shape_inference.hpp index 277801b1da8..c8d64f2d710 100644 --- a/src/core/shape_inference/include/gru_cell_shape_inference.hpp +++ b/src/core/shape_inference/include/gru_cell_shape_inference.hpp @@ -12,17 +12,12 @@ namespace ov { namespace op { namespace v3 { -template -std::vector shape_infer(const GRUCell* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const GRUCell* op, const std::vector& input_shapes) { constexpr auto num_gates = 3; constexpr auto num_state_nodes = 1; return rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_linear_before_reset()); } - -template -void shape_infer(const GRUCell* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v3 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/gru_sequence_shape_inference.hpp b/src/core/shape_inference/include/gru_sequence_shape_inference.hpp index e7b5a8065fb..e439fbc211f 100644 --- a/src/core/shape_inference/include/gru_sequence_shape_inference.hpp +++ b/src/core/shape_inference/include/gru_sequence_shape_inference.hpp @@ -11,10 +11,8 @@ namespace ov { namespace op { namespace v5 { -template -void shape_infer(const ov::op::v5::GRUSequence* op, - const std::vector& input_shapes, - std::vector& output_shapes) { +template > +std::vector shape_infer(const ov::op::v5::GRUSequence* op, const std::vector& 
input_shapes) { constexpr size_t expected_in_shapes_count = 6; NODE_VALIDATION_CHECK(op, input_shapes.size() == expected_in_shapes_count, @@ -26,12 +24,12 @@ void shape_infer(const ov::op::v5::GRUSequence* op, constexpr auto num_gates = 3; constexpr auto num_state_nodes = 1; - output_shapes = rnn::seq_base_shape_infer(op, - input_shapes, - num_gates, - num_state_nodes, - op->get_direction(), - op->get_linear_before_reset()); + return rnn::seq_base_shape_infer(op, + input_shapes, + num_gates, + num_state_nodes, + op->get_direction(), + op->get_linear_before_reset()); } } // namespace v5 } // namespace op diff --git a/src/core/shape_inference/include/interpolate_shape_inference.hpp b/src/core/shape_inference/include/interpolate_shape_inference.hpp index bbf76fa4e5c..49b81b2250a 100644 --- a/src/core/shape_inference/include/interpolate_shape_inference.hpp +++ b/src/core/shape_inference/include/interpolate_shape_inference.hpp @@ -130,10 +130,10 @@ void resize_padding(const ov::op::util::InterpolateBase* op, * @param pads_end Dimensions end padding values. * @return TShape Shape with dimensions of input plus paddings. 
*/ -template -TShape make_padded_shape(const TShape& input, TInputIter pads_begin, TInputIter pads_end) { +template > +TRShape make_padded_shape(const TShape& input, TInputIter pads_begin, TInputIter pads_end) { using TDim = typename TShape::value_type; - TShape out; + TRShape out; out.reserve(input.size()); std::transform(input.cbegin(), input.cend(), std::back_inserter(out), [&pads_begin, &pads_end](const TDim& d) { return ov::util::dim::padded(d, (*pads_begin++ + *pads_end++)); @@ -240,14 +240,14 @@ void update_dims_with_scales_on_axes(TShape& out_shape, } // namespace interpolate namespace v0 { -template -std::vector shape_infer(const Interpolate* op, - const std::vector& input_shapes, - const ITensorAccessor& tensor_accessor) { +template > +std::vector shape_infer(const Interpolate* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); const auto& img_shape = input_shapes[0]; - auto output_shapes = std::vector(1, img_shape); + auto output_shapes = std::vector(1, img_shape); auto& out_shape = output_shapes.front(); if (img_shape.rank().is_static()) { @@ -256,7 +256,7 @@ std::vector shape_infer(const Interpolate* op, interpolate::validate::axes_values(op, axes, img_rank); - if (const auto target_spatial_shape = get_input_const_data_as_shape(op, 1, tensor_accessor)) { + if (const auto target_spatial_shape = get_input_const_data_as_shape(op, 1, tensor_accessor)) { auto target_spatial_shape_iter = target_spatial_shape->begin(); for (const auto axis : axes) { out_shape[axis] = *target_spatial_shape_iter++; @@ -271,12 +271,12 @@ std::vector shape_infer(const Interpolate* op, } // namespace v0 namespace v4 { -template -std::vector shape_infer(const Interpolate* op, - const std::vector& input_shapes, - TContainer& pads_begin, - TContainer& pads_end, - const ITensorAccessor& tensor_accessor) { +template > +std::vector shape_infer(const Interpolate* op, + const std::vector& input_shapes, + 
TContainer& pads_begin, + TContainer& pads_end, + const ITensorAccessor& tensor_accessor) { const auto has_axes_input = (input_shapes.size() == 4); NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || has_axes_input)); @@ -289,13 +289,13 @@ std::vector shape_infer(const Interpolate* op, } const auto& img_shape = input_shapes[0]; - auto output_shapes = std::vector(); + auto output_shapes = std::vector(); if (img_shape.rank().is_static()) { const auto img_rank = img_shape.size(); interpolate::resize_padding(op, img_rank, pads_begin, pads_end); - const auto axes = interpolate::get_axes(op, 3, has_axes_input, img_rank, tensor_accessor); + const auto axes = interpolate::get_axes(op, 3, has_axes_input, img_rank, tensor_accessor); if (axes) { output_shapes.push_back(interpolate::make_padded_shape(img_shape, pads_begin.cbegin(), pads_end.cbegin())); @@ -315,18 +315,18 @@ std::vector shape_infer(const Interpolate* op, } // namespace v4 namespace v11 { -template -std::vector shape_infer(const Interpolate* op, - const std::vector& input_shapes, - TContainer& pads_begin, - TContainer& pads_end, - const ITensorAccessor& tensor_accessor) { +template > +std::vector shape_infer(const Interpolate* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end, + const ITensorAccessor& tensor_accessor) { NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3)); interpolate::validate::are_inputs_except_first_1d(op, input_shapes); const auto& img_shape = input_shapes[0]; - auto output_shapes = std::vector(); + auto output_shapes = std::vector(); if (img_shape.rank().is_static()) { const auto img_rank = img_shape.size(); @@ -334,7 +334,7 @@ std::vector shape_infer(const Interpolate* op, interpolate::resize_padding(op, img_rank, pads_begin, pads_end); - const auto axes = interpolate::get_axes(op, 2, has_axes_input, img_rank, tensor_accessor); + const auto axes = interpolate::get_axes(op, 2, has_axes_input, img_rank, tensor_accessor); 
if (axes) { output_shapes.push_back(interpolate::make_padded_shape(img_shape, pads_begin.cbegin(), pads_end.cbegin())); diff --git a/src/core/shape_inference/include/irdft_shape_inference.hpp b/src/core/shape_inference/include/irdft_shape_inference.hpp index 78767b4a3d5..2aed5451a52 100644 --- a/src/core/shape_inference/include/irdft_shape_inference.hpp +++ b/src/core/shape_inference/include/irdft_shape_inference.hpp @@ -2,38 +2,39 @@ // SPDX-License-Identifier: Apache-2.0 // #pragma once -#include #include "openvino/core/axis_vector.hpp" +#include "openvino/op/irdft.hpp" #include "rfft_common_validation.hpp" #include "utils.hpp" namespace ov { namespace op { namespace v9 { -template -void shape_infer(const ov::op::v9::IRDFT* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - using DimType = typename std::iterator_traits::value_type; - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3) && output_shapes.size() == 1); +template > +std::vector shape_infer(const IRDFT* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + using DimType = typename T::value_type; + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3)); const auto& input_shape = input_shapes[0]; const auto& axes_shape = input_shapes[1]; + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; - std::vector axes; - bool axes_are_known = get_data_as_int64(1, op, axes, constant_data); + + auto axes = get_input_const_data_as(op, 1, ta); + auto axes_are_known = static_cast(axes); util::rfft_common_validation::shape_validation(op, input_shapes, - axes, + *axes, axes_are_known, util::rfft_common_validation::RFFTKind::Inverse); if (input_shape.rank().is_dynamic()) { output_shape = ov::PartialShape::dynamic(); - return; + return output_shapes; } const auto input_rank = input_shape.size(); @@ -45,34 +46,34 @@ void shape_infer(const 
ov::op::v9::IRDFT* op, for (size_t i = 0; i < input_rank - 1; ++i) { output_shape[i] = ov::Dimension::dynamic(); } - return; + return output_shapes; } - const auto last_axis = axes.back(); + const auto last_axis = axes->back(); if (input_shapes.size() == 2) { output_shape[last_axis] = DimType(2) * (input_shape[last_axis] - DimType(1)); - return; + return output_shapes; } const auto& signal_size_shape = input_shapes[2]; - std::vector signal_size; - bool status_signal_size = get_data_as_int64(2, op, signal_size, constant_data); + auto signal_size = get_input_const_data_as(op, 2, ta); - if (signal_size_shape.rank().is_dynamic() || !status_signal_size) { + if (signal_size_shape.rank().is_dynamic() || !signal_size) { output_shape[last_axis] = ov::Dimension::dynamic(); - return; + return output_shapes; } - size_t num_of_axes = axes.size(); + size_t num_of_axes = axes->size(); for (size_t i = 0; i < num_of_axes; ++i) { - if (signal_size[i] != -1) { - output_shape[axes[i]] = DimType(signal_size[i]); + if ((*signal_size)[i] != -1) { + output_shape[(*axes)[i]] = DimType((*signal_size)[i]); } } - if (signal_size.back() == -1) { + if (signal_size->back() == -1) { output_shape[last_axis] = DimType(2) * (input_shape[last_axis] - DimType(1)); } + return output_shapes; } } // namespace v9 } // namespace op diff --git a/src/core/shape_inference/include/lstm_cell_shape_inference.hpp b/src/core/shape_inference/include/lstm_cell_shape_inference.hpp index 0100f2d6329..3ac1c6bfac5 100644 --- a/src/core/shape_inference/include/lstm_cell_shape_inference.hpp +++ b/src/core/shape_inference/include/lstm_cell_shape_inference.hpp @@ -11,11 +11,18 @@ namespace ov { namespace op { namespace v0 { +namespace lstm_cell { +constexpr size_t gates_count = 4; +constexpr size_t num_state_nodes = 2; +constexpr size_t peepholes_count = 3; +} // namespace lstm_cell + template -void shape_infer(const LSTMCell* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, 
input_shapes.size() == 7 && output_shapes.size() == 2); - constexpr auto num_state_nodes = 2; - output_shapes = rnn::cell_base_shape_infer(op, input_shapes, op->s_gates_count, num_state_nodes); +std::vector> shape_infer(const LSTMCell* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 7); + + auto output_shapes = + rnn::cell_base_shape_infer(op, input_shapes, lstm_cell::gates_count, lstm_cell::num_state_nodes); const auto& hidden_size = output_shapes[0][1]; if (hidden_size.is_dynamic()) { // set hidden_size based on attribute output_shapes[0][1] = op->get_hidden_size(); @@ -24,34 +31,33 @@ void shape_infer(const LSTMCell* op, const std::vector& input_shapes, std::ve const auto& p_pshape = input_shapes[6]; if (p_pshape[0].is_static() && hidden_size.is_static()) { NODE_VALIDATION_CHECK(op, - p_pshape[0].compatible(hidden_size * op->s_peepholes_count), - "Parameter hidden_size mistmatched in P input. Current value is: ", + p_pshape[0].compatible(hidden_size * 3), + "Parameter hidden_size mismatched in P input. 
Current value is: ", p_pshape[0].get_length(), ", expected: ", - hidden_size.get_length() * op->s_peepholes_count, + hidden_size.get_length() * 3, "."); } + return output_shapes; } } // namespace v0 namespace v4 { +namespace lstm_cell { +constexpr size_t gates_count = 4; +} + template -std::vector shape_infer(const LSTMCell* op, const std::vector& input_shapes) { +std::vector> shape_infer(const LSTMCell* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 6); - constexpr auto num_gates = 4; constexpr auto num_state_nodes = 2; - auto output_shapes = rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes); + auto output_shapes = rnn::cell_base_shape_infer(op, input_shapes, lstm_cell::gates_count, num_state_nodes); if (output_shapes[0][1].is_dynamic()) { // set hidden_size based on attribute output_shapes[0][1] = op->get_hidden_size(); output_shapes[1][1] = op->get_hidden_size(); } return output_shapes; } - -template -void shape_infer(const LSTMCell* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v4 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/lstm_sequence_shape_inference.hpp b/src/core/shape_inference/include/lstm_sequence_shape_inference.hpp index 375b7b3bf22..a34c7a52447 100644 --- a/src/core/shape_inference/include/lstm_sequence_shape_inference.hpp +++ b/src/core/shape_inference/include/lstm_sequence_shape_inference.hpp @@ -9,7 +9,7 @@ namespace ov { namespace op { namespace v0 { template -std::vector shape_infer(const LSTMSequence* op, const std::vector& input_shapes) { +std::vector> shape_infer(const LSTMSequence* op, const std::vector& input_shapes) { constexpr auto num_gates = 4; constexpr auto num_state_nodes = 2; const auto output_shapes = @@ -33,7 +33,7 @@ std::vector shape_infer(const LSTMSequence* op, const std::vector -std::vector shape_infer(const LSTMSequence* op, const 
std::vector& input_shapes) { +std::vector> shape_infer(const LSTMSequence* op, const std::vector& input_shapes) { constexpr auto num_gates = 4; constexpr auto num_state_nodes = 2; return rnn::seq_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_direction()); diff --git a/src/core/shape_inference/include/matmul_shape_inference.hpp b/src/core/shape_inference/include/matmul_shape_inference.hpp index 55700c6fbdb..425dd5839ac 100644 --- a/src/core/shape_inference/include/matmul_shape_inference.hpp +++ b/src/core/shape_inference/include/matmul_shape_inference.hpp @@ -10,15 +10,16 @@ namespace ov { namespace op { namespace v0 { -template -void shape_infer(const ov::op::v0::MatMul* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); +template > +std::vector shape_infer(const MatMul* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); auto arg0_shape = input_shapes[0], arg1_shape = input_shapes[1]; if (arg0_shape.rank().is_dynamic() || arg1_shape.rank().is_dynamic()) { - output_shapes[0] = ov::PartialShape::dynamic(); - return; + return {ov::PartialShape::dynamic()}; } + + auto output_shapes = std::vector(); // ranks are known const bool transpose_a = op->get_transpose_a(); const bool transpose_b = op->get_transpose_b(); @@ -27,7 +28,7 @@ void shape_infer(const ov::op::v0::MatMul* op, const std::vector& input_shape NODE_VALIDATION_CHECK(op, (arg0_rank != 0 && arg1_rank != 0), "Scalars are not supported as MatMul inputs."); // Temporary Dimension vectors to calculate output shape - T arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape); + TRShape arg0_shape_tmp(arg0_shape), arg1_shape_tmp(arg1_shape); // 1. Applying transpositions specified by optional `transpose_a` and `transpose_b` // Only two right-most dimensions are swapped, other dimensions remain the same. 
@@ -58,7 +59,7 @@ void shape_infer(const ov::op::v0::MatMul* op, const std::vector& input_shape // COL_INDEX_DIM of the first matrix has to match ROW_INDEX_DIM of the second matrix. // Error is not thrown for dynamic dimensions bounds without intersection // to ensure MatMul backward compatibility. - using DimType = typename std::iterator_traits::value_type; + using DimType = typename T::value_type; auto merged_dimension = DimType(); auto arg0_col_dim = arg0_shape_tmp[arg0_rank - 1]; auto arg1_row_dim = arg1_shape_tmp[arg1_rank - 2]; @@ -116,7 +117,8 @@ void shape_infer(const ov::op::v0::MatMul* op, const std::vector& input_shape // arg1 input temporary axis inserted at COL_INDEX_DIM is removed output_shape.erase(output_shape.begin() + output_shape.size() - 1); } - output_shapes[0] = output_shape; + output_shapes.emplace_back(std::move(output_shape)); + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/max_pool_shape_inference.hpp b/src/core/shape_inference/include/max_pool_shape_inference.hpp index 806cd3f3db3..f5cf23a16c9 100644 --- a/src/core/shape_inference/include/max_pool_shape_inference.hpp +++ b/src/core/shape_inference/include/max_pool_shape_inference.hpp @@ -12,12 +12,11 @@ namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const MaxPool* op, - const std::vector& input_shapes, - TContainer& pads_begin, - TContainer& pads_end, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const MaxPool* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end) { const auto& data_shape = input_shapes[0]; const auto dilations = Strides(op->get_kernel().size(), 1); @@ -32,12 +31,11 @@ std::vector shape_infer(const MaxPool* op, } // namespace v1 namespace v8 { -template -std::vector shape_infer(const MaxPool* op, - const std::vector& input_shapes, - TContainer& pads_begin, - TContainer& pads_end, - const std::map& constant_data = 
{}) { +template > +std::vector shape_infer(const MaxPool* op, + const std::vector& input_shapes, + TContainer& pads_begin, + TContainer& pads_end) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); const auto& data_shape = input_shapes[0]; diff --git a/src/core/shape_inference/include/nms_shape_inference.hpp b/src/core/shape_inference/include/nms_shape_inference.hpp index 1679b4a7b54..a523348cfeb 100644 --- a/src/core/shape_inference/include/nms_shape_inference.hpp +++ b/src/core/shape_inference/include/nms_shape_inference.hpp @@ -4,25 +4,23 @@ #pragma once -#include -#include #include +#include "openvino/core/validation_util.hpp" +#include "openvino/op/non_max_suppression.hpp" #include "utils.hpp" -using namespace ngraph; - namespace ov { namespace op { namespace v9 { -template +template > void shape_infer(const NonMaxSuppression* op, const std::vector& input_shapes, std::vector& output_shapes, bool static_output = false, - const std::map>& constant_data = {}) { - // this shape_infer differs from all the other - it is used in GPU during compile-time and infer-time in custom code + const ITensorAccessor& ta = make_tensor_accessor()) { + // this shape_infer differs from all the other - it is used in GPU during compile-time and infer-time in custom NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 3); const auto& boxes_ps = input_shapes[0]; @@ -66,17 +64,16 @@ void shape_infer(const NonMaxSuppression* op, // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] - ov::PartialShape out_shape = {Dimension::dynamic(), 3}; + TRShape out_shape = {Dimension::dynamic(), 3}; if (boxes_ps.rank().is_static() && scores_ps.rank().is_static()) { const auto num_boxes_boxes = boxes_ps[1]; if (num_boxes_boxes.get_max_length() != -1 && scores_ps[0].get_max_length() != -1 && scores_ps[1].get_max_length() != -1) { const auto num_boxes = num_boxes_boxes.get_max_length(); const auto num_classes = 
scores_ps[1].get_max_length(); - std::vector max_output_boxes_per_class_as_vals; - if ((op->get_input_size() > 2 || constant_data.count(2)) && - get_data_as_int64(2, op, max_output_boxes_per_class_as_vals, constant_data)) { - int64_t max_output_boxes_per_class = max_output_boxes_per_class_as_vals[0]; + + if (auto max_output_boxes_per_class_as_vals = get_input_const_data_as(op, 2, ta)) { + int64_t max_output_boxes_per_class = (*max_output_boxes_per_class_as_vals)[0]; out_shape[0] = static_output ? std::min(num_boxes, max_output_boxes_per_class) * num_classes * scores_ps[0].get_max_length() : Dimension(0, diff --git a/src/core/shape_inference/include/one_hot_shape_inference.hpp b/src/core/shape_inference/include/one_hot_shape_inference.hpp index 3da491e3d60..be68da4619d 100644 --- a/src/core/shape_inference/include/one_hot_shape_inference.hpp +++ b/src/core/shape_inference/include/one_hot_shape_inference.hpp @@ -2,63 +2,28 @@ // SPDX-License-Identifier: Apache-2.0 // #pragma once -#include -#include +#include "openvino/op/one_hot.hpp" #include "utils.hpp" namespace ov { namespace op { +namespace util { + +template +struct GetNotNegative { + const Node* m_op; + + GetNotNegative(const Node* op) : m_op{op} {} + + template + T operator()(const V v) const { + NODE_VALIDATION_CHECK(m_op, cmp::ge(v, 0), "OneHot depth value can't be negative."); + return static_cast(v); + } +}; +} // namespace util namespace v1 { - -namespace utils { -namespace one_hot { - -OPENVINO_SUPPRESS_DEPRECATED_START -template -inline bool get_data_as_shape_and_validate_sign( - size_t idx, - const ov::Node* op, - TShape& shape, - const std::map>& constant_data) { - if (constant_data.count(idx)) { - using DimType = typename TShape::value_type; - const auto data = host_tensor_2_vector(constant_data.at(idx)); - shape.clear(); - std::transform(data.cbegin(), data.cend(), std::back_inserter(shape), [&](int64_t v) { - NODE_VALIDATION_CHECK(op, v >= 0, "OneHot depth value can't be negative."); - return 
static_cast(v); - }); - return true; - } else { - return get_data_as_shape(idx, op, shape, constant_data); - } -} - -template <> -inline bool get_data_as_shape_and_validate_sign( - size_t idx, - const ov::Node* op, - ov::PartialShape& shape, - const std::map>& constant_data) { - if (constant_data.count(idx)) { - const auto data = host_tensor_2_vector(constant_data.at(idx)); - for (const auto& value : data) { - NODE_VALIDATION_CHECK(op, value >= 0, "OneHot depth value can't be negative."); - } - shape = PartialShape(data); - return true; - } else { - OPENVINO_SUPPRESS_DEPRECATED_START - return ov::evaluate_as_partial_shape(op->input_value(idx), shape); - OPENVINO_SUPPRESS_DEPRECATED_END - } -} -OPENVINO_SUPPRESS_DEPRECATED_END - -} // namespace one_hot -} // namespace utils - void inline resolve_axis(OneHot* op) { if (op->get_input_size() < 1) { return; @@ -72,13 +37,12 @@ void inline resolve_axis(OneHot* op) { } } -template -void shape_infer(const OneHot* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 4 && output_shapes.size() == 1); - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const OneHot* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); + using DimType = typename T::value_type; const auto& indices_shape = input_shapes[0]; const auto& depth_shape = input_shapes[1]; const auto& on_value_shape = input_shapes[2]; @@ -96,6 +60,7 @@ void shape_infer(const OneHot* op, off_value_shape.is_dynamic() || ngraph::is_scalar(off_value_shape.to_shape()), "off_value input must be scalar."); + auto output_shapes = std::vector(1); auto& result_shape = output_shapes[0]; if (indices_shape.rank().is_static()) { result_shape = indices_shape; @@ -104,16 +69,18 @@ void shape_infer(const OneHot* op, const auto axis = 
ov::normalize_axis(op, op->get_axis(), indices_rank + 1, -indices_rank - 1, indices_rank); OPENVINO_SUPPRESS_DEPRECATED_END - T depth_dim_as_shape; - if (utils::one_hot::get_data_as_shape_and_validate_sign(1, op, depth_dim_as_shape, constant_data) && - depth_dim_as_shape.size() == 1) { - result_shape.insert(result_shape.begin() + axis, depth_dim_as_shape[0]); + auto depth_as_shape = + get_input_const_data_as_shape(op, 1, ta, util::GetNotNegative(op)); + + if (depth_as_shape && depth_as_shape->size() == 1) { + result_shape.insert(result_shape.begin() + axis, (*depth_as_shape)[0]); } else { result_shape.insert(result_shape.begin() + axis, DimType()); } } else { result_shape = PartialShape::dynamic(); } + return output_shapes; } } // namespace v1 } // namespace op diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/ov_optional.hpp b/src/core/shape_inference/include/ov_optional.hpp similarity index 76% rename from src/plugins/intel_cpu/src/utils/shape_inference/ov_optional.hpp rename to src/core/shape_inference/include/ov_optional.hpp index 1a9abcdcd79..f7f8b474f9a 100644 --- a/src/plugins/intel_cpu/src/utils/shape_inference/ov_optional.hpp +++ b/src/core/shape_inference/include/ov_optional.hpp @@ -25,13 +25,13 @@ public: optional(const optional& other) : m_has_value{other.m_has_value}, m_opt{} { if (other.m_has_value) { - construct(*other); + create(*other); } } optional(optional&& other) noexcept : m_has_value{other.m_has_value}, m_opt{} { if (other.m_has_value) { - construct(std::move(*other)); + create(std::move(*other)); } } @@ -40,13 +40,8 @@ public: } optional& operator=(const optional& other) { - if (other.m_has_value) { - if (m_has_value) { - m_opt.m_value = *other; - } else { - construct(*other); - } - m_has_value = true; + if (other) { + *this = *other; } else { reset(); } @@ -54,19 +49,24 @@ public: } optional& operator=(optional&& other) noexcept { - if (other.m_has_value) { - if (m_has_value) { - m_opt.m_value = std::move(*other); - } else { - 
construct(std::move(*other)); - } - m_has_value = true; + if (other) { + *this = std::move(*other); } else { reset(); } return *this; } + template + optional& operator=(U&& value) { + if (m_has_value) { + m_opt.m_value = std::forward(value); + } else { + emplace(std::forward(value)); + } + return *this; + } + constexpr operator bool() const { return m_has_value; } @@ -84,7 +84,7 @@ public: } T&& operator*() && noexcept { - return m_opt.m_value; + return std::move(m_opt.m_value); } constexpr const T* operator->() const noexcept { @@ -102,9 +102,15 @@ public: } } + template + void emplace(Args&&... args) { + create(std::forward(args)...); + m_has_value = true; + } + private: template - void construct(Args&&... args) { + void create(Args&&... args) { new (std::addressof(m_opt)) T(std::forward(args)...); } @@ -123,7 +129,7 @@ private: ~Storage() {} }; - bool m_has_value; - Storage m_opt; + bool m_has_value = false; + Storage m_opt{}; }; } // namespace ov diff --git a/src/core/shape_inference/include/pad_shape_inference.hpp b/src/core/shape_inference/include/pad_shape_inference.hpp index cc81e70caf2..760b128caee 100644 --- a/src/core/shape_inference/include/pad_shape_inference.hpp +++ b/src/core/shape_inference/include/pad_shape_inference.hpp @@ -11,11 +11,10 @@ #include "utils.hpp" namespace ov { namespace op { -namespace util { -template -std::vector shape_infer(const PadBase* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const util::PadBase* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 || input_shapes.size() == 4); const auto& pad_mode = op->get_pad_mode(); @@ -49,9 +48,10 @@ std::vector shape_infer(const PadBase* op, const auto& arg_shape = input_shapes[0]; const auto& arg_shape_rank = arg_shape.rank(); - TShape output_shape; - const auto pads_begin_coord = get_input_bounds(op, 1, 
constant_data); - const auto pads_end_coord = get_input_bounds(op, 2, constant_data); + auto output_shapes = std::vector(1); + auto& output_shape = output_shapes[0]; + const auto pads_begin_coord = get_input_bounds(op, 1, tensor_accessor); + const auto pads_end_coord = get_input_bounds(op, 2, tensor_accessor); if (arg_shape_rank.is_static()) { const auto arg_rank_len = arg_shape_rank.get_length(); @@ -135,33 +135,11 @@ std::vector shape_infer(const PadBase* op, ")."); output_shape.resize(arg_shape_rank.get_length()); } - - return {output_shape}; } else { - return {PartialShape::dynamic()}; + output_shape = PartialShape::dynamic(); } -} -} // namespace util - -namespace v1 { -template -void shape_infer(const Pad* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = op::util::shape_infer(op, input_shapes, constant_data); + return output_shapes; } -} // namespace v1 - -namespace v12 { -template -void shape_infer(const Pad* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = op::util::shape_infer(op, input_shapes, constant_data); -} -} // namespace v12 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/pooling_shape_inference_util.hpp b/src/core/shape_inference/include/pooling_shape_inference_util.hpp index baef60e624c..6060358c887 100644 --- a/src/core/shape_inference/include/pooling_shape_inference_util.hpp +++ b/src/core/shape_inference/include/pooling_shape_inference_util.hpp @@ -108,7 +108,7 @@ void apply_padding(const TOp* op, pads_begin.reserve(num_spatial); pads_end.reserve(num_spatial); - auto data_dim = data_shape.cbegin() + spatial_dim_offset; + auto data_dim = &data_shape[spatial_dim_offset]; auto pad_b = auto_pad == PadType::SAME_UPPER ? pads_begin.begin() : pads_end.begin(); auto pad_e = auto_pad == PadType::SAME_UPPER ? 
pads_end.begin() : pads_begin.begin(); @@ -163,13 +163,13 @@ void valid_dilated_kernel_with_padding(const TOp* op, * @param dilations Kernel dilations. * @param out_shape Output shape for appending the spatial shape of pooling */ -template +template void append_spatial_shape(const TOp* op, const TShape& data_shape, const TContainer& pads_begin, const TContainer& pads_end, const Strides& dilations, - TShape& out_shape) { + TRShape& out_shape) { using namespace ov::util; const auto spatial_num = data_shape.size() - spatial_dim_offset; const auto is_ceil_mode = op->get_rounding_type() == RoundingType::CEIL; @@ -178,7 +178,7 @@ void append_spatial_shape(const TOp* op, using TDim = typename TShape::value_type; const auto& dim_divide = is_ceil_mode ? dim::ceil_div : dim::floor_div; - auto data_dim = data_shape.cbegin() + spatial_dim_offset; + auto data_dim = &data_shape[spatial_dim_offset]; const auto& kernel = op->get_kernel(); const auto& stride = op->get_strides(); @@ -207,14 +207,14 @@ void append_spatial_shape(const TOp* op, /** * @brief Shape inference helper used for pooling operators such Max Pool, Avg Pool. 
*/ -template -TShape out_shape_infer(const TOp* op, - const TShape& data_shape, - const TContainer& pads_begin, - const TContainer& pads_end, - const Strides& dilations) { +template > +TRShape out_shape_infer(const TOp* op, + const TShape& data_shape, + const TContainer& pads_begin, + const TContainer& pads_end, + const Strides& dilations) { const auto out_rank_size = spatial_dim_offset + op->get_kernel().size(); - TShape out_shape; + TRShape out_shape; if (data_shape.rank().is_static()) { const auto& batch_size = data_shape[0]; const auto& channel_count = data_shape[1]; @@ -239,11 +239,10 @@ TShape out_shape_infer(const TOp* op, */ template , typename std::enable_if::value || std::is_same::value>::type* = nullptr> -TShape out_shape_infer(const TOp* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +TRShape out_shape_infer(const TOp* op, const std::vector& input_shapes, const ITensorAccessor& ta) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); const auto& data_shape = input_shapes[0]; @@ -258,7 +257,7 @@ TShape out_shape_infer(const TOp* op, data_shape); OPENVINO_SUPPRESS_DEPRECATED_END - TShape output_shape; + TRShape output_shape; if (data_rank.is_static()) { auto num_of_spatial_dims = data_shape.size() - spatial_dim_offset; @@ -270,7 +269,7 @@ TShape out_shape_infer(const TOp* op, output_shape.reserve(data_shape.size()); std::copy_n(data_shape.begin(), spatial_dim_offset, std::back_inserter(output_shape)); - if (const auto spatial_dims = get_input_const_data_as_shape(op, 1, constant_data)) { + if (const auto spatial_dims = get_input_const_data_as_shape(op, 1, ta)) { NODE_VALIDATION_CHECK(op, num_of_spatial_dims == spatial_dims->size(), "Number of spatial dimensions is not compatible with input data rank"); diff --git a/src/core/shape_inference/include/prior_box_clustered_shape_inference.hpp b/src/core/shape_inference/include/prior_box_clustered_shape_inference.hpp index e48045e79aa..8e7ff81b30c 100644 --- 
a/src/core/shape_inference/include/prior_box_clustered_shape_inference.hpp +++ b/src/core/shape_inference/include/prior_box_clustered_shape_inference.hpp @@ -9,19 +9,11 @@ namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const PriorBoxClustered* const op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { - return prior_box::shape_infer(op, input_shapes, constant_data); -} - -template -void shape_infer(const PriorBoxClustered* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = prior_box::shape_infer(op, input_shapes, constant_data); +template > +std::vector shape_infer(const PriorBoxClustered* const op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + return prior_box::shape_infer(op, input_shapes, ta); } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/prior_box_shape_inference.hpp b/src/core/shape_inference/include/prior_box_shape_inference.hpp index a660d8e3b2c..9bbddd24842 100644 --- a/src/core/shape_inference/include/prior_box_shape_inference.hpp +++ b/src/core/shape_inference/include/prior_box_shape_inference.hpp @@ -9,28 +9,20 @@ namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const PriorBox* const op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { - return prior_box::shape_infer(op, input_shapes, constant_data); +template > +std::vector shape_infer(const PriorBox* const op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + return prior_box::shape_infer(op, input_shapes, ta); } } // namespace v0 namespace v8 { -template -std::vector shape_infer(const PriorBox* const op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { - return prior_box::shape_infer(op, input_shapes, constant_data); -} - -template -void shape_infer(const 
PriorBox* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = prior_box::shape_infer(op, input_shapes, constant_data); +template > +std::vector shape_infer(const PriorBox* const op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + return prior_box::shape_infer(op, input_shapes, ta); } } // namespace v8 diff --git a/src/core/shape_inference/include/prior_box_shape_inference_util.hpp b/src/core/shape_inference/include/prior_box_shape_inference_util.hpp index 9ef24c209af..91b1360093a 100644 --- a/src/core/shape_inference/include/prior_box_shape_inference_util.hpp +++ b/src/core/shape_inference/include/prior_box_shape_inference_util.hpp @@ -46,10 +46,10 @@ TDim number_of_priors(const v0::PriorBoxClustered* const op) { return {static_cast(op->get_attrs().widths.size())}; } -template -std::vector shape_infer(const TOp* const op, - const std::vector& input_shapes, - const std::map& constant_data) { +template > +std::vector shape_infer(const TOp* const op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); auto out_size_rank = input_shapes[0].rank(); @@ -62,12 +62,12 @@ std::vector shape_infer(const TOp* const op, img_size_rank, " and both must be 1-D"); - auto output_shapes = std::vector(1, TShape{2}); + auto output_shapes = std::vector(1, TRShape{2}); - if (auto out_size = get_input_const_data_as_shape(op, 0, constant_data)) { + if (auto out_size = get_input_const_data_as_shape(op, 0, ta)) { NODE_VALIDATION_CHECK(op, out_size->size() == 2, "Output size must have two elements. 
Got: ", out_size->size()); - using TDim = typename TShape::value_type; + using TDim = typename TRShape::value_type; const auto num_of_priors = prior_box::number_of_priors(op); output_shapes.front().push_back((*out_size)[0] * (*out_size)[1] * num_of_priors * 4); } else { diff --git a/src/core/shape_inference/include/proposal_shape_inference.hpp b/src/core/shape_inference/include/proposal_shape_inference.hpp index b64d09725b9..a3b7f4702e4 100644 --- a/src/core/shape_inference/include/proposal_shape_inference.hpp +++ b/src/core/shape_inference/include/proposal_shape_inference.hpp @@ -11,9 +11,9 @@ namespace ov { namespace op { namespace proposal { -template -TShape shape_infer_boxes(const TOp* op, const std::vector& input_shapes) { - using TDim = typename TShape::value_type; +template > +TRShape shape_infer_boxes(const TOp* op, const std::vector& input_shapes) { + using TDim = typename TRShape::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); const auto& class_probs_ps = input_shapes[0]; @@ -43,7 +43,7 @@ TShape shape_infer_boxes(const TOp* op, const std::vector& input_shapes) const auto is_bbox_rank_dynamic = bbox_deltas_ps.rank().is_dynamic(); - TShape proposed_boxes_shape; + TRShape proposed_boxes_shape; proposed_boxes_shape.reserve(2); if (class_probs_ps.rank().is_static()) { @@ -78,8 +78,8 @@ TShape shape_infer_boxes(const TOp* op, const std::vector& input_shapes) } // namespace proposal namespace v0 { -template -std::vector shape_infer(const Proposal* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const Proposal* op, const std::vector& input_shapes) { return {ov::op::proposal::shape_infer_boxes(op, input_shapes)}; } } // namespace v0 @@ -89,9 +89,9 @@ std::vector shape_infer(const Proposal* op, const std::vector& i namespace ov { namespace op { namespace v4 { -template -std::vector shape_infer(const Proposal* op, const std::vector& input_shapes) { - auto output_shapes = std::vector(2, 
ov::op::proposal::shape_infer_boxes(op, input_shapes)); +template > +std::vector shape_infer(const Proposal* op, const std::vector& input_shapes) { + auto output_shapes = std::vector(2, ov::op::proposal::shape_infer_boxes(op, input_shapes)); output_shapes[1].resize(1); return output_shapes; } diff --git a/src/core/shape_inference/include/psroi_pooling_shape_inference.hpp b/src/core/shape_inference/include/psroi_pooling_shape_inference.hpp index c47b1211e66..d84a9c5d242 100644 --- a/src/core/shape_inference/include/psroi_pooling_shape_inference.hpp +++ b/src/core/shape_inference/include/psroi_pooling_shape_inference.hpp @@ -64,8 +64,8 @@ void mode_attr(const TROIPooling* op) { } // namespace psroi_pooling namespace v0 { -template -std::vector shape_infer(const PSROIPooling* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const PSROIPooling* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); using namespace ov::util; @@ -80,19 +80,15 @@ std::vector shape_infer(const PSROIPooling* op, const std::vector(1); + auto& out_shape = output_shapes.front(); out_shape.reserve(4); out_shape.emplace_back(rois_shape.rank().is_static() ? 
rois_shape[0] : dim::inf_bound); out_shape.emplace_back(op->get_output_dim()); out_shape.insert(out_shape.end(), 2, op->get_group_size()); - return {out_shape}; -} - -template -void shape_infer(const PSROIPooling* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/range_shape_inference.hpp b/src/core/shape_inference/include/range_shape_inference.hpp index 72da58cda67..66f7eaa5d8f 100644 --- a/src/core/shape_inference/include/range_shape_inference.hpp +++ b/src/core/shape_inference/include/range_shape_inference.hpp @@ -13,76 +13,41 @@ namespace op { namespace ShapeInferRange { -template -inline bool get_data_as_double( - size_t idx, - const ov::Node* op, - std::vector& axes_value, - const std::map>& constant_data = {}) { - if (constant_data.count(idx)) { - axes_value = ov::opset1::Constant(constant_data.at(idx)).cast_vector(); - } else { - const auto& constant = ov::as_type_ptr(op->get_input_node_shared_ptr(idx)); - NODE_VALIDATION_CHECK(op, constant != nullptr, "Static shape inference lacks constant data on port ", idx); - axes_value = constant->cast_vector(); - } - return true; -} - -template <> -inline bool get_data_as_double( - size_t idx, - const ov::Node* op, - std::vector& axes_value, - const std::map>& constant_data) { - if (constant_data.count(idx)) { - axes_value = ov::opset1::Constant(constant_data.at(idx)).cast_vector(); - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (const auto& constant = ov::get_constant_from_source(op->input_value(idx))) { - OPENVINO_SUPPRESS_DEPRECATED_END - axes_value = constant->cast_vector(); - } else { - return false; - } - return true; -} - -template -void range_shape_infer(const Node* op, - const std::vector& input_shapes, - std::vector& output_shapes, - bool output_is_integral, - bool step_allows_zero, - const std::map>& constant_data) { - 
NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3) && output_shapes.size() == 1); +template > +std::vector range_shape_infer(const Node* op, + const std::vector& input_shapes, + bool output_is_integral, + bool step_allows_zero, + const ITensorAccessor& tensor_accessor) { + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3)); NODE_VALIDATION_CHECK(op, input_shapes[0].rank().compatible(0), "'start' input is not a scalar"); NODE_VALIDATION_CHECK(op, input_shapes[1].rank().compatible(0), "'stop' input is not a scalar"); NODE_VALIDATION_CHECK(op, input_shapes[2].rank().compatible(0), "'step' input is not a scalar"); - std::vector start_val; - std::vector stop_val; - std::vector step_val; + const auto start_val = get_input_const_data_as(op, 0, tensor_accessor); + const auto stop_val = get_input_const_data_as(op, 1, tensor_accessor); + const auto step_val = get_input_const_data_as(op, 2, tensor_accessor); double start = 0; double stop = 0; double step = 0; - if (get_data_as_double(0, op, start_val, constant_data)) { - NODE_VALIDATION_CHECK(op, start_val.size() == 1); - start = start_val[0]; + if (start_val) { + NODE_VALIDATION_CHECK(op, start_val->size() == 1); + start = (*start_val)[0]; NODE_VALIDATION_CHECK(op, std::isfinite(start) && !std::isnan(start), "'start' cannot be nan or infinite."); } - if (get_data_as_double(1, op, stop_val, constant_data)) { - NODE_VALIDATION_CHECK(op, stop_val.size() == 1); - stop = stop_val[0]; + if (stop_val) { + NODE_VALIDATION_CHECK(op, stop_val->size() == 1); + stop = (*stop_val)[0]; NODE_VALIDATION_CHECK(op, std::isfinite(stop) && !std::isnan(stop), "'stop' cannot be nan or infinite."); } - if (get_data_as_double(2, op, step_val, constant_data)) { - NODE_VALIDATION_CHECK(op, step_val.size() == 1); - step = step_val[0]; + if (step_val) { + NODE_VALIDATION_CHECK(op, step_val->size() == 1); + step = (*step_val)[0]; if (step_allows_zero) NODE_VALIDATION_CHECK(op, std::isfinite(step) && !std::isnan(step), "'step' cannot be nan or 
infinite."); else @@ -91,7 +56,8 @@ void range_shape_infer(const Node* op, "'step' cannot be zero, nan, or infinite."); } - if (start_val.size() == 1 && stop_val.size() == 1 && step_val.size() == 1) { + auto output_shapes = std::vector(1); + if (start_val && stop_val && step_val) { // all inputs must be casted to output_type before // the rounding for casting values are done towards zero if (output_is_integral) { @@ -110,45 +76,38 @@ void range_shape_infer(const Node* op, double strided = ceil(fabs(span) / fabs(step)); - output_shapes[0] = T{static_cast(strided)}; + output_shapes[0] = TRShape{static_cast(strided)}; } else { output_shapes[0] = ov::PartialShape::dynamic(1); } + return output_shapes; } } // namespace ShapeInferRange namespace v0 { - -template -void shape_infer(const Range* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - ShapeInferRange::range_shape_infer(op, - input_shapes, - output_shapes, - op->get_input_element_type(0).is_integral_number(), - false, - constant_data); +template > +std::vector shape_infer(const Range* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + return ShapeInferRange::range_shape_infer(op, + input_shapes, + op->get_input_element_type(0).is_integral_number(), + false, + tensor_accessor); } - } // namespace v0 namespace v4 { - -template -void shape_infer(const Range* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - ShapeInferRange::range_shape_infer(op, - input_shapes, - output_shapes, - op->get_output_type().is_integral_number(), - true, - constant_data); +template > +std::vector shape_infer(const Range* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + return ShapeInferRange::range_shape_infer(op, + input_shapes, + op->get_output_type().is_integral_number(), + true, + tensor_accessor); 
} - } // namespace v4 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/rdft_shape_inference.hpp b/src/core/shape_inference/include/rdft_shape_inference.hpp index 34e45f1c6c9..1a3e9246078 100644 --- a/src/core/shape_inference/include/rdft_shape_inference.hpp +++ b/src/core/shape_inference/include/rdft_shape_inference.hpp @@ -24,29 +24,28 @@ DimType get_rdft_output_dimension(DimType d) { return DimType(get_ouput_dimension_bound(d.get_min_length()), get_ouput_dimension_bound(d.get_max_length())); } -template -void shape_infer(const ov::op::v9::RDFT* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - using DimType = typename std::iterator_traits::value_type; - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3) && output_shapes.size() == 1); +template > +std::vector shape_infer(const RDFT* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + using DimType = typename T::value_type; + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2 || input_shapes.size() == 3)); const auto& input_shape = input_shapes[0]; const auto& axes_shape = input_shapes[1]; + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; - std::vector axes; - bool axes_are_known = get_data_as_int64(1, op, axes, constant_data); + auto axes = get_input_const_data_as(op, 1, ta); util::rfft_common_validation::shape_validation(op, input_shapes, - axes, - axes_are_known, + *axes, + static_cast(axes), util::rfft_common_validation::RFFTKind::Forward); if (input_shape.rank().is_dynamic()) { output_shape = ov::PartialShape::dynamic(); - return; + return output_shapes; } output_shape = input_shape; @@ -54,37 +53,38 @@ void shape_infer(const ov::op::v9::RDFT* op, const auto input_rank = input_shape.size(); - if (axes_shape.rank().is_dynamic() || !axes_are_known) { + if (axes_shape.rank().is_dynamic() || !axes) { for (size_t i = 0; 
i < input_rank; ++i) { output_shape[i] = ov::Dimension::dynamic(); } - return; + return output_shapes; } - const auto last_axis = axes.back(); + const auto last_axis = axes->back(); if (input_shapes.size() == 2) { output_shape[last_axis] = get_rdft_output_dimension(input_shape[last_axis]); - return; + return output_shapes; } const auto& signal_size_shape = input_shapes[2]; - std::vector signal_size; - bool status_signal_size = get_data_as_int64(2, op, signal_size, constant_data); + auto signal_size = get_input_const_data_as(op, 2, ta); - if (signal_size_shape.rank().is_dynamic() || !status_signal_size) { + if (signal_size_shape.rank().is_dynamic() || !signal_size) { output_shape[last_axis] = ov::Dimension::dynamic(); - return; + return output_shapes; } - size_t num_of_axes = axes.size(); + size_t num_of_axes = axes->size(); for (size_t i = 0; i < num_of_axes; ++i) { - const int64_t current_axis = axes[i]; - if (signal_size[i] != -1) { - output_shape[current_axis] = DimType(signal_size[i]); + const int64_t current_axis = (*axes)[i]; + if ((*signal_size)[i] != -1) { + output_shape[current_axis] = DimType((*signal_size)[i]); } } output_shape[last_axis] = get_rdft_output_dimension(output_shape[last_axis]); + + return output_shapes; } } // namespace v9 } // namespace op diff --git a/src/core/shape_inference/include/read_value_shape_inference.hpp b/src/core/shape_inference/include/read_value_shape_inference.hpp deleted file mode 100644 index 1540ce40c49..00000000000 --- a/src/core/shape_inference/include/read_value_shape_inference.hpp +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -#pragma once -#include - -#include "utils.hpp" -namespace ov { -namespace op { - -template -void read_value_shape_infer(const OpType* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - copy_shape_infer(op, input_shapes, output_shapes); -} - -namespace v3 { -template -void shape_infer(const 
ReadValue* op, const std::vector& input_shapes, std::vector& output_shapes) { - read_value_shape_infer(op, input_shapes, output_shapes); -} -} // namespace v3 - -namespace v6 { -template -void shape_infer(const ReadValue* op, const std::vector& input_shapes, std::vector& output_shapes) { - read_value_shape_infer(op, input_shapes, output_shapes); -} -} // namespace v6 -} // namespace op -} // namespace ov diff --git a/src/core/shape_inference/include/reduce_shape_inference.hpp b/src/core/shape_inference/include/reduce_shape_inference.hpp index 035659f1e4e..93c3ffab1aa 100644 --- a/src/core/shape_inference/include/reduce_shape_inference.hpp +++ b/src/core/shape_inference/include/reduce_shape_inference.hpp @@ -9,11 +9,13 @@ #include "utils.hpp" -template -std::vector reduce_shape_infer(const ov::op::util::ReductionBase* op, - bool keep_dims, - const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor = ov::make_tensor_accessor()) { +namespace ov { +namespace op { +template > +std::vector reduce_shape_infer(const util::ReductionBase* op, + bool keep_dims, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); const auto& data_shape = input_shapes[0]; @@ -21,7 +23,7 @@ std::vector reduce_shape_infer(const ov::op::util::ReductionBase* op, const auto& axes_shape = input_shapes[1]; const auto& axes_rank = axes_shape.rank(); - std::vector output_shapes; + std::vector output_shapes; output_shapes.reserve(1); NODE_VALIDATION_CHECK(op, @@ -29,7 +31,7 @@ std::vector reduce_shape_infer(const ov::op::util::ReductionBase* op, "Axes input must be a scalar or 1D input. 
Got: ", axes_shape); - const auto axes_val = ov::op::get_input_const_data_as(op, 1, tensor_accessor); + const auto axes_val = ov::op::get_input_const_data_as(op, 1, tensor_accessor); if (data_rank.is_static() && axes_val) { OPENVINO_SUPPRESS_DEPRECATED_START @@ -38,13 +40,13 @@ std::vector reduce_shape_infer(const ov::op::util::ReductionBase* op, if (keep_dims) { output_shapes.push_back(data_shape); - TShape& output_shape = output_shapes[0]; + auto& output_shape = output_shapes[0]; for (const auto& axis : *axes_val) { output_shape[axis] = 1; } } else { output_shapes.resize(1); - TShape& output_shape = output_shapes[0]; + auto& output_shape = output_shapes[0]; for (size_t i = 0; i < data_shape.size(); ++i) { if (std::find(axes_val->begin(), axes_val->end(), i) == axes_val->end()) { output_shape.push_back(data_shape[i]); @@ -62,35 +64,18 @@ std::vector reduce_shape_infer(const ov::op::util::ReductionBase* op, } // API: TensorAccessor to constant data -template -std::vector shape_infer(const ov::op::util::ArithmeticReductionKeepDims* op, - const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor = ov::make_tensor_accessor()) { +template > +std::vector shape_infer(const util::ArithmeticReductionKeepDims* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { return reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor); } -template -std::vector shape_infer(const ov::op::util::LogicalReductionKeepDims* op, - const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor = ov::make_tensor_accessor()) { +template > +std::vector shape_infer(const util::LogicalReductionKeepDims* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { return reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor); } - -// API for compatibility: Constant data map -template -void shape_infer(const 
ov::op::util::ArithmeticReductionKeepDims* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - const auto tensor_accessor = ov::make_tensor_accessor(constant_data); - output_shapes = reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor); -} - -template -void shape_infer(const ov::op::util::LogicalReductionKeepDims* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - const auto tensor_accessor = ov::make_tensor_accessor(constant_data); - output_shapes = reduce_shape_infer(op, op->get_keep_dims(), input_shapes, tensor_accessor); -} +} // namespace op +} // namespace ov diff --git a/src/core/shape_inference/include/region_yolo_shape_inference.hpp b/src/core/shape_inference/include/region_yolo_shape_inference.hpp index 5363bf0362a..73dafdf511b 100644 --- a/src/core/shape_inference/include/region_yolo_shape_inference.hpp +++ b/src/core/shape_inference/include/region_yolo_shape_inference.hpp @@ -12,27 +12,28 @@ namespace ov { namespace op { namespace v0 { -template -void shape_infer(const RegionYolo* op, const std::vector& input_shapes, std::vector& output_shapes) { - using DimType = typename std::iterator_traits::value_type; - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1) && output_shapes.size() == 1); +template > +std::vector shape_infer(const RegionYolo* op, const std::vector& input_shapes) { + using DimType = typename T::value_type; + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1)); const auto& input_shape = input_shapes[0]; const auto& input_rank = input_shape.rank(); + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; NODE_VALIDATION_CHECK(op, input_rank.compatible(4), "Input must be a tensor of rank 4, but got ", input_rank); if (input_rank.is_static()) { - int64_t end_axis = op->m_end_axis; + int64_t end_axis = op->get_end_axis(); if (end_axis < 0) { end_axis += 
static_cast(input_shape.size()); } - if (op->m_do_softmax) { + if (op->get_do_softmax()) { output_shape.resize(0); OPENVINO_SUPPRESS_DEPRECATED_START - auto axis = ov::normalize_axis(op, op->m_axis, input_rank); + auto axis = ov::normalize_axis(op, op->get_axis(), input_rank); OPENVINO_SUPPRESS_DEPRECATED_END DimType flat_dim = 1; for (int64_t i = 0; i < axis; i++) { @@ -46,15 +47,16 @@ void shape_infer(const RegionYolo* op, const std::vector& input_shapes, std:: output_shape.push_back(input_shape[i]); } } else { - output_shape = T({input_shape[0], - static_cast( - (op->get_num_classes() + op->get_num_coords() + 1) * op->get_mask().size()), - input_shape[2], - input_shape[3]}); + output_shape = TRShape({input_shape[0], + static_cast( + (op->get_num_classes() + op->get_num_coords() + 1) * op->get_mask().size()), + input_shape[2], + input_shape[3]}); } } else { output_shape = ov::PartialShape::dynamic(ov::Rank(1, 4)); } + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/reorg_yolo_shape_inference.hpp b/src/core/shape_inference/include/reorg_yolo_shape_inference.hpp index 9d782ba0daf..52542d8d53f 100644 --- a/src/core/shape_inference/include/reorg_yolo_shape_inference.hpp +++ b/src/core/shape_inference/include/reorg_yolo_shape_inference.hpp @@ -12,10 +12,11 @@ namespace ov { namespace op { namespace v0 { -template -void shape_infer(const ReorgYolo* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1) && output_shapes.size() == 1); +template > +std::vector shape_infer(const ReorgYolo* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 1)); const auto& input_shape = input_shapes[0]; + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; const auto& strides = op->get_strides(); if (input_shape.rank().is_static()) { @@ -34,7 +35,7 @@ void shape_infer(const ReorgYolo* op, const 
std::vector& input_shapes, std::v static_cast(input_shape[1].get_length()) >= (strides[0] * strides[0]), "For [N, C, H, W] input shape, C >= (stride*stride) is required."); - output_shape = T({input_shape[0], input_shape[1]}); + output_shape = TRShape({input_shape[0], input_shape[1]}); for (size_t i = 2; i < input_shape.size(); i++) { if (input_shape[i].is_static()) @@ -53,6 +54,7 @@ void shape_infer(const ReorgYolo* op, const std::vector& input_shapes, std::v } else { output_shape = ov::PartialShape::dynamic(input_shape.rank()); } + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/reverse_sequence_shape_inference.hpp b/src/core/shape_inference/include/reverse_sequence_shape_inference.hpp index 415f5e7f01f..235eb8eb64d 100644 --- a/src/core/shape_inference/include/reverse_sequence_shape_inference.hpp +++ b/src/core/shape_inference/include/reverse_sequence_shape_inference.hpp @@ -6,11 +6,13 @@ #include #include +#include "utils.hpp" + namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const ReverseSequence* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const ReverseSequence* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); using DimType = typename TShape::value_type; @@ -28,7 +30,7 @@ std::vector shape_infer(const ReverseSequence* op, const std::vectorget_origin_batch_axis(), data_rank); @@ -51,13 +53,6 @@ std::vector shape_infer(const ReverseSequence* op, const std::vector -void shape_infer(const ReverseSequence* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v0 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/reverse_shape_inference.hpp b/src/core/shape_inference/include/reverse_shape_inference.hpp index 883f52cb5ef..d904c7e150d 100644 --- 
a/src/core/shape_inference/include/reverse_shape_inference.hpp +++ b/src/core/shape_inference/include/reverse_shape_inference.hpp @@ -18,7 +18,7 @@ struct ClipNegative { template constexpr value_type operator()(const T value) const { - return (std::is_signed::value && value < 0) ? 0 : static_cast(value); + return ov::cmp::lt(value, 0) ? 0 : static_cast(value); } }; } // namespace util @@ -36,10 +36,10 @@ namespace v1 { * * \return Vector of output shapes with one shape. */ -template -std::vector shape_infer(const Reverse* op, - const std::vector& input_shapes, - const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { +template > +std::vector shape_infer(const Reverse* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); const auto& data_shape = input_shapes[0]; @@ -82,24 +82,6 @@ std::vector shape_infer(const Reverse* op, return {data_shape}; } - -/** - * \brief Reverse shape inference - * - * \tparam TShape Type of shape. - * - * \param op Pointer to Reverse operator. - * \param input_shapes Input shapes of Reverse. - * \param output_shapes Output shapes of Reverse - * \param constant_data Map of constant data. Default empty. 
- */ -template -void shape_infer(const Reverse* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v1 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/rnn_base_shape_inference.hpp b/src/core/shape_inference/include/rnn_base_shape_inference.hpp index fcf6af6fee1..61f4067875d 100644 --- a/src/core/shape_inference/include/rnn_base_shape_inference.hpp +++ b/src/core/shape_inference/include/rnn_base_shape_inference.hpp @@ -29,18 +29,15 @@ void validate_inputs_rank(const op::util::RNNCellBase* op, // Output shape layout: // output_shapes[0...num_state_nodes]: [batch_size, hidden_size] // Rank always 2 -template -std::vector cell_base_shape_infer(const op::util::RNNCellBase* op, - const std::vector& input_shapes, - size_t num_gates, - size_t num_state_nodes, - bool linear_before_reset = false) { +template > +std::vector cell_base_shape_infer(const op::util::RNNCellBase* op, + const std::vector& input_shapes, + size_t num_gates, + size_t num_state_nodes, + bool linear_before_reset = false) { const auto num_inputs = 4 + num_state_nodes; NODE_VALIDATION_CHECK(op, input_shapes.size() >= num_inputs, "Incorrect number of shapes has been provided."); - std::vector output_shapes; - output_shapes.reserve(num_state_nodes); - std::vector expected_in_ranks; expected_in_ranks.reserve(num_inputs); expected_in_ranks.insert(expected_in_ranks.end(), 1 + num_state_nodes, Rank(2)); @@ -128,23 +125,23 @@ std::vector cell_base_shape_infer(const op::util::RNNCellBase* op, } } - return {num_state_nodes, TShape{merged_batch_size, merged_hidden_size}}; + return {num_state_nodes, TRShape{merged_batch_size, merged_hidden_size}}; } // Output shapes layout: // output_shapes[0]: [batch_size, num_directions, seq_length, hidden_size] // Rank always 4 // output_shapes[1... 
num_state_nodes]: [batch_size, num_directions, hidden_size] // Rank always 3 -template -std::vector seq_base_shape_infer(const op::util::RNNCellBase* op, - const std::vector& input_shapes, - size_t num_gates, - size_t num_state_nodes, - op::RecurrentSequenceDirection direction, - bool linear_before_reset = false) { +template > +std::vector seq_base_shape_infer(const op::util::RNNCellBase* op, + const std::vector& input_shapes, + size_t num_gates, + size_t num_state_nodes, + op::RecurrentSequenceDirection direction, + bool linear_before_reset = false) { const auto num_inputs = 5 + num_state_nodes; NODE_VALIDATION_CHECK(op, input_shapes.size() >= num_inputs, "Incorrect number of shapes has been provided."); - std::vector output_shapes; + std::vector output_shapes; output_shapes.reserve(1 + num_state_nodes); std::vector expected_in_ranks; @@ -265,14 +262,14 @@ std::vector seq_base_shape_infer(const op::util::RNNCellBase* op, } // Y output - output_shapes.push_back(TShape{merged_batch_size, - merged_num_directions, - x_pshape.rank().is_static() ? x_pshape[1] : DimType(), - merged_hidden_size}); + output_shapes.push_back(TRShape{merged_batch_size, + merged_num_directions, + x_pshape.rank().is_static() ? 
x_pshape[1] : DimType(), + merged_hidden_size}); // Ho, Co outputs output_shapes.insert(output_shapes.end(), num_state_nodes, - TShape{merged_batch_size, merged_num_directions, merged_hidden_size}); + TRShape{merged_batch_size, merged_num_directions, merged_hidden_size}); return output_shapes; } } // namespace rnn diff --git a/src/core/shape_inference/include/rnn_cell_shape_inference.hpp b/src/core/shape_inference/include/rnn_cell_shape_inference.hpp index 5476718a3df..69613a27f62 100644 --- a/src/core/shape_inference/include/rnn_cell_shape_inference.hpp +++ b/src/core/shape_inference/include/rnn_cell_shape_inference.hpp @@ -9,15 +9,11 @@ namespace ov { namespace op { namespace v0 { template -std::vector shape_infer(const RNNCell* op, const std::vector& input_shapes) { +std::vector> shape_infer(const RNNCell* op, const std::vector& input_shapes) { constexpr auto num_gates = 1; constexpr auto num_state_nodes = 1; return rnn::cell_base_shape_infer(op, input_shapes, num_gates, num_state_nodes); } -template -void shape_infer(const RNNCell* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v0 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/rnn_sequence_shape_inference.hpp b/src/core/shape_inference/include/rnn_sequence_shape_inference.hpp index c69dddd0e55..65f79039d52 100644 --- a/src/core/shape_inference/include/rnn_sequence_shape_inference.hpp +++ b/src/core/shape_inference/include/rnn_sequence_shape_inference.hpp @@ -9,7 +9,7 @@ namespace ov { namespace op { namespace v5 { template -std::vector shape_infer(const RNNSequence* op, const std::vector& input_shapes) { +std::vector> shape_infer(const RNNSequence* op, const std::vector& input_shapes) { constexpr auto num_gates = 1; constexpr auto num_state_nodes = 1; return rnn::seq_base_shape_infer(op, input_shapes, num_gates, num_state_nodes, op->get_direction()); diff --git 
a/src/core/shape_inference/include/roi_align_shape_inference.hpp b/src/core/shape_inference/include/roi_align_shape_inference.hpp index 5557bdb00c7..db2e3049cf5 100644 --- a/src/core/shape_inference/include/roi_align_shape_inference.hpp +++ b/src/core/shape_inference/include/roi_align_shape_inference.hpp @@ -48,8 +48,8 @@ inline void batch_indicies_et(const Node* const op) { } } // namespace validate -template -std::vector shape_infer(const OpType* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const OpType* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); using TDim = typename TShape::value_type; @@ -62,7 +62,7 @@ std::vector shape_infer(const OpType* op, const std::vector& inp const auto input_ps_rank = input_ps.rank(); const auto batch_indices_ps_rank = batch_indices_ps.rank(); - auto output_shapes = std::vector(1); + auto output_shapes = std::vector(1); auto& out_shape = output_shapes.front(); out_shape.reserve(4); @@ -103,27 +103,17 @@ std::vector shape_infer(const OpType* op, const std::vector& inp } // namespace roi_align namespace v3 { -template -std::vector shape_infer(const ROIAlign* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const ROIAlign* op, const std::vector& input_shapes) { return roi_align::shape_infer(op, input_shapes); } - -template -void shape_infer(const ROIAlign* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v3 namespace v9 { -template -std::vector shape_infer(const ROIAlign* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const ROIAlign* op, const std::vector& input_shapes) { return roi_align::shape_infer(op, input_shapes); } - -template -void shape_infer(const ROIAlign* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace v9 } // namespace op } 
// namespace ov diff --git a/src/core/shape_inference/include/roi_pooling_shape_inference.hpp b/src/core/shape_inference/include/roi_pooling_shape_inference.hpp index 0de62e9c090..3f319aa5821 100644 --- a/src/core/shape_inference/include/roi_pooling_shape_inference.hpp +++ b/src/core/shape_inference/include/roi_pooling_shape_inference.hpp @@ -9,6 +9,7 @@ #include "compare.hpp" #include "dimension_util.hpp" #include "openvino/op/roi_pooling.hpp" +#include "utils.hpp" namespace ov { namespace op { @@ -77,8 +78,8 @@ void method_attr(const TROIPooling* op) { } // namespace roi_pooling namespace v0 { -template -std::vector shape_infer(const ROIPooling* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const ROIPooling* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); using namespace ov::util; @@ -92,19 +93,15 @@ std::vector shape_infer(const ROIPooling* op, const std::vector& roi_pooling::validate::scale_attr(op); roi_pooling::validate::method_attr(op); - TShape out_shape; + auto output_shapes = std::vector(1); + auto& out_shape = output_shapes.front(); out_shape.reserve(4); out_shape.emplace_back(rois_shape.rank().is_static() ? rois_shape[0] : dim::inf_bound); out_shape.emplace_back(feat_rank.is_static() ? 
feat_shape[1] : dim::inf_bound); std::copy(op->get_output_roi().cbegin(), op->get_output_roi().cend(), std::back_inserter(out_shape)); - return {out_shape}; -} - -template -void shape_infer(const ROIPooling* op, const std::vector& input_shapes, std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/roll_shape_inference.hpp b/src/core/shape_inference/include/roll_shape_inference.hpp index c985b2abff0..9ef3bd1fb48 100644 --- a/src/core/shape_inference/include/roll_shape_inference.hpp +++ b/src/core/shape_inference/include/roll_shape_inference.hpp @@ -12,10 +12,10 @@ namespace ov { namespace op { namespace v7 { -template -std::vector shape_infer(const Roll* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const Roll* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); const auto& data_pshape = input_shapes[0]; @@ -39,7 +39,7 @@ std::vector shape_infer(const Roll* op, "Axes must be a scalar or 1D tensor."); if (data_pshape.rank().is_static()) { - if (const auto& axes = get_input_const_data_as(op, 2, constant_data)) { + if (const auto axes = get_input_const_data_as(op, 2, ta)) { OPENVINO_SUPPRESS_DEPRECATED_START ov::normalize_axes(op, data_pshape.size(), *axes); OPENVINO_SUPPRESS_DEPRECATED_END @@ -48,15 +48,6 @@ std::vector shape_infer(const Roll* op, return {data_pshape}; } - -template -void shape_infer(const Roll* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); -} - } // namespace v7 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/scatter_elements_update_shape_inference.hpp 
b/src/core/shape_inference/include/scatter_elements_update_shape_inference.hpp index a8ea0173cda..d434afa7b34 100644 --- a/src/core/shape_inference/include/scatter_elements_update_shape_inference.hpp +++ b/src/core/shape_inference/include/scatter_elements_update_shape_inference.hpp @@ -9,13 +9,12 @@ #include "utils.hpp" namespace ov { - namespace op { -namespace util { -template -std::vector shape_infer(const util::ScatterElementsUpdateBase* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { + +template > +std::vector shape_infer(const util::ScatterElementsUpdateBase* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); const auto& data_shape = input_shapes[0]; @@ -51,7 +50,7 @@ std::vector shape_infer(const util::ScatterElementsUpdateBase* op, updates_shape); if (data_shape.rank().is_static()) { - if (const auto axis_input = get_input_const_data_as(op, 3, constant_data)) { + if (const auto axis_input = get_input_const_data_as(op, 3, ta)) { OPENVINO_SUPPRESS_DEPRECATED_START ov::normalize_axis(op, (*axis_input)[0], data_rank); OPENVINO_SUPPRESS_DEPRECATED_END @@ -59,23 +58,13 @@ std::vector shape_infer(const util::ScatterElementsUpdateBase* op, } return {data_shape}; } -} // namespace util -namespace v3 { -template -void shape_infer(const ScatterElementsUpdate* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = util::shape_infer(op, input_shapes, constant_data); -} -} // namespace v3 + namespace v12 { -template -void shape_infer(const ScatterElementsUpdate* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = util::shape_infer(op, input_shapes, constant_data); +template > +std::vector shape_infer(const ScatterElementsUpdate* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = 
make_tensor_accessor()) { + return ov::op::shape_infer(op, input_shapes, ta); } } // namespace v12 } // namespace op diff --git a/src/core/shape_inference/include/scatter_nd_base_shape_inference.hpp b/src/core/shape_inference/include/scatter_nd_base_shape_inference.hpp index 01aa4516380..71778e11511 100644 --- a/src/core/shape_inference/include/scatter_nd_base_shape_inference.hpp +++ b/src/core/shape_inference/include/scatter_nd_base_shape_inference.hpp @@ -11,8 +11,8 @@ namespace ov { namespace op { -template -std::vector shape_infer(const util::ScatterNDBase* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const util::ScatterNDBase* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); const auto& inputs_shape = input_shapes[util::ScatterNDBase::INPUTS]; const auto& indices_shape = input_shapes[util::ScatterNDBase::INDICES]; @@ -65,12 +65,5 @@ std::vector shape_infer(const util::ScatterNDBase* op, const std::vector return {inputs_shape}; } - -template -void shape_infer(const util::ScatterNDBase* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/select_shape_inference.hpp b/src/core/shape_inference/include/select_shape_inference.hpp index 04b87e8d229..9c40baab5dc 100644 --- a/src/core/shape_inference/include/select_shape_inference.hpp +++ b/src/core/shape_inference/include/select_shape_inference.hpp @@ -6,41 +6,46 @@ #include +#include "utils.hpp" + namespace ov { namespace op { namespace v1 { -template -void shape_infer(const Select* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 3 && output_shapes.size() == 1); +template > +std::vector shape_infer(const Select* op, const std::vector& input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 3); const auto& 
broadcast_spec = op->get_auto_broadcast(); - auto& result_shape = output_shapes[0]; + TRShape result_shape; if (broadcast_spec.m_type == op::AutoBroadcastType::PDPD) { result_shape = input_shapes[1]; // 'then' tensor // in PDPD type, Broadcast-merging 'else' into 'then' one way not each other. NODE_VALIDATION_CHECK(op, - T::broadcast_merge_into(result_shape, input_shapes[2], broadcast_spec), + TRShape::broadcast_merge_into(result_shape, input_shapes[2], broadcast_spec), "'Else' tensor shape is not broadcastable."); NODE_VALIDATION_CHECK(op, - T::broadcast_merge_into(result_shape, input_shapes[0], broadcast_spec), + TRShape::broadcast_merge_into(result_shape, input_shapes[0], broadcast_spec), "'Cond' tensor shape is not broadcastable."); } else { result_shape = input_shapes[2]; for (int input_port = 1; input_port >= 0; input_port--) { if (broadcast_spec.m_type == op::AutoBroadcastType::NONE) { NODE_VALIDATION_CHECK(op, - T::merge_into(result_shape, input_shapes[input_port]), + TRShape::merge_into(result_shape, input_shapes[input_port]), "Argument shapes are inconsistent."); } else if (broadcast_spec.m_type == op::AutoBroadcastType::NUMPY) { - NODE_VALIDATION_CHECK(op, - T::broadcast_merge_into(result_shape, input_shapes[input_port], broadcast_spec), - "Argument shapes are inconsistent."); + NODE_VALIDATION_CHECK( + op, + TRShape::broadcast_merge_into(result_shape, input_shapes[input_port], broadcast_spec), + "Argument shapes are inconsistent."); } else { NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification"); } } } + + return {result_shape}; } } // namespace v1 diff --git a/src/core/shape_inference/include/shape_nodes.hpp b/src/core/shape_inference/include/shape_nodes.hpp index 9dd89337700..eaa7e5ba0f0 100644 --- a/src/core/shape_inference/include/shape_nodes.hpp +++ b/src/core/shape_inference/include/shape_nodes.hpp @@ -10,34 +10,33 @@ #include "utils.hpp" -template -void shape_infer(const ov::opset1::Reshape* op, - const std::vector& 
input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); - std::vector output_pattern; - bool status = get_data_as_int64(1, op, output_pattern, constant_data); - NODE_VALIDATION_CHECK(op, status, "Shape inference lacks input data"); +template > +std::vector shape_infer(const ov::op::v1::Reshape* op, + const std::vector& input_shapes, + const ov::ITensorAccessor& ta = ov::make_tensor_accessor()) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); + auto output_pattern = ov::op::get_input_const_data_as(op, 1, ta); + NODE_VALIDATION_CHECK(op, output_pattern, "Shape inference lacks input data"); auto& input_shape = input_shapes[0]; OPENVINO_ASSERT(input_shape.is_static()); + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; - output_shape.resize(output_pattern.size()); + output_shape.resize(output_pattern->size()); auto output_rank = input_shapes[1].size() == 0 ? 
0 : input_shapes[1][0]; if (output_rank == 0 && output_shape.size() != 0) { - output_pattern.clear(); - OPENVINO_ASSERT(output_pattern.size() == 1); - NODE_VALIDATION_CHECK(op, output_pattern[0] == 1, "The value of scalar shape pattern should be equal to 1!"); + output_pattern->clear(); + OPENVINO_ASSERT(output_pattern->size() == 1); + NODE_VALIDATION_CHECK(op, (*output_pattern)[0] == 1, "The value of scalar shape pattern should be equal to 1!"); } auto special_zero = op->get_special_zero(); size_t output_product(1); int64_t minus_one_idx = -1; - for (size_t i = 0; i < output_pattern.size(); ++i) { - if (output_pattern[i] == -1) { // resolving everything except -1 + for (size_t i = 0; i < output_pattern->size(); ++i) { + if ((*output_pattern)[i] == -1) { // resolving everything except -1 NODE_VALIDATION_CHECK(op, minus_one_idx == -1, "More than one element of output shape pattern has value of -1"); @@ -45,7 +44,7 @@ void shape_infer(const ov::opset1::Reshape* op, continue; } - auto pattern_dim = output_pattern[i]; + auto pattern_dim = (*output_pattern)[i]; if (pattern_dim == 0 && special_zero) { NODE_VALIDATION_CHECK(op, i < input_shape.size(), "'0' dimension is out of range"); output_shape[i] = input_shape[i]; @@ -59,7 +58,7 @@ void shape_infer(const ov::opset1::Reshape* op, } size_t input_product(1); for (size_t i = 0; i < input_shape.size(); ++i) { - if (i < output_pattern.size() && output_pattern[i] == 0 && special_zero) + if (i < output_pattern->size() && (*output_pattern)[i] == 0 && special_zero) continue; input_product = input_shape[i].get_length() * input_product; } @@ -81,7 +80,7 @@ void shape_infer(const ov::opset1::Reshape* op, } } - size_t zero_dims = std::count_if(output_pattern.begin(), output_pattern.end(), [](const int64_t& dim) { + size_t zero_dims = std::count_if(output_pattern->begin(), output_pattern->end(), [](const int64_t& dim) { return dim == 0; }); @@ -94,41 +93,44 @@ void shape_infer(const ov::opset1::Reshape* op, output_shape, " is 
incompatible with input shape ", input_shape); + + return output_shapes; } -template -inline void dynamic_shape(T& output_shape) { - OPENVINO_THROW("This code should be executed only for PartialShape class"); -} +namespace ov { +namespace op { +namespace shape_of { +template > +std::vector shape_infer(const Node* op, std::vector input_shapes) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); + const auto& input_shape = input_shapes[0]; + const auto& input_rank = input_shape.rank(); -template <> -inline void dynamic_shape(ov::PartialShape& output_shape) { - output_shape = ov::PartialShape::dynamic(); -} + auto output_shapes = std::vector(1); -template -void shape_of_shape_infer(const T& input_shape, T& output_shape) { - if (input_shape.rank().is_static()) { - const auto& rank = input_shape.size(); - if (rank) { - output_shape.resize(1); - output_shape[0] = rank; - } else { - output_shape.clear(); + if (input_rank.is_static()) { + if (input_shape.size()) { + output_shapes[0].emplace_back(input_shape.size()); } } else { - dynamic_shape(output_shape); + output_shapes[0] = PartialShape::dynamic(); } + return output_shapes; } +} // namespace shape_of -template -void shape_infer(const ov::opset1::ShapeOf* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1); - shape_of_shape_infer(input_shapes[0], output_shapes[0]); +namespace v0 { +template > +std::vector shape_infer(const ShapeOf* op, const std::vector& input_shapes) { + return shape_of::shape_infer(op, input_shapes); } +} // namespace v0 -template -void shape_infer(const ov::opset3::ShapeOf* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 1 && output_shapes.size() == 1); - shape_of_shape_infer(input_shapes[0], output_shapes[0]); +namespace v3 { +template > +std::vector shape_infer(const ShapeOf* op, const std::vector& input_shapes) { + return 
shape_of::shape_infer(op, input_shapes); } +} // namespace v3 +} // namespace op +} // namespace ov diff --git a/src/core/shape_inference/include/shuffle_channels_shape_inference.hpp b/src/core/shape_inference/include/shuffle_channels_shape_inference.hpp index 5067f97224b..1d5c93abdcc 100644 --- a/src/core/shape_inference/include/shuffle_channels_shape_inference.hpp +++ b/src/core/shape_inference/include/shuffle_channels_shape_inference.hpp @@ -7,13 +7,14 @@ #include #include "openvino/core/validation_util.hpp" +#include "utils.hpp" namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const ShuffleChannels* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const ShuffleChannels* op, const std::vector& input_shapes) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); const auto& group = op->get_group(); @@ -22,7 +23,7 @@ std::vector shape_infer(const ShuffleChannels* op, const std::vector(1, input_shape); + auto output_shapes = std::vector(1, input_shape); if (input_shape_rank.is_static()) { NODE_VALIDATION_CHECK(op, input_shape.size() >= 1, "The input tensor's shape is expected to be at least 1D."); @@ -43,14 +44,6 @@ std::vector shape_infer(const ShuffleChannels* op, const std::vector -void shape_infer(const ShuffleChannels* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v0 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/slice_shape_inference.hpp b/src/core/shape_inference/include/slice_shape_inference.hpp index 4894b8d1686..83a4c4fef3d 100644 --- a/src/core/shape_inference/include/slice_shape_inference.hpp +++ b/src/core/shape_inference/include/slice_shape_inference.hpp @@ -41,12 +41,11 @@ struct AxesMap { namespace v8 { -template -void shape_infer(const Slice* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - using 
DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const Slice* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + using DimType = typename T::value_type; const auto& num_of_inputs = input_shapes.size(); @@ -54,7 +53,6 @@ void shape_infer(const Slice* op, num_of_inputs == 4 || num_of_inputs == 5, "Slice has to have 4 or 5 inputs. Got: ", num_of_inputs); - NODE_VALIDATION_CHECK(op, output_shapes.size() == 1); const auto& input_shape = input_shapes[0]; const auto& input_rank = input_shape.rank(); @@ -88,17 +86,18 @@ void shape_infer(const Slice* op, start_shape.compatible(stop_shape) && start_shape.compatible(step_shape) && stop_shape.compatible(step_shape), "Slice `start`, `stop`, `step` inputs must have compatible shapes."); + auto output_shapes = std::vector(1); // it is not possible to define output shape if input data shape rank is undefined // even the lengths of begin, end, or strides are defined if (input_rank.is_dynamic()) { output_shapes[0] = PartialShape::dynamic(); - return; + return output_shapes; } // compute constant values of begin, end, and strides if possible - const auto start = slice::get_input_bounds(op, 1, constant_data); - const auto stop = slice::get_input_bounds(op, 2, constant_data); - const auto steps = get_input_const_data_as(op, 3, constant_data); + const auto start = get_input_bounds(op, 1, ta); + const auto stop = get_input_bounds(op, 2, ta); + const auto steps = get_input_const_data_as(op, 3, ta); slice::AxesMap axes_map; if (input_shapes.size() > 4) { @@ -106,7 +105,7 @@ void shape_infer(const Slice* op, input_shapes[4].compatible(start_shape), "Slice `axes` input must have compatible shape with `start`, `stop`, `step` inputs."); - if (auto axes = get_input_const_data_as(op, 4, constant_data)) { + if (auto axes = get_input_const_data_as(op, 4, ta)) { OPENVINO_SUPPRESS_DEPRECATED_START ov::normalize_axes(op, input_shape.rank().get_length(), *axes); 
OPENVINO_SUPPRESS_DEPRECATED_END @@ -150,6 +149,7 @@ void shape_infer(const Slice* op, out.emplace_back(0, input_dim.get_max_length()); } } + return output_shapes; } } // namespace v8 } // namespace op diff --git a/src/core/shape_inference/include/slice_shape_inference_utils.hpp b/src/core/shape_inference/include/slice_shape_inference_utils.hpp index a12adfe7f71..2797163e408 100644 --- a/src/core/shape_inference/include/slice_shape_inference_utils.hpp +++ b/src/core/shape_inference/include/slice_shape_inference_utils.hpp @@ -11,107 +11,6 @@ #include "validation_util.hpp" namespace ov { -namespace internal { -/** - * \brief Check if value of type T has got maximum value of type U. - * - * \tparam T Input value type - * \tparam U Type to get its minimum for comparision. Default same as T. - * - * \param value Input value. - * - * \return True if input value has got maximum value of type U otherwise false. - */ -template -constexpr bool is_max(const T& value) { - return std::numeric_limits::max() == value; -} - -/** - * \brief Check if value of type T has got minimum value of type U. - * - * \tparam T Input value type. - * \tparam U Type to get its minimum for comparision. Default same as T. - * - * \param value Input value. - * - * \return True if input value has got minimum value of type U otherwise false. - */ -template -constexpr bool is_min(const T& value) { - return std::numeric_limits::min() == value; -} -} // namespace internal - -namespace element { -/** - * \brief Check if value has got maximum value of ov::element::Type_t - * - * \tparam T Input value type. - * - * \param type ov::element type to get its maximum. - * \param value Input value for check. - * - * \return True if input value has got maximum number specified by ov::element type otherwise false. 
- */ -template -bool is_max_of(const element::Type_t& type, const T& value) { - switch (type) { - case element::i32: - return internal::is_max::value_type>(value); - case element::i64: - return internal::is_max::value_type>(value); - default: - return false; - } -} - -/** - * \brief Check if value has got minimum value of ov::element::Type_t - * - * \tparam T Input value type. - * - * \param type ov::element type to get its minimum. - * \param value Input value for check. - * - * \return True if input value has got minimum number specified by ov::element type otherwise false. - */ -template -bool is_min_of(const element::Type_t type, const T& value) { - switch (type) { - case element::i32: - return internal::is_min::value_type>(value); - case element::i64: - return internal::is_min::value_type>(value); - default: - return false; - } -} - -/** - * \brief Checks input value for element type maximum or minimum and return limit or value. - * - * \tparam T Type of input value. - * \tparam U Type of return value. Default same as T. - * - * \param type Type of ov::element::Type_t - * \param value Input value for check. - * - * \return If value is maximum or minimum get limit of U otherwise value as U. - */ -template -U get_value_or_limit_of(const element::Type_t& type, const T& value) { - if (is_min_of(type, value)) { - return std::numeric_limits::min(); - } else if (is_max_of(type, value)) { - return std::numeric_limits::max(); - } else { - return static_cast(value); - } -} - -} // namespace element - namespace op { namespace slice { @@ -135,14 +34,14 @@ inline int64_t get_sliced_value(const int64_t dim, const int64_t start, const in constexpr int64_t min_bound = 0; const auto& norm_dim = dim::is_inf_bound(dim) ? 
std::numeric_limits::max() : dim; - const auto is_norm_dim_max = ov::internal::is_max(norm_dim); + const auto is_norm_dim_max = ov::util::is_max(norm_dim); const auto is_start_lt_min_bound = start < min_bound; const auto are_bounds_diff_sign = is_start_lt_min_bound != (stop < 0); - const auto is_start_max = ov::internal::is_max(start); - const auto is_start_limit = is_start_max || ov::internal::is_min(start); - const auto is_stop_max = ov::internal::is_max(stop); + const auto is_start_max = ov::util::is_max(start); + const auto is_start_limit = is_start_max || ov::util::is_min(start); + const auto is_stop_max = ov::util::is_max(stop); const auto any_bound_max = is_start_max || is_stop_max; // Prepare bounds for sliced value calculation. int64_t lb, ub; @@ -187,21 +86,6 @@ inline int64_t get_sliced_value(const int64_t dim, const int64_t start, const in } } -// To get element type from constant or tensor. -inline element::Type get_input_const_element_type(const ov::Node* op, - size_t idx, - const std::map& constant_data = {}) { - if (constant_data.count(idx)) { - return constant_data.at(idx)->get_element_type(); - OPENVINO_SUPPRESS_DEPRECATED_START - } else if (const auto& constant = ov::get_constant_from_source(op->input_value(idx))) { - OPENVINO_SUPPRESS_DEPRECATED_END - return constant->get_element_type(); - } else { - return element::undefined; - } -} - using Bounds = std::pair; //!< Alias to dimension bounds for slice. /** @@ -243,58 +127,6 @@ constexpr bool is_ub_within_dim(const int64_t ub, const TDim& dim) { cmp::lt(ub, dim.get_max_length()); } -/** - * \brief Get the input bounds from constant input (constant map) or evaluate bunds - * and return them as vector of pairs (lower, upper). - * - * \tparam TShape Shape type. - * - * \param op Operator pointer. - * \param idx Input index. - * \param constant_data Map with constant data. - * - * \return Return vector of slice::Bounds. 
- */ -template > -std::unique_ptr get_input_bounds(const ov::Node* op, - size_t idx, - const std::map& constant_data) { - // Helper to create TResult from lowers and uppers. - const auto make_bounds_vec = - [](const element::Type& et, const std::vector& lowers, const std::vector& uppers) { - TResult out; - out.reserve(lowers.size()); - std::transform(lowers.begin(), - lowers.end(), - uppers.begin(), - std::back_inserter(out), - [&et](int64_t lb, int64_t ub) { - return std::make_pair(element::get_value_or_limit_of(et, lb), - element::get_value_or_limit_of(et, ub)); - }); - return out; - }; - - std::unique_ptr out; - if (auto lowers = op::get_input_const_data_as(op, idx, constant_data)) { - const auto& et = get_input_const_element_type(op, idx, constant_data); - out.reset(new TResult(make_bounds_vec(et, *lowers, *lowers))); - } else { - ov::Tensor lb, ub; - std::tie(lb, ub) = ov::evaluate_both_bounds(op->get_input_source_output(idx)); - - if (lb && ub) { - const auto& et = op->get_input_element_type(idx); - auto lowers = std::make_shared(lb.get_element_type(), lb.get_shape(), lb.data()) - ->cast_vector(); - auto uppers = std::make_shared(ub.get_element_type(), ub.get_shape(), ub.data()) - ->cast_vector(); - out.reset(new TResult(make_bounds_vec(et, lowers, uppers))); - } - } - return out; -} - /** * \brief Make sliced dimension for input dimension by step from start to stop bounds. 
* diff --git a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp index 43005389430..9adae645222 100644 --- a/src/core/shape_inference/include/space_to_batch_shape_inference.hpp +++ b/src/core/shape_inference/include/space_to_batch_shape_inference.hpp @@ -7,19 +7,17 @@ #include #include "dimension_util.hpp" -#include "openvino/core/validation_util.hpp" #include "openvino/op/space_to_batch.hpp" -#include "openvino/opsets/opset2.hpp" #include "utils.hpp" namespace ov { namespace op { namespace v1 { -template -std::vector shape_infer(const SpaceToBatch* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const SpaceToBatch* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { using namespace ov::util; using TVal = typename TShape::value_type::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); @@ -29,10 +27,10 @@ std::vector shape_infer(const SpaceToBatch* op, const auto& pads_begin_shape = input_shapes[2]; const auto& pads_end_shape = input_shapes[3]; - auto inputs_same_ps = pads_begin_shape; + auto inputs_same_ps = static_cast(pads_begin_shape); NODE_VALIDATION_CHECK( op, - TShape::merge_into(inputs_same_ps, pads_end_shape) && TShape::merge_into(inputs_same_ps, block_shape), + TRShape::merge_into(inputs_same_ps, pads_end_shape) && TRShape::merge_into(inputs_same_ps, block_shape), "block_shape, pads_begin and pads_end inputs must have the same shape. 
Got: ", block_shape, ", ", @@ -54,10 +52,10 @@ std::vector shape_infer(const SpaceToBatch* op, data_rank_size, ")"); - TShape out_shape; + TRShape out_shape; out_shape.reserve(data_rank_size); - auto blocks = get_input_const_data_as(op, 1, constant_data); + auto blocks = get_input_const_data_as(op, 1, ta); if (blocks) { TVal block_prod = std::accumulate(begin(*blocks), end(*blocks), int64_t(1), std::multiplies()); out_shape.push_back(data_shape[0] * block_prod); @@ -65,13 +63,13 @@ std::vector shape_infer(const SpaceToBatch* op, out_shape.emplace_back(dim::inf_bound); } - std::vector pads_begin, pads_end; - if (blocks && get_data_as_int64(2, op, pads_begin, constant_data) && - get_data_as_int64(3, op, pads_end, constant_data)) { + auto pads_begin = get_input_const_data_as(op, 2, ta); + auto pads_end = get_input_const_data_as(op, 3, ta); + if (blocks && pads_begin && pads_end) { for (auto idx = spatial_dim_offset; idx < data_rank_size; ++idx) { NODE_VALIDATION_CHECK(op, (*blocks)[idx] > 0, "block_shape values must be greater than 0"); - const auto padded_dim = data_shape[idx] + static_cast(pads_begin[idx] + pads_end[idx]); + const auto padded_dim = data_shape[idx] + static_cast((*pads_begin)[idx] + (*pads_end)[idx]); const auto divisor = static_cast((*blocks)[idx]); if (static_cast(padded_dim.get_max_length()) == dim::inf_bound) { @@ -91,15 +89,6 @@ std::vector shape_infer(const SpaceToBatch* op, return {PartialShape::dynamic()}; } } - -template -void shape_infer(const SpaceToBatch* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = shape_infer(op, input_shapes, constant_data); -} - } // namespace v1 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/space_to_depth_shape_inference.hpp b/src/core/shape_inference/include/space_to_depth_shape_inference.hpp index 7fff113b4d5..0f0bbfede0c 100644 --- 
a/src/core/shape_inference/include/space_to_depth_shape_inference.hpp +++ b/src/core/shape_inference/include/space_to_depth_shape_inference.hpp @@ -14,8 +14,8 @@ namespace ov { namespace op { namespace v0 { -template -std::vector shape_infer(const ov::op::v0::SpaceToDepth* op, const std::vector& input_shapes) { +template > +std::vector shape_infer(const ov::op::v0::SpaceToDepth* op, const std::vector& input_shapes) { using TVal = typename TShape::value_type::value_type; NODE_VALIDATION_CHECK(op, input_shapes.size() == 1); @@ -31,7 +31,7 @@ std::vector shape_infer(const ov::op::v0::SpaceToDepth* op, const std::v const auto& block_size = op->get_block_size(); NODE_VALIDATION_CHECK(op, block_size > 0, "The block size must be greater than 0 ", block_size); - auto out_shape = data_shape; + TRShape out_shape = data_shape; out_shape[1] *= static_cast(std::pow(block_size, data_shape.size() - spatial_dim_offset)); const auto divisor = static_cast(block_size); @@ -44,14 +44,6 @@ std::vector shape_infer(const ov::op::v0::SpaceToDepth* op, const std::v return {PartialShape::dynamic()}; } } - -template -void shape_infer(const ov::op::v0::SpaceToDepth* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - output_shapes = shape_infer(op, input_shapes); -} - } // namespace v0 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/split_shape_inference.hpp b/src/core/shape_inference/include/split_shape_inference.hpp index 103415e4427..5ad007b79cd 100644 --- a/src/core/shape_inference/include/split_shape_inference.hpp +++ b/src/core/shape_inference/include/split_shape_inference.hpp @@ -4,9 +4,8 @@ #pragma once -#include -#include - +#include "openvino/core/validation_util.hpp" +#include "openvino/op/split.hpp" #include "utils.hpp" namespace ov { @@ -23,37 +22,33 @@ namespace v1 { * * \param op Split operator pointer. * \param input_shapes Split input shapes. - * \param output_shapes Split output shapes. 
- * \param constant_data Map of constant data. + * \param ta Tensor accessor to constant data. */ -template -void shape_infer(const Split* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { +template > +std::vector shape_infer(const Split* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, (input_shapes.size() == 2)); - output_shapes.clear(); - const auto& data_ps = input_shapes[0]; const auto& axis_ps = input_shapes[1]; NODE_VALIDATION_CHECK(op, axis_ps.rank().compatible(0), "'axis' input must be a scalar. Got: ", axis_ps); - auto each_output_shape = data_ps; + TRShape each_output_shape = data_ps; const auto data_rank = data_ps.rank(); - std::vector axes_values; + auto axes_values = get_input_const_data_as(op, 1, ta); const auto& num_splits = op->get_num_splits(); - if (get_data_as_int64(1, op, axes_values, constant_data) && data_rank.is_static()) { + if (axes_values && data_rank.is_static()) { NODE_VALIDATION_CHECK(op, - axes_values.size() == 1, + axes_values->size() == 1, "a scalar axis value is expected. 
Got: ", - axes_values.size(), + axes_values->size(), " axes"); OPENVINO_SUPPRESS_DEPRECATED_START - auto axis = ov::normalize_axis(op, axes_values[0], data_rank); + auto axis = ov::normalize_axis(op, (*axes_values)[0], data_rank); OPENVINO_SUPPRESS_DEPRECATED_END if (data_ps[axis].is_static()) { @@ -90,7 +85,7 @@ void shape_infer(const Split* op, each_output_shape = ov::PartialShape::dynamic(data_ps.rank()); } - output_shapes.resize(num_splits, each_output_shape); + return {num_splits, each_output_shape}; } } // namespace v1 diff --git a/src/core/shape_inference/include/squeeze_shape_inference.hpp b/src/core/shape_inference/include/squeeze_shape_inference.hpp index fa6c19a8732..774542b3edb 100644 --- a/src/core/shape_inference/include/squeeze_shape_inference.hpp +++ b/src/core/shape_inference/include/squeeze_shape_inference.hpp @@ -18,22 +18,20 @@ namespace v0 { * * \param op Squeeze operator pointer. * \param input_shapes Squeeze input shapes. - * \param output_shapes Output shapes result of squeeze shape inference. - * \param constant_data Map of constant data. + * \param ta Tensor accessor to constant data. 
*/ -template -void shape_infer(const Squeeze* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const Squeeze* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + using DimType = typename T::value_type; - NODE_VALIDATION_CHECK(op, output_shapes.size() == 1); const auto number_of_inputs = input_shapes.size(); OPENVINO_ASSERT(!input_shapes.empty()); const auto& arg_shape = input_shapes[0]; const auto& arg_rank = arg_shape.rank(); + auto output_shapes = std::vector(1); auto& output_shape = output_shapes[0]; std::unique_ptr> unique_axes; @@ -51,12 +49,12 @@ void shape_infer(const Squeeze* op, std::vector axes; if (arg_rank.is_static() && axes_shape.is_static()) { - if (get_data_as_int64(1, op, axes, constant_data)) { + if (auto axes = get_input_const_data_as(op, 1, ta)) { // The values of `axes` input are known OPENVINO_SUPPRESS_DEPRECATED_START - normalize_axes(op, arg_rank.get_length(), axes); + normalize_axes(op, arg_rank.get_length(), *axes); OPENVINO_SUPPRESS_DEPRECATED_END - unique_axes.reset(new std::set(axes.cbegin(), axes.cend())); + unique_axes.reset(new std::set(axes->cbegin(), axes->cend())); } else if (arg_rank.get_length() > 0 && shape_size(axes_shape.to_shape()) == 1) { // The `axes` input must be a Parameter with single element to ensure uniqueness of axes // only rank is deduced @@ -71,7 +69,7 @@ void shape_infer(const Squeeze* op, " doesn't contain squeezable dimension," " but axes input is expected to have one element."); output_shape = PartialShape::dynamic(arg_rank.get_length() - 1); - return; + return output_shapes; } } } else { @@ -122,6 +120,7 @@ void shape_infer(const Squeeze* op, } else { output_shape = PartialShape::dynamic(); } + return output_shapes; } } // namespace v0 } // namespace op diff --git 
a/src/core/shape_inference/include/strided_slice_shape_inference.hpp b/src/core/shape_inference/include/strided_slice_shape_inference.hpp index 4b7a8c66955..2a870f158bf 100644 --- a/src/core/shape_inference/include/strided_slice_shape_inference.hpp +++ b/src/core/shape_inference/include/strided_slice_shape_inference.hpp @@ -14,15 +14,14 @@ namespace ov { namespace op { namespace v1 { -template -void shape_infer(const StridedSlice* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - using DimType = typename std::iterator_traits::value_type; +template > +std::vector shape_infer(const StridedSlice* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + using DimType = typename T::value_type; static constexpr std::array shape_names{"Begin", "End", "Strides"}; - NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 4) && output_shapes.size() == 1); + NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3 || input_shapes.size() == 4)); const auto& input_shape = input_shapes[0]; @@ -38,12 +37,12 @@ void shape_infer(const StridedSlice* op, const auto& begin_shape = input_shapes[1]; const auto& end_shape = input_shapes[2]; - + auto output_shapes = std::vector(1); // it is not possible to define output shape if input data shape rank is undefined // even the lengths of begin, end, or strides are defined if (input_shape.rank().is_dynamic()) { output_shapes[0] = ov::PartialShape::dynamic(); - return; + return output_shapes; } auto input_rank = input_shape.size(); @@ -59,12 +58,12 @@ void shape_infer(const StridedSlice* op, }; // compute constant values of begin, end, and strides if possible - const auto begin = slice::get_input_bounds(op, 1, constant_data); - const auto end = slice::get_input_bounds(op, 2, constant_data); + const auto begin = get_input_bounds(op, 1, ta); + const auto end = get_input_bounds(op, 2, ta); std::unique_ptr> strides; if 
(input_shapes.size() > 3) { - strides = get_input_const_data_as(op, 3, constant_data); + strides = get_input_const_data_as(op, 3, ta); } else if (begin) { // generate default strides strides.reset(new std::vector(begin->size(), 1)); @@ -90,7 +89,7 @@ void shape_infer(const StridedSlice* op, // if number of axes is undefined we cannot say about output rank if (number_axes < 0) { output_shapes[0] = ov::PartialShape::dynamic(); - return; + return output_shapes; } // collect indices of axes by which the shape needs to be changed @@ -196,6 +195,7 @@ void shape_infer(const StridedSlice* op, for (; input_shape_idx < input_shape.rank().get_length(); ++input_shape_idx) { out.push_back(input_shape[input_shape_idx]); } + return output_shapes; } } // namespace v1 } // namespace op diff --git a/src/core/shape_inference/include/topk_shape_inference.hpp b/src/core/shape_inference/include/topk_shape_inference.hpp index bb7fad47c30..8bd6ca9f39a 100644 --- a/src/core/shape_inference/include/topk_shape_inference.hpp +++ b/src/core/shape_inference/include/topk_shape_inference.hpp @@ -31,6 +31,7 @@ struct GetK { return static_cast(k); } }; +} // namespace util /** * \brief TopK shape inference * @@ -42,10 +43,10 @@ struct GetK { * * \return Vector of output shapes for */ -template -std::vector shape_infer(const util::TopKBase* op, - const std::vector& input_shapes, - const std::map& constant_data = {}) { +template > +std::vector shape_infer(const util::TopKBase* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { using TDim = typename TShape::value_type; using TDimValue = typename TDim::value_type; @@ -66,21 +67,21 @@ std::vector shape_infer(const util::TopKBase* op, const auto& k_shape = input_shapes[1]; NODE_SHAPE_INFER_CHECK(op, input_shapes, k_shape.rank().compatible(0), "The 'K' input must be a scalar."); - auto output_shape = input_shape; + TRShape output_shape = input_shape; if (input_shape.rank().is_static()) { 
OPENVINO_SUPPRESS_DEPRECATED_START const auto normalized_axis = ov::normalize_axis(op, op->get_provided_axis(), input_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END auto& dim_axis = output_shape[normalized_axis]; - if (auto k_as_shape = get_input_const_data_as_shape(op, 1, constant_data, GetK(op))) { - NODE_SHAPE_INFER_CHECK(op, - input_shapes, - k_as_shape->size() == 1, - "Only one value (scalar) should be provided as the 'K' input to TopK", - " (got ", - k_as_shape->size(), - " elements)."); + if (auto k_as_shape = + get_input_const_data_as_shape(op, 1, tensor_accessor, util::GetK(op))) { + NODE_VALIDATION_CHECK(op, + k_as_shape->size() == 1, + "Only one value (scalar) should be provided as the 'K' input to TopK", + " (got ", + k_as_shape->size(), + " elements)."); const auto& k = (*k_as_shape)[0]; if (k.is_static()) { @@ -105,39 +106,7 @@ std::vector shape_infer(const util::TopKBase* op, } } - return std::vector(2, output_shape); + return {2, output_shape}; } -} // namespace util - -namespace v1 { - -/** - * \brief TopK shape inference - * - * \tparam TShape Type of shape. - * - * \param op Pointer to TopK operator. - * \param input_shapes Input shapes of TopK. - * \param output_shapes Output shapes of TopK - * \param constant_data Map of constant data. Default empty. 
- */ -template -void shape_infer(const TopK* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = util::shape_infer(op, input_shapes, constant_data); -} -} // namespace v1 - -namespace v3 { -template -void shape_infer(const TopK* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map& constant_data = {}) { - output_shapes = util::shape_infer(op, input_shapes, constant_data); -} -} // namespace v3 } // namespace op } // namespace ov diff --git a/src/core/shape_inference/include/transpose_shape_inference.hpp b/src/core/shape_inference/include/transpose_shape_inference.hpp index c3c74d2c39d..46e41896023 100644 --- a/src/core/shape_inference/include/transpose_shape_inference.hpp +++ b/src/core/shape_inference/include/transpose_shape_inference.hpp @@ -21,8 +21,8 @@ namespace v1 { * * \return Output shape */ -template -T calc_output_shape(const Transpose* const op, const T& input_shape, std::vector& axes_order) { +template > +TRShape calc_output_shape(const Transpose* const op, const T& input_shape, std::vector& axes_order) { const auto output_rank = input_shape.size(); if (axes_order.empty()) { @@ -40,7 +40,7 @@ T calc_output_shape(const Transpose* const op, const T& input_shape, std::vector OPENVINO_SUPPRESS_DEPRECATED_END } - T output_shape; + TRShape output_shape; for (auto&& axis : axes_order) { output_shape.push_back(input_shape[axis]); } @@ -49,33 +49,32 @@ T calc_output_shape(const Transpose* const op, const T& input_shape, std::vector } /** - * \brief Do transpose inference on input and output shapes. + * \brief Do transpose shape inference on input and output shapes. * - * \tparam T Type of inference shapes. + * \tparam TShape Type of input shapes. + * \tparam TRShape Type of return shapes. * - * \param op Transpose operator pointer. - * \param input_shapes Input shapes of transpose. 
- * \param output_shapes Output shapes of transpose which be modified by inference. - * \param constant_data Map of constant data. + * \param op Transpose operator pointer. + * \param input_shapes Input shapes of transpose. + * \param tensor_accessor Accessor to constant data. */ -template -void shape_infer(const Transpose* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { +template > +std::vector shape_infer(const Transpose* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { const auto& input_shape = input_shapes[Transpose::ARG]; - auto& output_shape = output_shapes[Transpose::ARG_T]; - std::vector axes; - const auto has_order = get_data_as_int64(Transpose::ORDER, op, axes, constant_data); + const auto axes = get_input_const_data_as(op, Transpose::ORDER, tensor_accessor); - if (has_order && input_shape.rank().is_static()) { - output_shape = calc_output_shape(op, input_shape, axes); - } else if (has_order) { - output_shape = ov::PartialShape::dynamic(axes.size()); + auto output_shapes = std::vector(); + if (axes && input_shape.rank().is_static()) { + output_shapes.push_back(calc_output_shape(op, input_shape, *axes)); + } else if (axes) { + output_shapes.push_back(ov::PartialShape::dynamic(axes->size())); } else { - output_shape = ov::PartialShape::dynamic(input_shape.rank()); + output_shapes.push_back(ov::PartialShape::dynamic(input_shape.rank())); } + return output_shapes; } } // namespace v1 } // namespace op diff --git a/src/core/shape_inference/include/unsqueeze_shape_inference.hpp b/src/core/shape_inference/include/unsqueeze_shape_inference.hpp index 0ea9751f488..b66bb14a40a 100644 --- a/src/core/shape_inference/include/unsqueeze_shape_inference.hpp +++ b/src/core/shape_inference/include/unsqueeze_shape_inference.hpp @@ -20,23 +20,22 @@ void check_unsqueeze_axes_rank(const TOp* op, const Rank& rank) { OPENVINO_SUPPRESS_DEPRECATED_END } -template 
-void shape_infer(const Unsqueeze* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { - NODE_VALIDATION_CHECK(op, input_shapes.size() == 2 && output_shapes.size() == 1); +template > +std::vector shape_infer(const Unsqueeze* op, + const std::vector& input_shapes, + const ITensorAccessor& tensor_accessor = make_tensor_accessor()) { + NODE_VALIDATION_CHECK(op, input_shapes.size() == 2); check_unsqueeze_axes_rank(op, input_shapes[1].rank()); const auto& arg_shape = input_shapes[0]; + auto output_shapes = std::vector(1); auto& out_shape = output_shapes[0]; - std::vector axes_val; - const auto has_axes = get_data_as_int64(1, op, axes_val, constant_data); + const auto axes_val = get_input_const_data_as(op, 1, tensor_accessor); - if (has_axes && arg_shape.rank().is_static()) { - NODE_VALIDATION_CHECK(op, !axes_val.empty(), "'axes' input is mandatory"); + if (axes_val && arg_shape.rank().is_static()) { + NODE_VALIDATION_CHECK(op, !axes_val->empty(), "'axes' input is mandatory"); // Remove repeated axes on input - std::unordered_set tmp(axes_val.begin(), axes_val.end()); + std::unordered_set tmp(axes_val->begin(), axes_val->end()); std::vector unique_axes(tmp.begin(), tmp.end()); const auto expanded_rank = arg_shape.rank().get_length() + unique_axes.size(); @@ -69,6 +68,7 @@ void shape_infer(const Unsqueeze* op, } else { out_shape = ov::PartialShape::dynamic(); } + return output_shapes; } } // namespace v0 } // namespace op diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp index d79f1aee460..836d511ce30 100644 --- a/src/core/shape_inference/include/utils.hpp +++ b/src/core/shape_inference/include/utils.hpp @@ -10,46 +10,10 @@ #include "element_visitor.hpp" #include "openvino/core/bound_evaluation_util.hpp" +#include "ov_optional.hpp" #include "shape_infer_type_utils.hpp" #include "tensor_data_accessor.hpp" -template -void copy_shape_infer(const OpType* op, const 
std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, - input_shapes.size() == 1 && output_shapes.size() == 1, - "Incorrect number of input/output shapes"); - output_shapes[0] = input_shapes[0]; -} - -template -void first_input_passthrough_infer(const OpType* op, - const std::vector& input_shapes, - std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, - output_shapes.size() == 1 && input_shapes.size() >= 1, - "Incorrect number of input and output shapes"); - output_shapes[0] = input_shapes[0]; -} - -template -void eltwise_shape_infer(const OpType* op, const std::vector& input_shapes, std::vector& output_shapes) { - NODE_VALIDATION_CHECK(op, - input_shapes.size() == 2 && output_shapes.size() == 1, - "Incorrect number of input/output shapes"); - auto output_shape = input_shapes[0]; - const auto& autob = op->get_autob(); - if (autob.m_type == ov::op::AutoBroadcastType::NONE) { - NODE_VALIDATION_CHECK(op, T::merge_into(output_shape, input_shapes[1]), "Argument shapes are inconsistent."); - } else if (autob.m_type == ov::op::AutoBroadcastType::NUMPY || autob.m_type == ov::op::AutoBroadcastType::PDPD) { - NODE_VALIDATION_CHECK(op, - T::broadcast_merge_into(output_shape, input_shapes[1], autob), - "Argument shapes are inconsistent."); - } else { - NODE_VALIDATION_CHECK(op, false, "Unsupported auto broadcast specification"); - } - output_shapes[0] = output_shape; -} - namespace ov { struct TensorTransform : element::NotSupported { @@ -87,7 +51,7 @@ TResult get_raw_data_as(const element::Type_t et, const void* const ptr, const s auto out_it = std::inserter(out, out.end()); using namespace ov::element; - IfTypeOf::apply( + IfTypeOf::apply( et, ptr, size, @@ -139,6 +103,106 @@ TResult get_tensor_data_as(const Tensor& t, UnaryOperation&& func) { std::forward(func)); } +namespace util { +/** + * \brief Check if value of type T has got maximum value of type U. 
+ * + * \tparam T Input value type + * \tparam U Type to get its maximum for comparison. Default same as T. + * + * \param value Input value. + * + * \return True if input value has got maximum value of type U otherwise false. + */ +template +constexpr bool is_max(const T& value) { + return std::numeric_limits::max() == value; +} + +/** + * \brief Check if value of type T has got minimum value of type U. + * + * \tparam T Input value type. + * \tparam U Type to get its minimum for comparison. Default same as T. + * + * \param value Input value. + * + * \return True if input value has got minimum value of type U otherwise false. + */ +template +constexpr bool is_min(const T& value) { + return std::numeric_limits::min() == value; +} +} // namespace util + +namespace element { +/** + * \brief Check if value has got maximum value of ov::element::Type_t + * + * \tparam T Input value type. + * + * \param type ov::element type to get its maximum. + * \param value Input value for check. + * + * \return True if input value has got maximum number specified by ov::element type otherwise false. + */ +template +bool is_max_of(const element::Type_t& type, const T& value) { + switch (type) { + case element::i32: + return util::is_max::value_type>(value); + case element::i64: + return util::is_max::value_type>(value); + default: + return false; + } +} + +/** + * \brief Check if value has got minimum value of ov::element::Type_t + * + * \tparam T Input value type. + * + * \param type ov::element type to get its minimum. + * \param value Input value for check. + * + * \return True if input value has got minimum number specified by ov::element type otherwise false. 
+ */ +template +bool is_min_of(const element::Type_t type, const T& value) { + switch (type) { + case element::i32: + return util::is_min::value_type>(value); + case element::i64: + return util::is_min::value_type>(value); + default: + return false; + } +} + +/** + * \brief Checks input value for element type maximum or minimum and return limit or value. + * + * \tparam T Type of input value. + * \tparam U Type of return value. Default same as T. + * + * \param type Type of ov::element::Type_t + * \param value Input value for check. + * + * \return If value is maximum or minimum get limit of U otherwise value as U. + */ +template +U get_value_or_limit_of(const element::Type_t& type, const T& value) { + if (is_min_of(type, value)) { + return std::numeric_limits::min(); + } else if (is_max_of(type, value)) { + return std::numeric_limits::max(); + } else { + return static_cast(value); + } +} +} // namespace element + namespace op { /** * \brief Get the operator's input const as pointer to vector of specified type. @@ -235,7 +299,7 @@ std::unique_ptr get_input_const_data_as(const ov::Node* op, * \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TDimValue &a)). * * \param op Pointer to operator. - * \param idx Operator input index. + * \param port Input port number. * \param tensor_accessor Tensor accessor object. * \param func Unary operation function object to apply in input data. * Default ov::utils::InTypeRange. 
@@ -244,127 +308,106 @@ std::unique_ptr get_input_const_data_as(const ov::Node* op, */ template > -std::unique_ptr get_input_const_data_as_shape(const ov::Node* op, - size_t idx, - const ITensorAccessor& tensor_accessor, - UnaryOperation&& func = ov::util::InTypeRange()) { + class UnaryOperation = ov::util::InTypeRange, + typename std::enable_if::value>::type* = nullptr> +ov::optional get_input_const_data_as_shape(const ov::Node* op, + size_t port, + const ITensorAccessor& tensor_accessor, + UnaryOperation&& func = ov::util::InTypeRange()) { + auto shape = ov::optional(); if (auto s = get_input_const_data_as(op, - idx, + port, tensor_accessor, std::forward(func))) { - return s; - } else { - PartialShape shape; - OPENVINO_SUPPRESS_DEPRECATED_START - if ((idx < op->get_input_size()) && ov::evaluate_as_partial_shape(op->input_value(idx), shape)) { - OPENVINO_SUPPRESS_DEPRECATED_END - return std::unique_ptr(new TShape(std::move(shape))); - } + shape = std::move(*s); } - return {}; + return shape; } -/** - * \brief Get the operator's input const as pointer to vector of specified type. - * - * The behaviour depends on shape type. The default output type is std::vector can be replace by other type - * which if is possible to construct it from constant data vector. - * - * \tparam TShape Shape type which enabled this version (not ov::PartialShape) - * \tparam TData Type use to cast input's data. - * \tparam TRes Result type which has got default type as std::vector. - * \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TData &a)). - * - * \param op Pointer to operator. - * \param idx Operator's input number. - * \param constant_data Map with constant. Default empty. - * \param func Unary operation function object. - * - * \return Pointer to constant data or nullptr if input has no constant data. 
- */ -template , class UnaryOperation = ov::util::Cast> -std::unique_ptr get_input_const_data_as(const ov::Node* op, - size_t idx, - const std::map& constant_data = {}, - UnaryOperation&& func = ov::util::Cast()) { - const auto tensor_accessor = make_tensor_accessor(constant_data); - return get_input_const_data_as(op, idx, tensor_accessor, std::forward(func)); -} - -/** - * \brief Get the input const data as shape object. - * - * The input data can be processed by unary operation. By default is validated and casted to shape's dimension type. - * - * \tparam TShape - * \tparam UnaryOperation Unary function object applied on data with signature (Ret f(const TDimValue &a)). - * - * \param op Pointer to operator. - * \param idx Operator input index. - * \param constant_data Map with constant data. Default empty. - * \param func Unary operation function object to apply in input data. - * Default ov::utils::InTypeRange. - * - * \return Unique pointer to shape created from input data. - */ template > -std::unique_ptr get_input_const_data_as_shape(const ov::Node* op, - size_t idx, - const std::map& constant_data = {}, - UnaryOperation&& func = ov::util::InTypeRange()) { - const auto tensor_accessor = make_tensor_accessor(constant_data); - return get_input_const_data_as_shape(op, - idx, - tensor_accessor, - std::forward(func)); + class UnaryOperation = ov::util::InTypeRange, + typename std::enable_if::value>::type* = nullptr> +ov::optional get_input_const_data_as_shape(const ov::Node* op, + size_t port, + const ITensorAccessor& tensor_accessor, + UnaryOperation&& func = ov::util::InTypeRange()) { + auto shape = ov::optional(); + if (auto t = tensor_accessor(port)) { + shape.emplace(get_tensor_data_as(t, std::forward(func))); + } else if (port < op->get_input_size()) { + PartialShape s; + if (auto c = ov::as_type_ptr(op->get_input_node_shared_ptr(port))) { + shape.emplace(get_raw_data_as(c->get_element_type(), + c->get_data_ptr(), + shape_size(c->get_shape()), + 
std::forward(func))); + OPENVINO_SUPPRESS_DEPRECATED_START + } else if (ov::evaluate_as_partial_shape(op->input_value(port), s)) { + OPENVINO_SUPPRESS_DEPRECATED_END + shape = std::move(s); + } + } + return shape; +} + +// To get element type from constant or tensor. +inline element::Type get_input_const_element_type(const ov::Node* const op, size_t port, const ITensorAccessor& ta) { + if (auto t = ta(port)) { + return t.get_element_type(); + OPENVINO_SUPPRESS_DEPRECATED_START + } else if (const auto& constant = ov::get_constant_from_source(op->input_value(port))) { + OPENVINO_SUPPRESS_DEPRECATED_END + return constant->get_element_type(); + } else { + return element::undefined; + } } /** - * \brief Get the input bounds from constant input (constant map) or evaluate bunds + * \brief Get the input bounds from constant input or try to evaluate bounds * and return them as vector of pairs (lower, upper). * * \tparam TShape Shape type. * \tparam TData Bound value type. * - * \param op Operator pointer. - * \param idx Input index. - * \param constant_data Map with constant data. + * \param op Operator pointer. + * \param port Input port number. + * \param ta Tensor accessor to constant data. * - * \return Return vector of bounds as pair lower, upper. + * \return Return optional vector of bounds as pair lower, upper when evaluated successfully. 
*/ template >> -std::unique_ptr get_input_bounds(const ov::Node* op, - size_t idx, - const std::map& constant_data) { - const auto make_bound = [](TData lb, TData ub) -> typename TResult::value_type { - return {lb, ub}; +ov::optional get_input_bounds(const ov::Node* op, size_t port, const ITensorAccessor& ta) { + const auto make_bound = [](element::Type_t et) { + return [et](TData lb, TData ub) -> typename TResult::value_type { + return {element::get_value_or_limit_of(et, lb), element::get_value_or_limit_of(et, ub)}; + }; }; - if (auto lowers = op::get_input_const_data_as(op, idx, constant_data)) { - auto out = std::unique_ptr(new TResult); + ov::optional out; + + if (auto lowers = op::get_input_const_data_as(op, port, ta)) { + const auto& et = get_input_const_element_type(op, port, ta); + out.emplace(); out->reserve(lowers->size()); - std::transform(lowers->begin(), lowers->end(), lowers->begin(), std::back_inserter(*out), make_bound); - return out; + std::transform(lowers->cbegin(), lowers->cend(), lowers->begin(), std::back_inserter(*out), make_bound(et)); } else { - auto bounds = ov::evaluate_both_bounds(op->get_input_source_output(idx)); + auto bounds = ov::evaluate_both_bounds(op->get_input_source_output(port)); if (bounds.first && bounds.second) { + const auto& et = bounds.first.get_element_type(); constexpr auto cast = ov::util::Cast(); auto lowers = get_tensor_data_as(bounds.first, cast); auto uppers = get_tensor_data_as(bounds.second, cast); - auto out = std::unique_ptr(new TResult); + out.emplace(); out->reserve(lowers.size()); - std::transform(lowers.begin(), lowers.end(), uppers.begin(), std::back_inserter(*out), make_bound); - return out; + std::transform(lowers.begin(), lowers.end(), uppers.begin(), std::back_inserter(*out), make_bound(et)); } } - return {}; + return out; } - } // namespace op /** @@ -388,97 +431,18 @@ struct result_shape { using type = PartialShape; }; +/** + * @brief Get correct result shape for ov::Shape which is same type. 
+ */ +template <> +struct result_shape { + using type = ov::Shape; +}; + template using result_shape_t = typename result_shape::type; } // namespace ov -// Helper to reduce duplicates of code for get_data_as_... specific type functions. -template -inline bool get_data_as(const ov::Node* op, - size_t idx, - std::vector& data_out, - const std::map& constant_data = {}) { - if (auto out = ov::op::get_input_const_data_as(op, idx, constant_data, ov::util::Cast())) { - data_out = std::move(*out); - return true; - } else { - return false; - } -} - -template -inline bool get_data_as_int64(size_t idx, - const ov::Node* op, - std::vector& axes_value, - const std::map& constant_data = {}) { - return get_data_as(op, idx, axes_value, constant_data); -} - -template -inline bool get_data_as_float(size_t idx, - const ov::Node* op, - std::vector& axes_value, - const std::map& constant_data = {}) { - return get_data_as(op, idx, axes_value, constant_data); -} - -/** - * \brief Get the operator's constant data as shape of type T. - * - * \note The constant data are get as size_t (Dimension value type for static shape). If pointed input is signed the - * output shape dimension can be wrongly interpreted. - * - * \tparam TShape Shape type. - * - * \param idx Operator's input index. - * \param op Pointer to operator. - * \param shape Output shape made from constant data. - * \param constant_data Map with constant tensors. Optional default empty. - * - * \return true If constant data acquired as shape otherwise throws NodeValidation exception. - */ -template -inline bool get_data_as_shape(size_t idx, - const ov::Node* op, - TShape& shape, - const std::map& constant_data = {}) { - using TDimValue = typename TShape::value_type::value_type; - shape = - std::move(*ov::op::get_input_const_data_as_shape(op, idx, constant_data, ov::util::Cast())); - return true; -} - -/** - * \brief Get the operator's constant data as ov::PartialShape. 
- * - * If data not get as constant then try evaluate this input as partial shape from input's bounds and labels. - * - * \note The constant data are get as int64_t. If pointed input is unsigned then output shape - * dimension can be wrongly interpreted. - * - * \param idx Operator's input index. - * \param op Pointer to operator. - * \param shape Output shape made from constant data. - * \param constant_data Map with constant tensors. Optional default empty. - * - * \return true If constant data acquired as shape otherwise throws NodeValidation exception. - */ -template <> -inline bool get_data_as_shape( - size_t idx, - const ov::Node* op, - ov::PartialShape& shape, - const std::map>& constant_data) { - if (constant_data.count(idx)) { - shape = ov::PartialShape(ov::opset1::Constant(constant_data.at(idx)).cast_vector()); - return true; - } else { - OPENVINO_SUPPRESS_DEPRECATED_START - return ov::evaluate_as_partial_shape(op->input_value(idx), shape); - OPENVINO_SUPPRESS_DEPRECATED_END - } -} - /** * @brief Check for valid quotient of dimension division. 
* diff --git a/src/core/shape_inference/include/variadic_split_shape_inference.hpp b/src/core/shape_inference/include/variadic_split_shape_inference.hpp index e0290269ab7..a80ec8302f3 100644 --- a/src/core/shape_inference/include/variadic_split_shape_inference.hpp +++ b/src/core/shape_inference/include/variadic_split_shape_inference.hpp @@ -4,30 +4,29 @@ #pragma once -#include -#include - +#include "openvino/core/validation_util.hpp" +#include "openvino/op/variadic_split.hpp" #include "utils.hpp" namespace ov { namespace op { namespace v1 { -template -void shape_infer(const VariadicSplit* op, - const std::vector& input_shapes, - std::vector& output_shapes, - const std::map>& constant_data = {}) { +template > +std::vector shape_infer(const VariadicSplit* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { constexpr bool is_dynamic_shape = std::is_base_of::value; NODE_VALIDATION_CHECK(op, (input_shapes.size() == 3)); - output_shapes.clear(); + auto output_shapes = std::vector(); + auto axis_pshape = input_shapes[1]; auto split_lengths_pshape = input_shapes[2]; NODE_VALIDATION_CHECK(op, - axis_pshape.rank().compatible(0) || axis_pshape.compatible({1}), + axis_pshape.rank().compatible(0) || axis_pshape.compatible(TRShape{1}), "Axis should be a scalar or of shape [1]. Got ", axis_pshape, " instead."); @@ -42,32 +41,31 @@ void shape_infer(const VariadicSplit* op, const auto num_outputs = split_lengths_pshape[0].get_length(); const auto& data_shape = input_shapes[0]; - std::vector axis_values; - std::vector split_lengths; - if (data_shape.rank().is_static() && get_data_as_int64(1, op, axis_values, constant_data)) { + auto axis_values = get_input_const_data_as(op, 1, ta); + if (data_shape.rank().is_static() && axis_values) { NODE_VALIDATION_CHECK(op, - axis_values.size() == 1, + axis_values->size() == 1, "a scalar axis value is expected. 
Got: ", - axis_values.size(), + axis_values->size(), " axes"); - const auto axis_val = axis_values[0]; + const auto axis_val = (*axis_values)[0]; // Adjust split axis in case of negatives OPENVINO_SUPPRESS_DEPRECATED_START const int64_t axis = ov::normalize_axis(op, axis_val, data_shape.rank()); OPENVINO_SUPPRESS_DEPRECATED_END - if (get_data_as_int64(2, op, split_lengths, constant_data)) { + if (auto split_lengths = get_input_const_data_as(op, 2, ta)) { // Adjust split lengths in case of negatives int64_t sum_of_splits = 0; int64_t negative_one_idx = -1; - for (size_t i = 0; i < split_lengths.size(); i++) { + for (size_t i = 0; i < split_lengths->size(); i++) { NODE_VALIDATION_CHECK(op, - split_lengths[i] >= -1, + (*split_lengths)[i] >= -1, "Invalid value ", - split_lengths[i], + (*split_lengths)[i], " in split lengths input. Should be >= -1."); - if (split_lengths[i] == -1) { + if ((*split_lengths)[i] == -1) { NODE_VALIDATION_CHECK(op, negative_one_idx == -1, "Cannot infer split with multiple -1 values at ", @@ -76,14 +74,14 @@ void shape_infer(const VariadicSplit* op, i); negative_one_idx = i; } else { - sum_of_splits += split_lengths[i]; + sum_of_splits += (*split_lengths)[i]; } } const auto dimension_at_axis = data_shape[axis]; if (negative_one_idx >= 0 && dimension_at_axis.is_static()) { - split_lengths[negative_one_idx] = dimension_at_axis.get_length() - sum_of_splits; - sum_of_splits += split_lengths[negative_one_idx]; + (*split_lengths)[negative_one_idx] = dimension_at_axis.get_length() - sum_of_splits; + sum_of_splits += (*split_lengths)[negative_one_idx]; } if (data_shape[axis].is_static()) { NODE_VALIDATION_CHECK(op, @@ -95,13 +93,13 @@ void shape_infer(const VariadicSplit* op, } for (uint64_t output = 0; output < static_cast(num_outputs); ++output) { - if (split_lengths.at(output) == -1) { - auto out_shape = data_shape; + if (split_lengths->at(output) == -1) { + TRShape out_shape = data_shape; out_shape[axis] = Dimension::dynamic(); 
output_shapes.push_back(out_shape); } else { - auto out_shape = data_shape; - out_shape[axis] = split_lengths.at(output); + TRShape out_shape = data_shape; + out_shape[axis] = split_lengths->at(output); output_shapes.push_back(out_shape); } } @@ -110,7 +108,7 @@ void shape_infer(const VariadicSplit* op, // case NODE_VALIDATION_CHECK(op, is_dynamic_shape, "Cannot infer static shape due to lack of split_lengths."); - auto out_shape = data_shape; + TRShape out_shape = data_shape; out_shape[axis] = Dimension::dynamic(); output_shapes.resize(num_outputs, out_shape); } @@ -123,6 +121,7 @@ void shape_infer(const VariadicSplit* op, // we don't even known the number of outputs in this case. // just leave output_shapes as empty. } + return output_shapes; } } // namespace v1 diff --git a/src/core/src/op/assign.cpp b/src/core/src/op/assign.cpp index 5ab1bc6e468..5d77b602b39 100644 --- a/src/core/src/op/assign.cpp +++ b/src/core/src/op/assign.cpp @@ -40,9 +40,8 @@ void op::v3::Assign::validate_and_infer_types() { } NODE_VALIDATION_CHECK(this, m_variable != nullptr, "Can't find variable with id = ", m_variable_id); } - std::vector output_shapes = {ov::PartialShape{}}; std::vector input_shapes = {input_shape}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, arg_t, output_shapes[0]); } @@ -67,10 +66,8 @@ op::v6::Assign::Assign(const Output& new_value, const std::shared_ptrupdate({get_input_partial_shape(0), get_input_element_type(0), m_variable->get_info().variable_id}); - std::vector output_shapes = {ov::PartialShape{}}; - std::vector input_shapes = {get_input_partial_shape(0)}; - shape_infer(this, input_shapes, output_shapes); - set_output_type(0, get_input_element_type(0), output_shapes[0]); + + set_output_type(0, get_input_element_type(0), get_input_partial_shape(0)); } shared_ptr op::v6::Assign::clone_with_new_inputs(const OutputVector& new_args) const { diff --git 
a/src/core/src/op/batch_to_space.cpp b/src/core/src/op/batch_to_space.cpp index 9fc1b82c4c7..716cff91431 100644 --- a/src/core/src/op/batch_to_space.cpp +++ b/src/core/src/op/batch_to_space.cpp @@ -183,7 +183,6 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, con OPENVINO_SUPPRESS_DEPRECATED_END if (outputs[0]->get_partial_shape().is_dynamic()) { - std::map constant_data; std::vector input_shapes; input_shapes.reserve(inputs.size()); @@ -192,10 +191,9 @@ bool ngraph::op::v1::BatchToSpace::evaluate(const HostTensorVector& outputs, con if (input_shapes.back().is_dynamic()) { return false; } - constant_data.emplace(i, inputs[i]); } - const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape(); + const auto output_shape = shape_infer(this, input_shapes, ov::make_tensor_accessor(inputs)).front().to_shape(); outputs[0]->set_element_type(inputs[0]->get_element_type()); outputs[0]->set_shape(output_shape); diff --git a/src/core/src/op/broadcast.cpp b/src/core/src/op/broadcast.cpp index 69a450beddc..5eaac6d2d05 100644 --- a/src/core/src/op/broadcast.cpp +++ b/src/core/src/op/broadcast.cpp @@ -154,7 +154,6 @@ void op::v3::Broadcast::validate_and_infer_types() { axes_et); } - std::vector output_shapes = {ov::PartialShape()}; std::vector input_shapes; const auto& arg_shape = get_input_partial_shape(0); const auto& target_shape = get_input_partial_shape(1); @@ -165,7 +164,7 @@ void op::v3::Broadcast::validate_and_infer_types() { input_shapes = {arg_shape, target_shape, axes_mapping}; } - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_input_is_relevant_to_shape(0); // arg - Result element type set_input_is_relevant_to_shape(1); // target_shape - Result shape @@ -282,9 +281,8 @@ void op::v1::Broadcast::validate_and_infer_types() { const auto& target_shape = get_input_partial_shape(1); const auto& axes_mapping = get_input_partial_shape(2); - std::vector 
output_shapes = {ov::PartialShape()}; std::vector input_shapes = {arg_shape, target_shape, axes_mapping}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_input_is_relevant_to_shape(0); // arg - Result element type set_input_is_relevant_to_shape(1); // target_shape - Result shape diff --git a/src/core/src/op/concat.cpp b/src/core/src/op/concat.cpp index eaf269984d6..b113eae0481 100644 --- a/src/core/src/op/concat.cpp +++ b/src/core/src/op/concat.cpp @@ -67,9 +67,7 @@ void op::Concat::validate_and_infer_types() { input_shapes.push_back(input_shape); } - std::vector output_shapes(1, PartialShape{}); - - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, inputs_et, output_shapes.front()); } diff --git a/src/core/src/op/ctc_greedy_decoder.cpp b/src/core/src/op/ctc_greedy_decoder.cpp index e1824821bc7..fe436fbea3e 100644 --- a/src/core/src/op/ctc_greedy_decoder.cpp +++ b/src/core/src/op/ctc_greedy_decoder.cpp @@ -25,9 +25,8 @@ void op::CTCGreedyDecoder::validate_and_infer_types() { const auto& seq_mask_pshape = get_input_partial_shape(1); const auto& input_et = get_input_element_type(0); - std::vector output_shapes = {ov::PartialShape{}}; std::vector input_shapes = {logits_pshape, seq_mask_pshape}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, input_et, output_shapes[0]); } diff --git a/src/core/src/op/detection_output.cpp b/src/core/src/op/detection_output.cpp index 4dd8e806f0b..76b1eb624ce 100644 --- a/src/core/src/op/detection_output.cpp +++ b/src/core/src/op/detection_output.cpp @@ -39,8 +39,7 @@ void ov::op::v0::DetectionOutput::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - 
shape_infer(this, input_shapes, output_shapes); + auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } @@ -99,8 +98,7 @@ void ov::op::v8::DetectionOutput::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - shape_infer(this, input_shapes, output_shapes); + auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } diff --git a/src/core/src/op/einsum.cpp b/src/core/src/op/einsum.cpp index 462c71b606b..2d752899038 100644 --- a/src/core/src/op/einsum.cpp +++ b/src/core/src/op/einsum.cpp @@ -193,9 +193,8 @@ void op::v7::Einsum::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes = {ov::PartialShape::dynamic()}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, input_type_0, output_shapes[0]); } diff --git a/src/core/src/op/extractimagepatches.cpp b/src/core/src/op/extractimagepatches.cpp index 2aac59c2c93..a4178e73bca 100644 --- a/src/core/src/op/extractimagepatches.cpp +++ b/src/core/src/op/extractimagepatches.cpp @@ -29,8 +29,8 @@ op::v3::ExtractImagePatches::ExtractImagePatches(const Output& image, void op::v3::ExtractImagePatches::validate_and_infer_types() { OV_OP_SCOPE(v3_ExtractImagePatches_validate_and_infer_types); - std::vector input_shapes = {get_input_partial_shape(0)}, output_shapes = {PartialShape{}}; - shape_infer(this, input_shapes, output_shapes); + std::vector input_shapes = {get_input_partial_shape(0)}; + const auto output_shapes = shape_infer(this, input_shapes); if (output_shapes[0].is_dynamic()) set_input_is_relevant_to_shape(0); set_output_type(0, 
get_input_element_type(0), output_shapes[0]); diff --git a/src/core/src/op/eye.cpp b/src/core/src/op/eye.cpp index 97e3582e8b7..661cc4b1e12 100644 --- a/src/core/src/op/eye.cpp +++ b/src/core/src/op/eye.cpp @@ -139,16 +139,14 @@ bool ov::op::v9::Eye::evaluate(const ov::HostTensorVector& outputs, const ov::Ho diagonal_index = 0; } - std::map constant_data; std::vector input_shapes; input_shapes.reserve(inputs.size()); for (size_t i = 0; i < inputs.size(); ++i) { input_shapes.push_back(inputs[i]->get_partial_shape()); - constant_data.emplace(i, inputs[i]); } - const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape(); + const auto output_shape = shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); outputs[0]->set_element_type(get_out_type()); outputs[0]->set_shape(output_shape); diff --git a/src/core/src/op/gather_elements.cpp b/src/core/src/op/gather_elements.cpp index cb519d2c9bc..f0bd0abc6c1 100644 --- a/src/core/src/op/gather_elements.cpp +++ b/src/core/src/op/gather_elements.cpp @@ -33,8 +33,7 @@ void op::v6::GatherElements::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, data_type, output_shapes[0]); } diff --git a/src/core/src/op/gather_nd.cpp b/src/core/src/op/gather_nd.cpp index 08412516902..72c9e0c3ae6 100644 --- a/src/core/src/op/gather_nd.cpp +++ b/src/core/src/op/gather_nd.cpp @@ -28,8 +28,8 @@ void op::v5::GatherND::validate_and_infer_types() { "The indices type is expected to be an integer type. 
Got: ", indices_type); - std::vector out_shapes(1); - shape_infer(this, {get_input_partial_shape(0), get_input_partial_shape(1)}, out_shapes); + const auto out_shapes = + shape_infer(this, std::vector{get_input_partial_shape(0), get_input_partial_shape(1)}); set_output_type(0, data_type, out_shapes[0]); } @@ -55,8 +55,8 @@ void op::v8::GatherND::validate_and_infer_types() { "The indices type is expected to be an integer type. Got: ", indices_type); - std::vector out_shapes(1); - shape_infer(this, {get_input_partial_shape(0), get_input_partial_shape(1)}, out_shapes); + const auto out_shapes = + shape_infer(this, std::vector{get_input_partial_shape(0), get_input_partial_shape(1)}); set_output_type(0, data_type, ov::PartialShape(out_shapes[0])); } diff --git a/src/core/src/op/generate_proposals.cpp b/src/core/src/op/generate_proposals.cpp index 9229e8486d5..7e40b0b6687 100644 --- a/src/core/src/op/generate_proposals.cpp +++ b/src/core/src/op/generate_proposals.cpp @@ -51,12 +51,11 @@ void op::v9::GenerateProposals::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, m_attrs.post_nms_count > 0, "Attribute post_nms_count must be larger than 0."); NODE_VALIDATION_CHECK(this, m_attrs.nms_eta == 1.0, "Attribute min_size must be 1.0."); - std::vector output_shapes = {PartialShape{}, PartialShape{}, PartialShape{}}; std::vector input_shapes = {get_input_partial_shape(0), get_input_partial_shape(1), get_input_partial_shape(2), get_input_partial_shape(3)}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); const auto& input_et = get_input_element_type(0); set_output_type(0, input_et, output_shapes[0]); diff --git a/src/core/src/op/grid_sample.cpp b/src/core/src/op/grid_sample.cpp index 9262f115ab9..b4fd20d8058 100644 --- a/src/core/src/op/grid_sample.cpp +++ b/src/core/src/op/grid_sample.cpp @@ -32,8 +32,10 @@ void op::v9::GridSample::validate_and_infer_types() { "The element type of the grid input tensor must 
be a floating point type."); } - std::vector out_shapes(1); - shape_infer(this, {get_input_partial_shape(0), get_input_partial_shape(1)}, out_shapes); + OPENVINO_SUPPRESS_DEPRECATED_START + const auto input_shapes = get_node_input_partial_shapes(*this); + OPENVINO_SUPPRESS_DEPRECATED_END + const auto out_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), out_shapes[0]); } diff --git a/src/core/src/op/gru_cell.cpp b/src/core/src/op/gru_cell.cpp index 4e82409fedd..10412d321d2 100644 --- a/src/core/src/op/gru_cell.cpp +++ b/src/core/src/op/gru_cell.cpp @@ -100,8 +100,7 @@ void op::v3::GRUCell::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes{ov::PartialShape::dynamic(2)}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, result_et, output_shapes[0]); } diff --git a/src/core/src/op/gru_sequence.cpp b/src/core/src/op/gru_sequence.cpp index e5c0b0321a3..7f2e3541f34 100644 --- a/src/core/src/op/gru_sequence.cpp +++ b/src/core/src/op/gru_sequence.cpp @@ -61,11 +61,9 @@ void op::v5::GRUSequence::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes = {ov::PartialShape::dynamic(4), ov::PartialShape::dynamic(3)}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); // Set output size, type and shape - set_output_size(2); set_output_type(0, result_et, output_shapes[0]); set_output_type(1, result_et, output_shapes[1]); } diff --git a/src/core/src/op/irdft.cpp b/src/core/src/op/irdft.cpp index bed3385e3cc..2c75478832f 100644 --- a/src/core/src/op/irdft.cpp +++ b/src/core/src/op/irdft.cpp @@ -42,7 +42,6 @@ void 
ov::op::v9::IRDFT::validate_and_infer_types() { validate_types(); - std::vector output_shapes = {ov::PartialShape()}; std::vector input_shapes; const auto& data = get_input_partial_shape(0); @@ -54,6 +53,6 @@ void ov::op::v9::IRDFT::validate_and_infer_types() { input_shapes = {data, axes, signal_size}; } - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } diff --git a/src/core/src/op/lstm_cell.cpp b/src/core/src/op/lstm_cell.cpp index 1a6c46d7ad6..a2cdf536037 100644 --- a/src/core/src/op/lstm_cell.cpp +++ b/src/core/src/op/lstm_cell.cpp @@ -158,10 +158,9 @@ void op::v0::LSTMCell::validate_and_infer_types() { "Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not " "match."); - std::vector output_shapes = {ov::PartialShape{}, ov::PartialShape{}}; std::vector input_shapes = {x_pshape, ht_pshape, ct_pshape, w_pshape, r_pshape, b_pshape, p_pshape}; - shape_infer(this, input_shapes, output_shapes); + std::vector output_shapes = shape_infer(this, input_shapes); // Mark inputs which are relevant to output parameters set_input_is_relevant_to_shape(0); set_input_is_relevant_to_shape(1); @@ -176,13 +175,13 @@ void op::v0::LSTMCell::validate_and_infer_types() { Output op::v0::LSTMCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_gates_count * get_hidden_size()}, + Shape{lstm_cell::gates_count * get_hidden_size()}, vector{0.f})}; } Output op::v0::LSTMCell::get_default_peepholes_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_peepholes_count * get_hidden_size()}, + Shape{lstm_cell::peepholes_count * get_hidden_size()}, vector{0.f})}; } @@ -352,10 +351,8 @@ void op::v4::LSTMCell::validate_and_infer_types() { "Element types for X, initial_hidden_state, initial_cell_state, W, R and B do not " "match."); - std::vector 
output_shapes = {ov::PartialShape{}, ov::PartialShape{}}; std::vector input_shapes = {x_pshape, ht_pshape, ct_pshape, w_pshape, r_pshape, b_pshape}; - shape_infer(this, input_shapes, output_shapes); - + std::vector output_shapes = shape_infer(this, input_shapes); // Mark inputs which are relevant to output parameters set_input_is_relevant_to_shape(0); set_input_is_relevant_to_shape(1); @@ -370,7 +367,7 @@ void op::v4::LSTMCell::validate_and_infer_types() { Output op::v4::LSTMCell::get_default_bias_input() const { return Output{op::v0::Constant::create(get_input_element_type(0), - Shape{s_gates_count * get_hidden_size()}, + Shape{lstm_cell::gates_count * get_hidden_size()}, vector{0.f})}; } diff --git a/src/core/src/op/matmul.cpp b/src/core/src/op/matmul.cpp index 512e0f6214f..7f627470e54 100644 --- a/src/core/src/op/matmul.cpp +++ b/src/core/src/op/matmul.cpp @@ -44,8 +44,7 @@ bool evaluate(const op::MatMul* op, const HostTensorPtr& arg0, const HostTensorP ov::Shape arg1_shape = arg1->get_shape(); std::vector input_shapes = {arg0_shape, arg1_shape}; - std::vector output_shapes = {ov::PartialShape{}}; - shape_infer(op, input_shapes, output_shapes); + std::vector output_shapes = shape_infer(op, input_shapes); ov::Shape output_shape = output_shapes[0].to_shape(); output->set_element_type(arg0->get_element_type()); @@ -119,7 +118,6 @@ void ngraph::op::v0::MatMul::validate_and_infer_types() { const auto &A_shape = get_input_partial_shape(0), B_shape = get_input_partial_shape(1); std::vector input_shapes = {A_shape, B_shape}; - std::vector output_shapes = {ov::PartialShape{}}; - shape_infer(this, input_shapes, output_shapes); + std::vector output_shapes = shape_infer(this, input_shapes); set_output_type(0, result_et, output_shapes[0]); } diff --git a/src/core/src/op/one_hot.cpp b/src/core/src/op/one_hot.cpp index 5c90ec2539b..58f96605afe 100644 --- a/src/core/src/op/one_hot.cpp +++ b/src/core/src/op/one_hot.cpp @@ -51,10 +51,9 @@ void 
op::v1::OneHot::validate_and_infer_types() { const auto& on_value_shape = get_input_partial_shape(2); const auto& off_value_shape = get_input_partial_shape(3); - std::vector input_shapes = {indices_shape, depth_shape, on_value_shape, off_value_shape}, - output_shapes = {PartialShape{}}; + std::vector input_shapes = {indices_shape, depth_shape, on_value_shape, off_value_shape}; resolve_axis(this); - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, on_value_et, output_shapes[0]); } diff --git a/src/core/src/op/range.cpp b/src/core/src/op/range.cpp index 77d11edcb28..9efdaaa3a7c 100644 --- a/src/core/src/op/range.cpp +++ b/src/core/src/op/range.cpp @@ -83,12 +83,11 @@ void op::v4::Range::validate_and_infer_types() { "'step' input scalar should be a numeric type. Got: ", get_input_element_type(2)); - std::vector result_shapes = {PartialShape::dynamic()}; std::vector input_shapes; for (size_t i = 0; i < get_input_size(); i++) input_shapes.push_back(get_input_partial_shape(i)); - op::v4::shape_infer(this, input_shapes, result_shapes); + const auto result_shapes = op::v4::shape_infer(this, input_shapes); set_output_type(0, m_output_type, result_shapes[0]); } @@ -356,12 +355,11 @@ void op::v0::Range::validate_and_infer_types() { if (result_et == element::Type_t::dynamic) { set_output_type(0, result_et, ov::PartialShape::dynamic(1)); } else { - std::vector result_shapes = {PartialShape::dynamic()}; std::vector input_shapes; for (size_t i = 0; i < get_input_size(); i++) input_shapes.push_back(get_input_partial_shape(i)); - op::v0::shape_infer(this, input_shapes, result_shapes); + const auto result_shapes = op::v0::shape_infer(this, input_shapes); set_output_type(0, result_et, result_shapes[0]); } diff --git a/src/core/src/op/rdft.cpp b/src/core/src/op/rdft.cpp index db10930e29a..9aa7babae15 100644 --- a/src/core/src/op/rdft.cpp +++ b/src/core/src/op/rdft.cpp @@ -42,7 +42,6 @@ void 
ov::op::v9::RDFT::validate_and_infer_types() { validate_types(); - std::vector output_shapes = {ov::PartialShape()}; std::vector input_shapes; const auto& data = get_input_partial_shape(0); @@ -54,6 +53,6 @@ void ov::op::v9::RDFT::validate_and_infer_types() { input_shapes = {data, axes, signal_size}; } - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } diff --git a/src/core/src/op/read_value.cpp b/src/core/src/op/read_value.cpp index 84940c0c4cf..66e6d9875aa 100644 --- a/src/core/src/op/read_value.cpp +++ b/src/core/src/op/read_value.cpp @@ -4,8 +4,6 @@ #include "ngraph/op/read_value.hpp" -#include - #include "itt.hpp" #include "ngraph/op/util/variable_context.hpp" #include "ngraph/ops.hpp" @@ -22,19 +20,14 @@ op::v3::ReadValue::ReadValue(const Output& init_value, const std::string& void op::v3::ReadValue::validate_and_infer_types() { OV_OP_SCOPE(v3_ReadValue_validate_and_infer_types); auto arg_t = get_input_element_type(0); - auto input_shape = get_input_partial_shape(0); + const auto& input_shape = get_input_partial_shape(0); - std::vector output_shapes = {ov::PartialShape{}}; - std::vector input_shapes = {input_shape}; - shape_infer(this, input_shapes, output_shapes); - - const auto& output_shape = output_shapes[0]; - VariableInfo info = {output_shape, arg_t, m_variable_id}; + VariableInfo info = {input_shape, arg_t, m_variable_id}; if (m_variable == nullptr) m_variable = std::make_shared(info); else m_variable->update(info); - set_output_type(0, arg_t, output_shape); + set_output_type(0, arg_t, input_shape); } shared_ptr op::v3::ReadValue::clone_with_new_inputs(const OutputVector& new_args) const { @@ -58,13 +51,10 @@ op::v6::ReadValue::ReadValue(const Output& init_value, const shared_ptr output_shapes = {ov::PartialShape{}}; - std::vector input_shapes = {input_shape}; - shape_infer(this, input_shapes, output_shapes); - const auto& 
output_shape = output_shapes[0]; - NGRAPH_CHECK(m_variable, "Variable is not initialized."); - VariableInfo var_info = {output_shape, element::dynamic, m_variable->get_info().variable_id}; + const auto& input_shape = get_input_partial_shape(0); + + OPENVINO_ASSERT(m_variable, "Variable is not initialized."); + VariableInfo var_info = {input_shape, element::dynamic, m_variable->get_info().variable_id}; NODE_VALIDATION_CHECK(this, element::Type::merge(var_info.data_type, m_variable->get_info().data_type, arg_t), "Variables types are inconsistent."); @@ -72,7 +62,7 @@ void op::v6::ReadValue::validate_and_infer_types() { ov::PartialShape::merge_into(var_info.data_shape, m_variable->get_info().data_shape), "Variable shape and output shape are inconsistent."); m_variable->update(var_info); - set_output_type(0, arg_t, output_shape); + set_output_type(0, arg_t, input_shape); } shared_ptr op::v6::ReadValue::clone_with_new_inputs(const OutputVector& new_args) const { diff --git a/src/core/src/op/region_yolo.cpp b/src/core/src/op/region_yolo.cpp index 08a0a69a66f..8ed7948d19c 100644 --- a/src/core/src/op/region_yolo.cpp +++ b/src/core/src/op/region_yolo.cpp @@ -54,8 +54,7 @@ void op::RegionYolo::validate_and_infer_types() { "Type of input is expected to be a floating point type. 
Got: ", input_et); std::vector input_shapes = {get_input_partial_shape(0)}; - std::vector output_shapes = {ov::PartialShape{}}; - shape_infer(this, input_shapes, output_shapes); + std::vector output_shapes = shape_infer(this, input_shapes); set_output_type(0, input_et, output_shapes[0]); } diff --git a/src/core/src/op/reorg_yolo.cpp b/src/core/src/op/reorg_yolo.cpp index 086f635d91f..db68574c2f7 100644 --- a/src/core/src/op/reorg_yolo.cpp +++ b/src/core/src/op/reorg_yolo.cpp @@ -28,8 +28,7 @@ void op::ReorgYolo::validate_and_infer_types() { auto input_et = get_input_element_type(0); std::vector input_shapes = {get_input_partial_shape(0)}; - std::vector output_shapes = {ov::PartialShape{}}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, input_et, output_shapes[0]); } diff --git a/src/core/src/op/select.cpp b/src/core/src/op/select.cpp index 24e75b8370a..87a7ecba25d 100644 --- a/src/core/src/op/select.cpp +++ b/src/core/src/op/select.cpp @@ -43,9 +43,7 @@ void op::v1::Select::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, result_et, output_shapes[0]); } diff --git a/src/core/src/op/slice.cpp b/src/core/src/op/slice.cpp index f7812f22ee3..efa941556ca 100644 --- a/src/core/src/op/slice.cpp +++ b/src/core/src/op/slice.cpp @@ -90,9 +90,8 @@ void op::v8::Slice::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes = {ov::PartialShape::dynamic()}; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, 
get_input_element_type(0), output_shapes.front()); } @@ -153,14 +152,12 @@ bool op::v8::Slice::evaluate(const HostTensorVector& outputs, const HostTensorVe OPENVINO_ASSERT(inputs[0]->get_partial_shape().is_static(), "Can't evaluate Slice elements without static HostTensor data shape."); - auto constant_data = std::map>{}; auto input_shapes = std::vector(); input_shapes.reserve(inputs.size()); for (size_t i = 0; i < inputs.size(); ++i) { auto&& tensor = inputs[i]; input_shapes.push_back(tensor->get_partial_shape()); - constant_data.emplace(i, tensor); } OPENVINO_SUPPRESS_DEPRECATED_START @@ -177,8 +174,7 @@ bool op::v8::Slice::evaluate(const HostTensorVector& outputs, const HostTensorVe } OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - shape_infer(this, input_shapes, output_shapes, constant_data); + const auto output_shapes = shape_infer(this, input_shapes, make_tensor_accessor(inputs)); OPENVINO_ASSERT(output_shapes.front().is_static(), "Can't calculate static output shape for Slice evaluation."); outputs[0]->set_shape(output_shapes.front().to_shape()); diff --git a/src/core/src/op/space_to_batch.cpp b/src/core/src/op/space_to_batch.cpp index b117a81ec2e..065abd234c9 100644 --- a/src/core/src/op/space_to_batch.cpp +++ b/src/core/src/op/space_to_batch.cpp @@ -78,7 +78,6 @@ bool ngraph::op::v1::SpaceToBatch::visit_attributes(ngraph::AttributeVisitor& vi bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVector& outputs, const HostTensorVector& inputs) const { if (outputs[0]->get_partial_shape().is_dynamic()) { - std::map constant_data; std::vector input_shapes; input_shapes.reserve(inputs.size()); @@ -87,10 +86,9 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto if (input_shapes.back().is_dynamic()) { return false; } - constant_data.emplace(i, inputs[i]); } - const auto output_shape = shape_infer(this, input_shapes, constant_data).front().to_shape(); + const auto output_shape = 
shape_infer(this, input_shapes, make_tensor_accessor(inputs)).front().to_shape(); outputs[0]->set_element_type(inputs[0]->get_element_type()); outputs[0]->set_shape(output_shape); diff --git a/src/core/src/op/split.cpp b/src/core/src/op/split.cpp index fb18079a29a..ef93ca3e51f 100644 --- a/src/core/src/op/split.cpp +++ b/src/core/src/op/split.cpp @@ -49,8 +49,7 @@ void op::v1::Split::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); for (size_t i = 0; i < m_num_splits; ++i) { set_output_type(i, get_input_element_type(0), output_shapes[i]); @@ -75,12 +74,10 @@ bool op::v1::Split::evaluate(const HostTensorVector& outputs, const HostTensorVe const auto& data_tensor = inputs[0]; const auto& axis_tensor = inputs[1]; - const auto constant_data = std::map>{{1, axis_tensor}}; const auto input_shapes = std::vector{data_tensor->get_partial_shape(), axis_tensor->get_partial_shape()}; - auto output_shapes = std::vector(); - shape_infer(this, input_shapes, output_shapes, constant_data); + auto output_shapes = shape_infer(this, input_shapes, make_tensor_accessor(inputs)); auto outputs_data = std::vector(m_num_splits); for (size_t i = 0; i < m_num_splits; ++i) { diff --git a/src/core/src/op/squeeze.cpp b/src/core/src/op/squeeze.cpp index 812cdc6ea69..992f279166d 100644 --- a/src/core/src/op/squeeze.cpp +++ b/src/core/src/op/squeeze.cpp @@ -34,8 +34,7 @@ void op::Squeeze::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), 
output_shapes[0]); } @@ -65,16 +64,13 @@ bool op::v0::Squeeze::evaluate(const HostTensorVector& outputs, const HostTensor OPENVINO_SUPPRESS_DEPRECATED_END if (has_evaluate()) { - auto output_shapes = std::vector{outputs[0]->get_partial_shape()}; auto input_shapes = std::vector{inputs[0]->get_partial_shape()}; - auto constant_data = std::map>(); if (inputs.size() == 2) { input_shapes.push_back(inputs[1]->get_partial_shape()); - constant_data.emplace(1, inputs[1]); } - shape_infer(this, input_shapes, output_shapes, constant_data); + auto output_shapes = shape_infer(this, input_shapes, make_tensor_accessor(inputs)); auto out_shape = output_shapes[0].get_shape(); outputs[0]->set_shape(out_shape); diff --git a/src/core/src/op/strided_slice.cpp b/src/core/src/op/strided_slice.cpp index 63b8cb185fb..ea953bf14f6 100644 --- a/src/core/src/op/strided_slice.cpp +++ b/src/core/src/op/strided_slice.cpp @@ -158,9 +158,8 @@ void op::v1::StridedSlice::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1, PartialShape::dynamic()); - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } diff --git a/src/core/src/op/topk.cpp b/src/core/src/op/topk.cpp index dae497b3f8f..5d2273ba283 100644 --- a/src/core/src/op/topk.cpp +++ b/src/core/src/op/topk.cpp @@ -114,8 +114,7 @@ bool TopK_evaluate(const ov::op::util::TopKBase* const node, const auto sort_type = node->get_sort_type(); const auto input_shapes = vector{inputs[0]->get_partial_shape(), inputs[1]->get_partial_shape()}; - const auto constant_data = map{{1, inputs[1]}}; - auto output_shape = shape_infer(node, input_shapes, constant_data).front().to_shape(); + auto output_shape = shape_infer(node, input_shapes, ov::make_tensor_accessor(inputs)).front().to_shape(); if 
(output_shape[axis] == 0) { // the kernel can't handle K (output_shape[axis]) equal 0, use arg_shape[axis] instead. diff --git a/src/core/src/op/transpose.cpp b/src/core/src/op/transpose.cpp index 6b2ad190dd4..faf6590d564 100644 --- a/src/core/src/op/transpose.cpp +++ b/src/core/src/op/transpose.cpp @@ -42,11 +42,8 @@ void op::v1::Transpose::validate_and_infer_types() { set_input_is_relevant_to_shape(ORDER); std::vector input_shapes{arg_shape, input_order_shape}; - std::vector output_shapes(OUT_COUNT, ov::PartialShape{}); + std::vector output_shapes = shape_infer(this, input_shapes); - shape_infer(this, input_shapes, output_shapes); - - set_output_size(output_shapes.size()); set_output_type(ARG, get_input_element_type(ARG), output_shapes[ARG_T]); } diff --git a/src/core/src/op/unsqueeze.cpp b/src/core/src/op/unsqueeze.cpp index f55664b0700..ea71ef20562 100644 --- a/src/core/src/op/unsqueeze.cpp +++ b/src/core/src/op/unsqueeze.cpp @@ -27,9 +27,7 @@ void op::v0::Unsqueeze::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - auto output_shapes = std::vector(1); - - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } diff --git a/src/core/src/op/util/elementwise_args.cpp b/src/core/src/op/util/elementwise_args.cpp index f74803a13f0..5252f511c35 100644 --- a/src/core/src/op/util/elementwise_args.cpp +++ b/src/core/src/op/util/elementwise_args.cpp @@ -4,8 +4,8 @@ #include "ngraph/op/util/elementwise_args.hpp" +#include "eltwise_shape_inference.hpp" #include "ngraph/op/util/binary_elementwise_arithmetic.hpp" -#include "utils.hpp" std::tuple ov::op::util::validate_and_infer_elementwise_args(Node* node) { OPENVINO_ASSERT(node != nullptr, "Node is empty! 
Cannot validate eltwise arguments."); @@ -28,8 +28,7 @@ std::tuple ov::op::util::validate_and_infer const auto& A_shape = node->get_input_partial_shape(0); const auto& B_shape = node->get_input_partial_shape(1); std::vector input_shapes = {A_shape, B_shape}; - std::vector output_shapes = {ov::PartialShape{}}; - eltwise_shape_infer(node, input_shapes, output_shapes); + const auto output_shapes = ov::op::eltwise_shape_infer(node, input_shapes); return std::make_tuple(result_et, output_shapes[0]); } diff --git a/src/core/src/op/util/fft_base.cpp b/src/core/src/op/util/fft_base.cpp index 7d615721adc..b97c354e79c 100644 --- a/src/core/src/op/util/fft_base.cpp +++ b/src/core/src/op/util/fft_base.cpp @@ -53,7 +53,6 @@ void ov::op::util::FFTBase::validate_and_infer_types() { validate_types(); - std::vector output_shapes = {ov::PartialShape()}; std::vector input_shapes; const auto& data = get_input_partial_shape(0); @@ -65,6 +64,6 @@ void ov::op::util::FFTBase::validate_and_infer_types() { input_shapes = {data, axes, signal_size}; } - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, get_input_element_type(0), output_shapes[0]); } diff --git a/src/core/src/op/util/gather_base.cpp b/src/core/src/op/util/gather_base.cpp index 876fedf3bc0..1197059fbee 100644 --- a/src/core/src/op/util/gather_base.cpp +++ b/src/core/src/op/util/gather_base.cpp @@ -32,9 +32,8 @@ void ov::op::util::GatherBase::validate_and_infer_types() { const auto& data_pshape = get_input_partial_shape(0); const auto& indices_pshape = get_input_partial_shape(1); const auto& axis_pshape = get_input_partial_shape(2); - std::vector input_shapes = {data_pshape, indices_pshape, axis_pshape}, - output_shapes = {PartialShape{}}; - shape_infer(this, input_shapes, output_shapes, {}); + std::vector input_shapes = {data_pshape, indices_pshape, axis_pshape}; + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, 
data_type, output_shapes[0]); } diff --git a/src/core/src/op/util/gather_nd_base.cpp b/src/core/src/op/util/gather_nd_base.cpp index a6fff029daf..5276273699f 100644 --- a/src/core/src/op/util/gather_nd_base.cpp +++ b/src/core/src/op/util/gather_nd_base.cpp @@ -23,17 +23,12 @@ ov::op::util::GatherNDBase::GatherNDBase(const Output& data, const Output< void ov::op::util::GatherNDBase::validate_inputs_and_infer_shape() { // check types of input tensors - const auto& data_type = get_input_element_type(0); const auto& indices_type = get_input_element_type(1); NODE_VALIDATION_CHECK(this, indices_type.is_integral_number(), "The indices type is expected to be an integer type. Got: ", indices_type); - - std::vector in_shapes{get_input_partial_shape(0), get_input_partial_shape(1)}; - std::vector out_shapes = op::gather_nd::gather_nd_base_shape_infer(this, in_shapes); - set_output_type(0, data_type, out_shapes[0]); } bool ov::op::util::GatherNDBase::visit_attributes(AttributeVisitor& visitor) { diff --git a/src/core/src/op/util/pad_base.cpp b/src/core/src/op/util/pad_base.cpp index a92c7da374a..51f7b45b810 100644 --- a/src/core/src/op/util/pad_base.cpp +++ b/src/core/src/op/util/pad_base.cpp @@ -99,7 +99,7 @@ void op::util::PadBase::validate_and_infer_types() { ")."); OPENVINO_SUPPRESS_DEPRECATED_START - const auto output_shapes = op::util::shape_infer(this, get_node_input_partial_shapes(*this)); + const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this)); OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, result_et, output_shapes[0]); } diff --git a/src/core/src/op/util/scatter_nd_base.cpp b/src/core/src/op/util/scatter_nd_base.cpp index f4d70e1d2c9..766ab15cf31 100644 --- a/src/core/src/op/util/scatter_nd_base.cpp +++ b/src/core/src/op/util/scatter_nd_base.cpp @@ -47,9 +47,7 @@ void ov::op::util::ScatterNDBase::validate_and_infer_types() { const auto& indices = get_input_partial_shape(1); const auto& updates = get_input_partial_shape(2); - 
std::vector output_shapes = {ov::PartialShape()}; std::vector input_shapes = {inputs, indices, updates}; - - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); set_output_type(0, outputs_et, output_shapes[0]); } diff --git a/src/core/src/op/util/topk_base.cpp b/src/core/src/op/util/topk_base.cpp index 7461ef977e8..0327e1ac0d2 100644 --- a/src/core/src/op/util/topk_base.cpp +++ b/src/core/src/op/util/topk_base.cpp @@ -46,7 +46,7 @@ void ov::op::util::TopKBase::validate_and_infer_types() { set_axis(get_input_partial_shape(0).rank(), get_provided_axis()); OPENVINO_SUPPRESS_DEPRECATED_START - const auto output_shapes = op::util::shape_infer(this, get_node_input_partial_shapes(*this)); + const auto output_shapes = shape_infer(this, get_node_input_partial_shapes(*this)); OPENVINO_SUPPRESS_DEPRECATED_END set_output_type(0, get_input_element_type(0), output_shapes[0]); diff --git a/src/core/src/op/variadic_split.cpp b/src/core/src/op/variadic_split.cpp index d09ab3c4047..4b27b2dfbae 100644 --- a/src/core/src/op/variadic_split.cpp +++ b/src/core/src/op/variadic_split.cpp @@ -37,8 +37,7 @@ void ngraph::op::v1::VariadicSplit::validate_and_infer_types() { OPENVINO_SUPPRESS_DEPRECATED_START const auto input_shapes = get_node_input_partial_shapes(*this); OPENVINO_SUPPRESS_DEPRECATED_END - std::vector output_shapes; - shape_infer(this, input_shapes, output_shapes); + const auto output_shapes = shape_infer(this, input_shapes); const auto& data_type = get_input_element_type(0); for (size_t i = 0; i < output_shapes.size(); ++i) { @@ -94,8 +93,7 @@ bool op::v1::VariadicSplit::evaluate_variadic_split(const HostTensorVector& inpu std::vector input_shapes = {data_tensor->get_partial_shape(), axis_tensor->get_partial_shape(), split_lengths_tensor->get_partial_shape()}; - std::vector output_shapes; - shape_infer(this, input_shapes, output_shapes, {{1, axis_tensor}, {2, split_lengths_tensor}}); + auto output_shapes = 
shape_infer(this, input_shapes, make_tensor_accessor(inputs)); const auto data_shape = data_tensor->get_shape(); std::vector lower_bounds(data_shape.size(), 0); diff --git a/src/core/tests/type_prop/eye.cpp b/src/core/tests/type_prop/eye.cpp index e18378a37c1..349823d798b 100644 --- a/src/core/tests/type_prop/eye.cpp +++ b/src/core/tests/type_prop/eye.cpp @@ -366,7 +366,8 @@ TEST_F(TypePropEyeV9Test, default_ctor_no_arguments) { {1, std::make_shared(element::i64, Shape{}, &cols)}, {3, std::make_shared(element::i32, Shape{batch.size()}, batch.data())}}; - const auto output_shapes = op::v9::shape_infer(op.get(), PartialShapes{{}, {}, {}, {3}}, constant_map); + const auto output_shapes = + op::v9::shape_infer(op.get(), PartialShapes{{}, {}, {}, {3}}, make_tensor_accessor(constant_map)); EXPECT_EQ(op->get_out_type(), element::i32); EXPECT_EQ(output_shapes.front(), PartialShape({2, 4, 1, 8, 5})); diff --git a/src/core/tests/type_prop/one_hot.cpp b/src/core/tests/type_prop/one_hot.cpp index 228009d8c94..e0e45c6cbfb 100644 --- a/src/core/tests/type_prop/one_hot.cpp +++ b/src/core/tests/type_prop/one_hot.cpp @@ -206,7 +206,7 @@ TEST(type_prop, one_hot_v1_negative_depth) { OV_EXPECT_THROW(auto ont_hot = make_shared(indices, depth, on_value, off_value, axis), ov::Exception, - HasSubstr("can't be lower than zero")); + HasSubstr("can't be negative.")); } TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { diff --git a/src/core/tests/type_prop/top_k.cpp b/src/core/tests/type_prop/top_k.cpp index 0a704581b5a..90523d20f56 100644 --- a/src/core/tests/type_prop/top_k.cpp +++ b/src/core/tests/type_prop/top_k.cpp @@ -73,10 +73,9 @@ TYPED_TEST_P(topk_type_prop, default_ctor_no_arguments) { op->set_mode(op::TopKMode::MIN); op->set_sort_type(op::TopKSortType::SORT_INDICES); - const auto constant_map = - std::map{{1, std::make_shared(element::i64, Shape{}, &k)}}; + const auto constant_map = std::unordered_map{{1, {element::i64, Shape{}, &k}}}; - const auto outputs = 
shape_infer(op.get(), PartialShapes{data_shape, {}}, constant_map); + const auto outputs = shape_infer(op.get(), PartialShapes{data_shape, {}}, ov::make_tensor_accessor(constant_map)); EXPECT_EQ(op->get_provided_axis(), exp_axis); EXPECT_EQ(op->get_axis(), exp_axis); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp index d377e60d338..2ccd7a1bce1 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp @@ -77,9 +77,7 @@ void ov::intel_cpu::MHANode::validate_and_infer_types() { auto matmul0 = std::make_shared(matmul0_in0, matmul0_in1); std::vector matmul0_input_shapes = {matmul0_shape0, matmul0_shape1}; - std::vector matmul0_output_shapes = {ov::PartialShape{}}; - - shape_infer(matmul0.get(), matmul0_input_shapes, matmul0_output_shapes); + std::vector matmul0_output_shapes = shape_infer(matmul0.get(), matmul0_input_shapes); const auto matmul1_shape0 = matmul0_output_shapes[0]; const auto matmul1_shape1 = transpose(get_input_partial_shape(3).get_shape(), {0, 2, 1, 3}); @@ -89,9 +87,7 @@ void ov::intel_cpu::MHANode::validate_and_infer_types() { auto matmul1 = std::make_shared(matmul1_in0, matmul1_in1); std::vector matmul1_input_shapes = {matmul1_shape0, matmul1_shape1}; - std::vector matmul1_output_shapes = {ov::PartialShape{}}; - - shape_infer(matmul1.get(), matmul1_input_shapes, matmul1_output_shapes); + std::vector matmul1_output_shapes = shape_infer(matmul1.get(), matmul1_input_shapes); const auto output_shape = transpose(matmul1_output_shapes[0].get_shape(), {0, 2, 1, 3}); diff --git a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp index e84da8c6ab3..30a3413bcc9 100644 --- a/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp +++ 
b/src/plugins/intel_cpu/src/utils/shape_inference/shape_inference.cpp @@ -7,9 +7,14 @@ #include #include #include +#include +#include #include #include +#include #include +#include +#include #include "adaptive_avg_pool_shape_inference.hpp" #include "adaptive_max_pool_shape_inference.hpp" @@ -24,6 +29,7 @@ #include "concat_shape_inference.hpp" #include "convolution_backprop_shape_inference.hpp" #include "convolution_shape_inference.hpp" +#include "copy_shape_inference.hpp" #include "ctc_greedy_decoder_seq_len_shape_inference.hpp" #include "ctc_greedy_decoder_shape_inference.hpp" #include "ctc_loss_shape_inference.hpp" @@ -32,6 +38,7 @@ #include "depth_to_space_shape_inference.hpp" #include "detection_output_shape_inference.hpp" #include "einsum_shape_inference.hpp" +#include "eltwise_shape_inference.hpp" #include "embedding_segments_sum_shape_inference.hpp" #include "embeddingbag_offsets_shape_inference.hpp" #include "embeddingbag_packed_shape_inference.hpp" @@ -67,7 +74,6 @@ #include "psroi_pooling_shape_inference.hpp" #include "range_shape_inference.hpp" #include "rdft_shape_inference.hpp" -#include "read_value_shape_inference.hpp" #include "reduce_shape_inference.hpp" #include "region_yolo_shape_inference.hpp" #include "reorg_yolo_shape_inference.hpp" @@ -101,200 +107,10 @@ namespace ov { namespace intel_cpu { - -class entryBase : public IShapeInferCommon { -public: - using iface_type = IShapeInferCommon; - - entryBase(std::shared_ptr node) : node{node} { - for (size_t i = 0; i < node->get_input_size(); i++) { - const auto& shape = node->get_input_partial_shape(i); - if (shape.rank().is_static()) { - input_ranks.push_back(shape.rank().get_length()); - } else { - input_ranks.push_back(-1); - } - } - } - - const ov::CoordinateDiff& get_pads_begin() override { - OPENVINO_ASSERT(false, "entryBase do not support get_pads_begin() by default."); - } - - const ov::CoordinateDiff& get_pads_end() override { - OPENVINO_ASSERT(false, "entryBase do not support get_pads_end() 
by default."); - } - - const std::vector& get_input_ranks() override { - return input_ranks; - } - -protected: - std::vector input_ranks; - std::shared_ptr node; -}; - -template -class entryIO : public entryBase { -public: - using entryBase::entryBase; - - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - std::vector output_shapes(node->get_output_size()); - shape_infer(static_cast(node.get()), input_shapes, output_shapes); - return {std::move(output_shapes), ShapeInferStatus::success}; - } -}; - -template -class entryIOC : public entryBase { -public: - using entryBase::entryBase; - - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - auto op = static_cast(node.get()); - std::vector output_shapes(op->get_output_size()); - shape_infer(op, input_shapes, output_shapes, constant_data); - return {std::move(output_shapes), ShapeInferStatus::success}; - } -}; - -class entryCopy : public entryBase { -public: - using entryBase::entryBase; - - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - auto op = node.get(); - std::vector output_shapes(op->get_output_size()); - copy_shape_infer(op, input_shapes, output_shapes); - return {std::move(output_shapes), ShapeInferStatus::success}; - } -}; - -class entryFirstPassthrough : public entryBase { -public: - using entryBase::entryBase; - - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - auto op = node.get(); - std::vector output_shapes(op->get_output_size()); - first_input_passthrough_infer(op, input_shapes, output_shapes); - return {std::move(output_shapes), ShapeInferStatus::success}; - } -}; - -class entryEltwise : public entryBase { -public: - using entryBase::entryBase; - - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - auto 
op = node.get(); - std::vector output_shapes(op->get_output_size()); - eltwise_shape_infer(op, input_shapes, output_shapes); - return {std::move(output_shapes), ShapeInferStatus::success}; - } -}; - -class entryFallback : public entryBase { -public: - std::shared_ptr local_op_default; - - entryFallback(std::shared_ptr node) : entryBase(node) { - ngraph::OutputVector new_inputs; - auto op = node.get(); - for (size_t i = 0; i < op->get_input_size(); ++i) { - if (dynamic_cast(op->get_input_node_ptr(i))) { - new_inputs.push_back(op->get_input_node_ptr(i)->clone_with_new_inputs(ov::OutputVector{})); - } else { - new_inputs.push_back(std::make_shared(op->get_input_element_type(i), - op->get_input_partial_shape(i))); - } - } - - local_op_default = op->clone_with_new_inputs(new_inputs); - } - - virtual void post_validate_and_infer_types(const std::shared_ptr& local_op) {} - - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - auto op = node.get(); - std::vector output_shapes; - - std::shared_ptr local_op; - if (!constant_data.empty()) { - ngraph::OutputVector new_inputs; - for (size_t i = 0; i < op->get_input_size(); ++i) { - if (constant_data.count(i)) { - new_inputs.push_back(std::make_shared(constant_data.at(i))); - } else if (dynamic_cast(op->get_input_node_ptr(i))) { - new_inputs.push_back(op->get_input_node_ptr(i)->clone_with_new_inputs(ov::OutputVector{})); - } else { - new_inputs.push_back(std::make_shared(op->get_input_element_type(i), - input_shapes[i].to_partial_shape())); - } - } - local_op = op->clone_with_new_inputs(new_inputs); - } else { - local_op = local_op_default; - for (size_t i = 0; i < local_op->get_input_size(); i++) { - if (auto parameter = dynamic_cast(local_op->get_input_node_ptr(i))) { - parameter->set_partial_shape(input_shapes[i].to_partial_shape()); - parameter->validate_and_infer_types(); - } - } - } - - local_op->validate_and_infer_types(); - - 
output_shapes.resize(local_op->get_output_size()); - for (size_t i = 0; i < output_shapes.size(); ++i) { - const auto& partial_shape = local_op->get_output_partial_shape(i); - - if (partial_shape.is_dynamic()) { - return {{}, ShapeInferStatus::skip}; - } - - output_shapes[i] = StaticShape(partial_shape.to_shape()); - } - - post_validate_and_infer_types(local_op); - - return {std::move(output_shapes), ShapeInferStatus::success}; - } -}; - -template -class ShapeInferWithPadding : public entryBase { -public: - ShapeInferWithPadding(std::shared_ptr node) : entryBase{std::move(node)}, m_pads_begin{}, m_pads_end{} {} - - IShapeInferCommon::Result infer(const std::vector& input_shapes, - const std::map& constant_data) override { - auto op = static_cast(node.get()); - auto out_shapes = shape_infer(op, input_shapes, m_pads_begin, m_pads_end, constant_data); - return {std::move(out_shapes), ShapeInferStatus::success}; - } - - const ov::CoordinateDiff& get_pads_begin() override { - return m_pads_begin; - } - - const ov::CoordinateDiff& get_pads_end() override { - return m_pads_end; - } - -protected: - ov::CoordinateDiff m_pads_begin, m_pads_end; -}; - /** * @brief Base shape inference object implementing the IStaticShapeInfer without padding support. + * + * Default shape inference is first input pass as output shape. */ class ShapeInferBase : public IStaticShapeInfer { public: @@ -310,19 +126,10 @@ public: } } - IShapeInferCommon::Result - infer(const std::vector& input_shapes, const std::map& constant_data) override { - // For backward compatibility, create ov tensors and run shape inference. 
- return infer(input_shapes, make_tensor_accessor(constant_data)); - } - - IShapeInferCommon::Result infer(const std::vector& input_shapes, const ov::ITensorAccessor&) override { - OPENVINO_THROW("Not implemented by base class"); - } - - ov::optional> infer(const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor) override { - OPENVINO_THROW("Not implemented by base class"); + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor&) override { + NODE_VALIDATION_CHECK(m_node.get(), input_shapes.size() > 0, "Incorrect number of input shapes"); + return {std::vector{input_shapes[0]}}; } const ov::CoordinateDiff& get_pads_begin() override { @@ -347,58 +154,82 @@ protected: }; /** - * @brief Shape inference using tensor accessor to get constant data. - * - * @tparam TOp Type of operator. - * @tparam MASK The bit mask where each bit corresponds to an input port number. + * @brief Shape inference which copy single input shape to output shape. */ +class ShapeInferCopy : public ShapeInferBase { +public: + using ShapeInferBase::ShapeInferBase; + + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor&) override { + return {op::copy_shape_infer(m_node.get(), input_shapes)}; + } +}; + +/** + * @brief Shape inference applied for element wise operators. + */ +class ShapeInferEltwise : public ShapeInferBase { +public: + using ShapeInferBase::ShapeInferBase; + + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor&) override { + return {op::eltwise_shape_infer(m_node.get(), input_shapes)}; + } +}; + +/** + * @brief Shape inference used as fallback if specific inference not implemented. 
+ */ +class ShapeInferFallback : public ShapeInferBase { +public: + using ShapeInferBase::ShapeInferBase; + + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor& tensor_accessor) override { + auto op = m_node.get(); + std::vector output_shapes; + + std::shared_ptr local_op; + ov::OutputVector new_inputs; + for (size_t i = 0; i < op->get_input_size(); ++i) { + if (auto t = tensor_accessor(i)) { + new_inputs.push_back( + std::make_shared(t.get_element_type(), t.get_shape(), t.data())); + } else if (dynamic_cast(op->get_input_node_ptr(i))) { + new_inputs.push_back(op->get_input_node_ptr(i)->clone_with_new_inputs(ov::OutputVector{})); + } else { + new_inputs.push_back(std::make_shared(op->get_input_element_type(i), + input_shapes[i].to_partial_shape())); + } + } + local_op = op->clone_with_new_inputs(new_inputs); + + local_op->validate_and_infer_types(); + + output_shapes.resize(local_op->get_output_size()); + for (size_t i = 0; i < output_shapes.size(); ++i) { + const auto& partial_shape = local_op->get_output_partial_shape(i); + + if (partial_shape.is_dynamic()) { + return {}; + } + + output_shapes[i] = StaticShape(partial_shape.to_shape()); + } + + return {std::move(output_shapes)}; + } +}; + template class ShapeInferTA : public ShapeInferBase { public: using ShapeInferBase::ShapeInferBase; - IShapeInferCommon::Result infer(const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor) override { - return {shape_infer(static_cast(m_node.get()), input_shapes, tensor_accessor), ShapeInferStatus::success}; - } - - port_mask_t get_port_mask() const override { - return MASK; - } -}; - -template -class ShapeInferenceTA : public ShapeInferBase { -public: - using ShapeInferBase::ShapeInferBase; - - IShapeInferCommon::Result infer(const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor) override { - // Temporary support of StaticShape. 
- auto in_shapes = std::vector(); - in_shapes.reserve(input_shapes.size()); - for (auto& s : input_shapes) { - in_shapes.emplace_back(reinterpret_cast(s)); - } - - auto out_shapes = infer(in_shapes, tensor_accessor); - Result result{{}, out_shapes ? ShapeInferStatus::success : ShapeInferStatus::skip}; - - if (out_shapes) { - result.shapes.reserve(out_shapes->size()); - std::transform(out_shapes->begin(), - out_shapes->end(), - std::back_inserter(result.shapes), - [](StaticShapeCon& s) { - return std::move(reinterpret_cast(*s)); - }); - } - - return result; - } - - ov::optional> infer(const std::vector& input_shapes, - const ov::ITensorAccessor& tensor_accessor) override { + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor& tensor_accessor) override { return {shape_infer(static_cast(m_node.get()), input_shapes, tensor_accessor)}; } @@ -419,8 +250,9 @@ class ShapeInferTA : public ShapeInferBase { public: using ShapeInferBase::ShapeInferBase; - IShapeInferCommon::Result infer(const std::vector& input_shapes, const ov::ITensorAccessor&) override { - return {shape_infer(static_cast(m_node.get()), input_shapes), ShapeInferStatus::success}; + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor&) override { + return {shape_infer(static_cast(m_node.get()), input_shapes)}; } }; @@ -429,11 +261,6 @@ class ShapeInferPaddingBase : public ShapeInferBase { public: ShapeInferPaddingBase(std::shared_ptr node) : ShapeInferBase(std::move(node)), m_pads_begin{}, m_pads_end{} {} - IShapeInferCommon::Result infer(const std::vector& input_shapes, - const ITensorAccessor& tensor_accessor) override { - OPENVINO_THROW("Not implemented by base class"); - } - const ov::CoordinateDiff& get_pads_begin() override { return m_pads_begin; } @@ -457,10 +284,9 @@ class ShapeInferPaddingTA : public ShapeInferPaddingBase { public: using ShapeInferPaddingBase::ShapeInferPaddingBase; - IShapeInferCommon::Result infer(const std::vector& 
input_shapes, - const ov::ITensorAccessor& tensor_accessor) override { - return {shape_infer(static_cast(m_node.get()), input_shapes, m_pads_begin, m_pads_end, tensor_accessor), - ShapeInferStatus::success}; + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor& tensor_accessor) override { + return {shape_infer(static_cast(m_node.get()), input_shapes, m_pads_begin, m_pads_end, tensor_accessor)}; } port_mask_t get_port_mask() const override { @@ -468,6 +294,23 @@ public: } }; +/** + * @brief Shape inference using tensor accessor to get constant data and padding + * + * @tparam TOp Type of operator. + * @tparam MASK The bit mask where each bit corresponds to an input port number. + */ +template +class ShapeInferPaddingTA : public ShapeInferPaddingBase { +public: + using ShapeInferPaddingBase::ShapeInferPaddingBase; + + ov::optional> infer(const std::vector& input_shapes, + const ov::ITensorAccessor&) override { + return {shape_infer(static_cast(m_node.get()), input_shapes, m_pads_begin, m_pads_end)}; + } +}; + /** * \brief Shape infer factory * @@ -506,12 +349,6 @@ private: static const TRegistry registry; }; -// Helpers to make shape inference objects (primary template). -template